Make mode_for_vector return an opt_mode
[official-gcc.git] / gcc / config / rs6000 / rs6000.c
blob 6d613c38b757cd76229b2c0add41ffac8ff939e1
/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
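/* These tables back function multi-versioning.  A sketch of the source
   syntax that exercises them (the function itself is hypothetical):

     __attribute__ ((target_clones ("cpu=power9", "cpu=power8", "default")))
     long mod_func (long a, long b) { return (a % b) + (a / b); }

   Each clone is compiled for its ISA, and the generated resolver picks one
   at load time via __builtin_cpu_supports on the HWCAP names above.  */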
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */

/* Masks of valid addressing modes, indexed by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
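/* To illustrate how the masks are consulted (V4SImode is just an example
   mode): reg+reg addressing of a V4SImode value in an Altivec register is
   available iff

     (reg_addr[V4SImode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_INDEXED) != 0

   and the RELOAD_REG_ANY entry answers the same question for the union of
   the GPR, FPR, and Altivec classes, as the two helpers above do.  */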
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
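/* Concretely (an illustration, not used by the code): the ISA 3.0 lxv and
   stxv instructions use the DQ form, whose displacement must be a multiple
   of 16, so an offset of 32 is representable while an offset of 8 is not.
   RELOAD_REG_QUAD_OFFSET records exactly that extra restriction.  */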
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add) */
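/* COSTS_N_INSNS (N) (from rtl.h) expresses N instructions in the units used
   by the middle-end rtx-cost machinery, so each entry below is relative to a
   single integer add, COSTS_N_INSNS (1).  E.g. a divsi entry of
   COSTS_N_INSNS (19) models an SImode divide as costing 19 adds.  */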
/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
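/* The #define/#include/#undef dance above is a classic X-macro.  Each entry
   in rs6000-builtin.def ultimately expands through one RS6000_BUILTIN_<n>
   macro; schematically (this is an illustrative shape, not a literal line
   from the .def file):

     RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUBM, "__builtin_altivec_vaddubm",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_BINARY,
		       CODE_FOR_addv16qi3)

   becomes { NAME, ICODE, MASK, ATTR } here, i.e. one initialized row of
   rs6000_builtin_info.  Other inclusions of the same .def file redefine the
   macros to build the builtin enum and the decl table instead.  */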
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);


static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "mq", "lr", "ctr", "ap",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
 "ca",
 /* AltiVec registers.  */
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "vrsave", "vscr",
 /* Soft frame pointer.  */
 "sfp",
 /* HTM SPR registers.  */
 "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
  "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
  "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
   "mq",   "lr",  "ctr",   "ap",
 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
 /* AltiVec registers.  */
  "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
  "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
 "vrsave", "vscr",
 /* Soft frame pointer.  */
 "sfp",
 /* HTM SPR registers.  */
 "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
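/* Worked example, following directly from the definition: the first and
   last AltiVec registers map to

     ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO)      == 0x80000000
     ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) == 0x00000001

   matching the VRSAVE convention of %v0 in the most significant bit.  */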
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1705 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1706 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1707 rs6000_builtin_vectorization_cost
1708 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1709 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1710 rs6000_preferred_simd_mode
1711 #undef TARGET_VECTORIZE_INIT_COST
1712 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1713 #undef TARGET_VECTORIZE_ADD_STMT_COST
1714 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1715 #undef TARGET_VECTORIZE_FINISH_COST
1716 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1717 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1718 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1720 #undef TARGET_INIT_BUILTINS
1721 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1722 #undef TARGET_BUILTIN_DECL
1723 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1725 #undef TARGET_FOLD_BUILTIN
1726 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1727 #undef TARGET_GIMPLE_FOLD_BUILTIN
1728 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1730 #undef TARGET_EXPAND_BUILTIN
1731 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1733 #undef TARGET_MANGLE_TYPE
1734 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1736 #undef TARGET_INIT_LIBFUNCS
1737 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1739 #if TARGET_MACHO
1740 #undef TARGET_BINDS_LOCAL_P
1741 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1742 #endif
1744 #undef TARGET_MS_BITFIELD_LAYOUT_P
1745 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1747 #undef TARGET_ASM_OUTPUT_MI_THUNK
1748 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1750 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1751 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1753 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1754 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1756 #undef TARGET_REGISTER_MOVE_COST
1757 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1758 #undef TARGET_MEMORY_MOVE_COST
1759 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1760 #undef TARGET_CANNOT_COPY_INSN_P
1761 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1762 #undef TARGET_RTX_COSTS
1763 #define TARGET_RTX_COSTS rs6000_rtx_costs
1764 #undef TARGET_ADDRESS_COST
1765 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1767 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1768 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1770 #undef TARGET_PROMOTE_FUNCTION_MODE
1771 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1773 #undef TARGET_RETURN_IN_MEMORY
1774 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1776 #undef TARGET_RETURN_IN_MSB
1777 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1779 #undef TARGET_SETUP_INCOMING_VARARGS
1780 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1782 /* Always strict argument naming on rs6000. */
1783 #undef TARGET_STRICT_ARGUMENT_NAMING
1784 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1785 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1786 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1787 #undef TARGET_SPLIT_COMPLEX_ARG
1788 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1789 #undef TARGET_MUST_PASS_IN_STACK
1790 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1791 #undef TARGET_PASS_BY_REFERENCE
1792 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1793 #undef TARGET_ARG_PARTIAL_BYTES
1794 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1795 #undef TARGET_FUNCTION_ARG_ADVANCE
1796 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1797 #undef TARGET_FUNCTION_ARG
1798 #define TARGET_FUNCTION_ARG rs6000_function_arg
1799 #undef TARGET_FUNCTION_ARG_PADDING
1800 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1801 #undef TARGET_FUNCTION_ARG_BOUNDARY
1802 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1804 #undef TARGET_BUILD_BUILTIN_VA_LIST
1805 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1807 #undef TARGET_EXPAND_BUILTIN_VA_START
1808 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1810 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1811 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1813 #undef TARGET_EH_RETURN_FILTER_MODE
1814 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1816 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1817 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1819 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1820 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1822 #undef TARGET_FLOATN_MODE
1823 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1825 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1826 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1828 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1829 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1831 #undef TARGET_MD_ASM_ADJUST
1832 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1834 #undef TARGET_OPTION_OVERRIDE
1835 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1837 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1838 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1839 rs6000_builtin_vectorized_function
1841 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1842 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1843 rs6000_builtin_md_vectorized_function
1845 #undef TARGET_STACK_PROTECT_GUARD
1846 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1848 #if !TARGET_MACHO
1849 #undef TARGET_STACK_PROTECT_FAIL
1850 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1851 #endif
1853 #ifdef HAVE_AS_TLS
1854 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1855 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1856 #endif
1858 /* Use a 32-bit anchor range. This leads to sequences like:
1860 addis tmp,anchor,high
1861 add dest,tmp,low
1863 where tmp itself acts as an anchor, and can be shared between
1864 accesses to the same 64k page. */
1865 #undef TARGET_MIN_ANCHOR_OFFSET
1866 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1867 #undef TARGET_MAX_ANCHOR_OFFSET
1868 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
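/* Illustrative consequence of the bounds above: anchored accesses may use
   any displacement in [-0x80000000, 0x7fffffff] from the anchor symbol,
   which is what lets the addis/add pair span a full 32-bit offset.  */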
1869 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1870 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1871 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1872 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1874 #undef TARGET_BUILTIN_RECIPROCAL
1875 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1877 #undef TARGET_SECONDARY_RELOAD
1878 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1880 #undef TARGET_LEGITIMATE_ADDRESS_P
1881 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1883 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1884 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1886 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1887 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1889 #undef TARGET_CAN_ELIMINATE
1890 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1892 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1893 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1895 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1896 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1898 #undef TARGET_TRAMPOLINE_INIT
1899 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1901 #undef TARGET_FUNCTION_VALUE
1902 #define TARGET_FUNCTION_VALUE rs6000_function_value
1904 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1905 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1907 #undef TARGET_OPTION_SAVE
1908 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1910 #undef TARGET_OPTION_RESTORE
1911 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1913 #undef TARGET_OPTION_PRINT
1914 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1916 #undef TARGET_CAN_INLINE_P
1917 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1919 #undef TARGET_SET_CURRENT_FUNCTION
1920 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1922 #undef TARGET_LEGITIMATE_CONSTANT_P
1923 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1925 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1926 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1928 #undef TARGET_CAN_USE_DOLOOP_P
1929 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1931 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1932 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1934 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1935 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1936 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1937 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1938 #undef TARGET_UNWIND_WORD_MODE
1939 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1941 #undef TARGET_OFFLOAD_OPTIONS
1942 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1944 #undef TARGET_C_MODE_FOR_SUFFIX
1945 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1947 #undef TARGET_INVALID_BINARY_OP
1948 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1950 #undef TARGET_OPTAB_SUPPORTED_P
1951 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1953 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1954 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1956 #undef TARGET_COMPARE_VERSION_PRIORITY
1957 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1959 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1960 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1961 rs6000_generate_version_dispatcher_body
1963 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1964 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1965 rs6000_get_function_versions_dispatcher
1967 #undef TARGET_OPTION_FUNCTION_VERSIONS
1968 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1970 #undef TARGET_HARD_REGNO_MODE_OK
1971 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1973 #undef TARGET_MODES_TIEABLE_P
1974 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1976 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1977 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1978 rs6000_hard_regno_call_part_clobbered
1981 /* Processor table. */
1982 struct rs6000_ptt
1984 const char *const name; /* Canonical processor name. */
1985 const enum processor_type processor; /* Processor type enum value. */
1986 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1989 static struct rs6000_ptt const processor_target_table[] =
1991 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1992 #include "rs6000-cpus.def"
1993 #undef RS6000_CPU
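/* Illustrative expansion of the X-macro above: an rs6000-cpus.def line of
   the form RS6000_CPU ("power9", PROCESSOR_POWER9, <flags>) becomes the
   initializer { "power9", PROCESSOR_POWER9, <flags> }, so this table stays
   in sync with the .def file.  */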
1996 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1997 name is invalid. */
1999 static int
2000 rs6000_cpu_name_lookup (const char *name)
2002 size_t i;
2004 if (name != NULL)
2006 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2007 if (! strcmp (name, processor_target_table[i].name))
2008 return (int)i;
2011 return -1;
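/* Usage sketch (assuming rs6000-cpus.def defines the name):
   rs6000_cpu_name_lookup ("power9") returns the index of the "power9"
   entry in processor_target_table, while an unknown name returns -1.  */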
2015 /* Return number of consecutive hard regs needed starting at reg REGNO
2016 to hold something of mode MODE.
2017 This is ordinarily the length in words of a value of mode MODE
2018 but can be less for certain modes in special long registers.
2020 POWER and PowerPC GPRs hold 32 bits worth;
2021 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2023 static int
2024 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2026 unsigned HOST_WIDE_INT reg_size;
2028 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2029 128-bit floating point that can go in vector registers, which has VSX
2030 memory addressing. */
2031 if (FP_REGNO_P (regno))
2032 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2033 ? UNITS_PER_VSX_WORD
2034 : UNITS_PER_FP_WORD);
2036 else if (ALTIVEC_REGNO_P (regno))
2037 reg_size = UNITS_PER_ALTIVEC_WORD;
2039 else
2040 reg_size = UNITS_PER_WORD;
2042 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
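/* Worked example (illustrative): on a 64-bit target UNITS_PER_WORD is 8,
   so a 16-byte TImode value in GPRs needs (16 + 8 - 1) / 8 == 2 registers,
   while a 16-byte vector in an Altivec register (UNITS_PER_ALTIVEC_WORD of
   16 when AltiVec is enabled) needs (16 + 16 - 1) / 16 == 1.  */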
2045 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2046 MODE. */
2047 static int
2048 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2050 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2052 if (COMPLEX_MODE_P (mode))
2053 mode = GET_MODE_INNER (mode);
2055 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2056 register combinations, and we use PTImode where we need to deal with them.
2057 Don't allow quad words in the argument or frame pointer registers, just
2058 registers 0..31. */
2059 if (mode == PTImode)
2060 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2061 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2062 && ((regno & 1) == 0));
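/* E.g. a PTImode value starting in r10 (even regno, last_regno r11) is
   accepted, while one starting in r11 is rejected by the (regno & 1)
   test (illustrative).  */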
2064 /* VSX registers that overlap the FPR registers are larger than on non-VSX
2065 implementations. Don't allow an item to be split between an FP register
2066 and an Altivec register. Allow TImode in all VSX registers if the user
2067 asked for it. */
2068 if (TARGET_VSX && VSX_REGNO_P (regno)
2069 && (VECTOR_MEM_VSX_P (mode)
2070 || FLOAT128_VECTOR_P (mode)
2071 || reg_addr[mode].scalar_in_vmx_p
2072 || mode == TImode
2073 || (TARGET_VADDUQM && mode == V1TImode)))
2075 if (FP_REGNO_P (regno))
2076 return FP_REGNO_P (last_regno);
2078 if (ALTIVEC_REGNO_P (regno))
2080 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2081 return 0;
2083 return ALTIVEC_REGNO_P (last_regno);
2087 /* The GPRs can hold any mode, but values bigger than one register
2088 cannot go past R31. */
2089 if (INT_REGNO_P (regno))
2090 return INT_REGNO_P (last_regno);
2092 /* The float registers (except for VSX vector modes) can only hold floating
2093 modes and DImode. */
2094 if (FP_REGNO_P (regno))
2096 if (FLOAT128_VECTOR_P (mode))
2097 return false;
2099 if (SCALAR_FLOAT_MODE_P (mode)
2100 && (mode != TDmode || (regno % 2) == 0)
2101 && FP_REGNO_P (last_regno))
2102 return 1;
2104 if (GET_MODE_CLASS (mode) == MODE_INT)
2106 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2107 return 1;
2109 if (TARGET_P8_VECTOR && (mode == SImode))
2110 return 1;
2112 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2113 return 1;
2116 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2117 && PAIRED_VECTOR_MODE (mode))
2118 return 1;
2120 return 0;
2123 /* The CR registers can only hold CC modes. */
2124 if (CR_REGNO_P (regno))
2125 return GET_MODE_CLASS (mode) == MODE_CC;
2127 if (CA_REGNO_P (regno))
2128 return mode == Pmode || mode == SImode;
2130 /* AltiVec only in AltiVec registers. */
2131 if (ALTIVEC_REGNO_P (regno))
2132 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2133 || mode == V1TImode);
2135 /* We cannot put non-VSX TImode or PTImode anywhere except general registers,
2136 and the value must be able to fit within the register set. */
2138 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2141 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2143 static bool
2144 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2146 return rs6000_hard_regno_mode_ok_p[mode][regno];
2149 /* Implement TARGET_MODES_TIEABLE_P.
2151 PTImode cannot tie with other modes because PTImode is restricted to even
2152 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2153 57744).
2155 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2156 128-bit floating point on VSX systems ties with other vectors. */
2158 static bool
2159 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2161 if (mode1 == PTImode)
2162 return mode2 == PTImode;
2163 if (mode2 == PTImode)
2164 return false;
2166 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2167 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2168 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2169 return false;
2171 if (SCALAR_FLOAT_MODE_P (mode1))
2172 return SCALAR_FLOAT_MODE_P (mode2);
2173 if (SCALAR_FLOAT_MODE_P (mode2))
2174 return false;
2176 if (GET_MODE_CLASS (mode1) == MODE_CC)
2177 return GET_MODE_CLASS (mode2) == MODE_CC;
2178 if (GET_MODE_CLASS (mode2) == MODE_CC)
2179 return false;
2181 if (PAIRED_VECTOR_MODE (mode1))
2182 return PAIRED_VECTOR_MODE (mode2);
2183 if (PAIRED_VECTOR_MODE (mode2))
2184 return false;
2186 return true;
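/* Illustrative results: V2DFmode ties with V4SImode (both Altivec/VSX
   vector modes) and SFmode ties with DFmode (both scalar float), but
   TImode never ties with PTImode because of the even-GPR restriction
   described above.  */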
2189 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2191 static bool
2192 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2194 if (TARGET_32BIT
2195 && TARGET_POWERPC64
2196 && GET_MODE_SIZE (mode) > 4
2197 && INT_REGNO_P (regno))
2198 return true;
2200 if (TARGET_VSX
2201 && FP_REGNO_P (regno)
2202 && GET_MODE_SIZE (mode) > 8
2203 && !FLOAT128_2REG_P (mode))
2204 return true;
2206 return false;
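/* Illustrative: for -m32 -mpowerpc64, a DImode value lives in a single
   64-bit GPR whose high 32 bits are not preserved across calls, so the
   first test above returns true for it.  */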
2209 /* Print interesting facts about registers. */
2210 static void
2211 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2213 int r, m;
2215 for (r = first_regno; r <= last_regno; ++r)
2217 const char *comma = "";
2218 int len;
2220 if (first_regno == last_regno)
2221 fprintf (stderr, "%s:\t", reg_name);
2222 else
2223 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2225 len = 8;
2226 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2227 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2229 if (len > 70)
2231 fprintf (stderr, ",\n\t");
2232 len = 8;
2233 comma = "";
2236 if (rs6000_hard_regno_nregs[m][r] > 1)
2237 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2238 rs6000_hard_regno_nregs[m][r]);
2239 else
2240 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2242 comma = ", ";
2245 if (call_used_regs[r])
2247 if (len > 70)
2249 fprintf (stderr, ",\n\t");
2250 len = 8;
2251 comma = "";
2254 len += fprintf (stderr, "%s%s", comma, "call-used");
2255 comma = ", ";
2258 if (fixed_regs[r])
2260 if (len > 70)
2262 fprintf (stderr, ",\n\t");
2263 len = 8;
2264 comma = "";
2267 len += fprintf (stderr, "%s%s", comma, "fixed");
2268 comma = ", ";
2271 if (len > 70)
2273 fprintf (stderr, ",\n\t");
2274 comma = "";
2277 len += fprintf (stderr, "%sreg-class = %s", comma,
2278 reg_class_names[(int)rs6000_regno_regclass[r]]);
2279 comma = ", ";
2281 if (len > 70)
2283 fprintf (stderr, ",\n\t");
2284 comma = "";
2287 fprintf (stderr, "%sregno = %d\n", comma, r);
2291 static const char *
2292 rs6000_debug_vector_unit (enum rs6000_vector v)
2294 const char *ret;
2296 switch (v)
2298 case VECTOR_NONE: ret = "none"; break;
2299 case VECTOR_ALTIVEC: ret = "altivec"; break;
2300 case VECTOR_VSX: ret = "vsx"; break;
2301 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2302 case VECTOR_PAIRED: ret = "paired"; break;
2303 case VECTOR_OTHER: ret = "other"; break;
2304 default: ret = "unknown"; break;
2307 return ret;
2310 /* Inner function printing just the address mask for a particular reload
2311 register class. */
2312 DEBUG_FUNCTION char *
2313 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2315 static char ret[8];
2316 char *p = ret;
2318 if ((mask & RELOAD_REG_VALID) != 0)
2319 *p++ = 'v';
2320 else if (keep_spaces)
2321 *p++ = ' ';
2323 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2324 *p++ = 'm';
2325 else if (keep_spaces)
2326 *p++ = ' ';
2328 if ((mask & RELOAD_REG_INDEXED) != 0)
2329 *p++ = 'i';
2330 else if (keep_spaces)
2331 *p++ = ' ';
2333 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2334 *p++ = 'O';
2335 else if ((mask & RELOAD_REG_OFFSET) != 0)
2336 *p++ = 'o';
2337 else if (keep_spaces)
2338 *p++ = ' ';
2340 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2341 *p++ = '+';
2342 else if (keep_spaces)
2343 *p++ = ' ';
2345 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2346 *p++ = '+';
2347 else if (keep_spaces)
2348 *p++ = ' ';
2350 if ((mask & RELOAD_REG_AND_M16) != 0)
2351 *p++ = '&';
2352 else if (keep_spaces)
2353 *p++ = ' ';
2355 *p = '\0';
2357 return ret;
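/* Output sketch (assuming keep_spaces): a mask with RELOAD_REG_VALID,
   RELOAD_REG_INDEXED and RELOAD_REG_OFFSET set prints as "v io   ", one
   column per flag with a space for each flag that is clear.  */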
2360 /* Print the address masks in a human readable fashion. */
2361 DEBUG_FUNCTION void
2362 rs6000_debug_print_mode (ssize_t m)
2364 ssize_t rc;
2365 int spaces = 0;
2366 bool fuse_extra_p;
2368 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2369 for (rc = 0; rc < N_RELOAD_REG; rc++)
2370 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2371 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2373 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2374 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2375 fprintf (stderr, " Reload=%c%c",
2376 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2377 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2378 else
2379 spaces += sizeof (" Reload=sl") - 1;
2381 if (reg_addr[m].scalar_in_vmx_p)
2383 fprintf (stderr, "%*s Upper=y", spaces, "");
2384 spaces = 0;
2386 else
2387 spaces += sizeof (" Upper=y") - 1;
2389 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2390 || reg_addr[m].fused_toc);
2391 if (!fuse_extra_p)
2393 for (rc = 0; rc < N_RELOAD_REG; rc++)
2395 if (rc != RELOAD_REG_ANY)
2397 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2399 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2400 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2401 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2403 fuse_extra_p = true;
2404 break;
2410 if (fuse_extra_p)
2412 fprintf (stderr, "%*s Fuse:", spaces, "");
2413 spaces = 0;
2415 for (rc = 0; rc < N_RELOAD_REG; rc++)
2417 if (rc != RELOAD_REG_ANY)
2419 char load, store;
2421 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2422 load = 'l';
2423 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2424 load = 'L';
2425 else
2426 load = '-';
2428 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2429 store = 's';
2430 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2431 store = 'S';
2432 else
2433 store = '-';
2435 if (load == '-' && store == '-')
2436 spaces += 5;
2437 else
2439 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2440 reload_reg_map[rc].name[0], load, store);
2441 spaces = 0;
2446 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2448 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2449 spaces = 0;
2451 else
2452 spaces += sizeof (" P8gpr") - 1;
2454 if (reg_addr[m].fused_toc)
2456 fprintf (stderr, "%*sToc", (spaces + 1), "");
2457 spaces = 0;
2459 else
2460 spaces += sizeof (" Toc") - 1;
2462 else
2463 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2465 if (rs6000_vector_unit[m] != VECTOR_NONE
2466 || rs6000_vector_mem[m] != VECTOR_NONE)
2468 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2469 spaces, "",
2470 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2471 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2474 fputs ("\n", stderr);
2477 #define DEBUG_FMT_ID "%-32s= "
2478 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2479 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2480 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
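/* For example, DEBUG_FMT_S expands to "%-32s= " "%s\n", so
   fprintf (stderr, DEBUG_FMT_S, "abi", "aix") prints "abi" left-justified
   in a 32-character field followed by "= aix".  */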
2482 /* Print various interesting information with -mdebug=reg. */
2483 static void
2484 rs6000_debug_reg_global (void)
2486 static const char *const tf[2] = { "false", "true" };
2487 const char *nl = (const char *)0;
2488 int m;
2489 size_t m1, m2, v;
2490 char costly_num[20];
2491 char nop_num[20];
2492 char flags_buffer[40];
2493 const char *costly_str;
2494 const char *nop_str;
2495 const char *trace_str;
2496 const char *abi_str;
2497 const char *cmodel_str;
2498 struct cl_target_option cl_opts;
2500 /* Modes we want tieable information on. */
2501 static const machine_mode print_tieable_modes[] = {
2502 QImode,
2503 HImode,
2504 SImode,
2505 DImode,
2506 TImode,
2507 PTImode,
2508 SFmode,
2509 DFmode,
2510 TFmode,
2511 IFmode,
2512 KFmode,
2513 SDmode,
2514 DDmode,
2515 TDmode,
2516 V2SImode,
2517 V16QImode,
2518 V8HImode,
2519 V4SImode,
2520 V2DImode,
2521 V1TImode,
2522 V32QImode,
2523 V16HImode,
2524 V8SImode,
2525 V4DImode,
2526 V2TImode,
2527 V2SFmode,
2528 V4SFmode,
2529 V2DFmode,
2530 V8SFmode,
2531 V4DFmode,
2532 CCmode,
2533 CCUNSmode,
2534 CCEQmode,
2537 /* Virtual regs we are interested in. */
2538 static const struct {
2539 int regno; /* register number. */
2540 const char *name; /* register name. */
2541 } virtual_regs[] = {
2542 { STACK_POINTER_REGNUM, "stack pointer:" },
2543 { TOC_REGNUM, "toc: " },
2544 { STATIC_CHAIN_REGNUM, "static chain: " },
2545 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2546 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2547 { ARG_POINTER_REGNUM, "arg pointer: " },
2548 { FRAME_POINTER_REGNUM, "frame pointer:" },
2549 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2550 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2551 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2552 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2553 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2554 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2555 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2556 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2557 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2560 fputs ("\nHard register information:\n", stderr);
2561 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2562 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2563 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2564 LAST_ALTIVEC_REGNO,
2565 "vs");
2566 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2567 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2568 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2569 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2570 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2571 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2573 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2574 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2575 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2577 fprintf (stderr,
2578 "\n"
2579 "d reg_class = %s\n"
2580 "f reg_class = %s\n"
2581 "v reg_class = %s\n"
2582 "wa reg_class = %s\n"
2583 "wb reg_class = %s\n"
2584 "wd reg_class = %s\n"
2585 "we reg_class = %s\n"
2586 "wf reg_class = %s\n"
2587 "wg reg_class = %s\n"
2588 "wh reg_class = %s\n"
2589 "wi reg_class = %s\n"
2590 "wj reg_class = %s\n"
2591 "wk reg_class = %s\n"
2592 "wl reg_class = %s\n"
2593 "wm reg_class = %s\n"
2594 "wo reg_class = %s\n"
2595 "wp reg_class = %s\n"
2596 "wq reg_class = %s\n"
2597 "wr reg_class = %s\n"
2598 "ws reg_class = %s\n"
2599 "wt reg_class = %s\n"
2600 "wu reg_class = %s\n"
2601 "wv reg_class = %s\n"
2602 "ww reg_class = %s\n"
2603 "wx reg_class = %s\n"
2604 "wy reg_class = %s\n"
2605 "wz reg_class = %s\n"
2606 "wA reg_class = %s\n"
2607 "wH reg_class = %s\n"
2608 "wI reg_class = %s\n"
2609 "wJ reg_class = %s\n"
2610 "wK reg_class = %s\n"
2611 "\n",
2612 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2613 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2614 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2615 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2616 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2617 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2618 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2619 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2620 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2621 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2622 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2623 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2624 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2625 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2626 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2637 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2638 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2639 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2640 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2641 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2642 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2643 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2645 nl = "\n";
2646 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2647 rs6000_debug_print_mode (m);
2649 fputs ("\n", stderr);
2651 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2653 machine_mode mode1 = print_tieable_modes[m1];
2654 bool first_time = true;
2656 nl = (const char *)0;
2657 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2659 machine_mode mode2 = print_tieable_modes[m2];
2660 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2662 if (first_time)
2664 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2665 nl = "\n";
2666 first_time = false;
2669 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2673 if (!first_time)
2674 fputs ("\n", stderr);
2677 if (nl)
2678 fputs (nl, stderr);
2680 if (rs6000_recip_control)
2682 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2684 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2685 if (rs6000_recip_bits[m])
2687 fprintf (stderr,
2688 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2689 GET_MODE_NAME (m),
2690 (RS6000_RECIP_AUTO_RE_P (m)
2691 ? "auto"
2692 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2693 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2694 ? "auto"
2695 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2698 fputs ("\n", stderr);
2701 if (rs6000_cpu_index >= 0)
2703 const char *name = processor_target_table[rs6000_cpu_index].name;
2704 HOST_WIDE_INT flags
2705 = processor_target_table[rs6000_cpu_index].target_enable;
2707 sprintf (flags_buffer, "-mcpu=%s flags", name);
2708 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2710 else
2711 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2713 if (rs6000_tune_index >= 0)
2715 const char *name = processor_target_table[rs6000_tune_index].name;
2716 HOST_WIDE_INT flags
2717 = processor_target_table[rs6000_tune_index].target_enable;
2719 sprintf (flags_buffer, "-mtune=%s flags", name);
2720 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2722 else
2723 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2725 cl_target_option_save (&cl_opts, &global_options);
2726 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2727 rs6000_isa_flags);
2729 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2730 rs6000_isa_flags_explicit);
2732 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2733 rs6000_builtin_mask);
2735 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2737 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2738 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2740 switch (rs6000_sched_costly_dep)
2742 case max_dep_latency:
2743 costly_str = "max_dep_latency";
2744 break;
2746 case no_dep_costly:
2747 costly_str = "no_dep_costly";
2748 break;
2750 case all_deps_costly:
2751 costly_str = "all_deps_costly";
2752 break;
2754 case true_store_to_load_dep_costly:
2755 costly_str = "true_store_to_load_dep_costly";
2756 break;
2758 case store_to_load_dep_costly:
2759 costly_str = "store_to_load_dep_costly";
2760 break;
2762 default:
2763 costly_str = costly_num;
2764 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2765 break;
2768 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2770 switch (rs6000_sched_insert_nops)
2772 case sched_finish_regroup_exact:
2773 nop_str = "sched_finish_regroup_exact";
2774 break;
2776 case sched_finish_pad_groups:
2777 nop_str = "sched_finish_pad_groups";
2778 break;
2780 case sched_finish_none:
2781 nop_str = "sched_finish_none";
2782 break;
2784 default:
2785 nop_str = nop_num;
2786 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2787 break;
2790 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2792 switch (rs6000_sdata)
2794 default:
2795 case SDATA_NONE:
2796 break;
2798 case SDATA_DATA:
2799 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2800 break;
2802 case SDATA_SYSV:
2803 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2804 break;
2806 case SDATA_EABI:
2807 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2808 break;
2812 switch (rs6000_traceback)
2814 case traceback_default: trace_str = "default"; break;
2815 case traceback_none: trace_str = "none"; break;
2816 case traceback_part: trace_str = "part"; break;
2817 case traceback_full: trace_str = "full"; break;
2818 default: trace_str = "unknown"; break;
2821 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2823 switch (rs6000_current_cmodel)
2825 case CMODEL_SMALL: cmodel_str = "small"; break;
2826 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2827 case CMODEL_LARGE: cmodel_str = "large"; break;
2828 default: cmodel_str = "unknown"; break;
2831 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2833 switch (rs6000_current_abi)
2835 case ABI_NONE: abi_str = "none"; break;
2836 case ABI_AIX: abi_str = "aix"; break;
2837 case ABI_ELFv2: abi_str = "ELFv2"; break;
2838 case ABI_V4: abi_str = "V4"; break;
2839 case ABI_DARWIN: abi_str = "darwin"; break;
2840 default: abi_str = "unknown"; break;
2843 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2845 if (rs6000_altivec_abi)
2846 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2848 if (rs6000_darwin64_abi)
2849 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2851 fprintf (stderr, DEBUG_FMT_S, "single_float",
2852 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2854 fprintf (stderr, DEBUG_FMT_S, "double_float",
2855 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2857 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2858 (TARGET_SOFT_FLOAT ? "true" : "false"));
2860 if (TARGET_LINK_STACK)
2861 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2863 if (TARGET_P8_FUSION)
2865 char options[80];
2867 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2868 if (TARGET_TOC_FUSION)
2869 strcat (options, ", toc");
2871 if (TARGET_P8_FUSION_SIGN)
2872 strcat (options, ", sign");
2874 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2877 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2878 TARGET_SECURE_PLT ? "secure" : "bss");
2879 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2880 aix_struct_return ? "aix" : "sysv");
2881 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2882 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2883 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2884 tf[!!rs6000_align_branch_targets]);
2885 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2886 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2887 rs6000_long_double_type_size);
2888 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2889 (int)rs6000_sched_restricted_insns_priority);
2890 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2891 (int)END_BUILTINS);
2892 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2893 (int)RS6000_BUILTIN_COUNT);
2895 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2896 (int)TARGET_FLOAT128_ENABLE_TYPE);
2898 if (TARGET_VSX)
2899 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2900 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2902 if (TARGET_DIRECT_MOVE_128)
2903 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2904 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2908 /* Update the addr mask bits in reg_addr to help the secondary reload and
2909 legitimate address support figure out the appropriate addressing to
2910 use. */
2912 static void
2913 rs6000_setup_reg_addr_masks (void)
2915 ssize_t rc, reg, m, nregs;
2916 addr_mask_type any_addr_mask, addr_mask;
2918 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2920 machine_mode m2 = (machine_mode) m;
2921 bool complex_p = false;
2922 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2923 size_t msize;
2925 if (COMPLEX_MODE_P (m2))
2927 complex_p = true;
2928 m2 = GET_MODE_INNER (m2);
2931 msize = GET_MODE_SIZE (m2);
2933 /* SDmode is special in that we want to access it only via REG+REG
2934 addressing on power7 and above, since we want to use the LFIWZX and
2935 STFIWZX instructions to load it. */
2936 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2938 any_addr_mask = 0;
2939 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2941 addr_mask = 0;
2942 reg = reload_reg_map[rc].reg;
2944 /* Can mode values go in the GPR/FPR/Altivec registers? */
2945 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2947 bool small_int_vsx_p = (small_int_p
2948 && (rc == RELOAD_REG_FPR
2949 || rc == RELOAD_REG_VMX));
2951 nregs = rs6000_hard_regno_nregs[m][reg];
2952 addr_mask |= RELOAD_REG_VALID;
2954 /* Indicate if the mode takes more than 1 physical register. If
2955 it takes a single register, indicate it can do REG+REG
2956 addressing. Small integers in VSX registers can only do
2957 REG+REG addressing. */
2958 if (small_int_vsx_p)
2959 addr_mask |= RELOAD_REG_INDEXED;
2960 else if (nregs > 1 || m == BLKmode || complex_p)
2961 addr_mask |= RELOAD_REG_MULTIPLE;
2962 else
2963 addr_mask |= RELOAD_REG_INDEXED;
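/* Illustrative: on a 32-bit target DFmode needs two GPRs and is marked
   RELOAD_REG_MULTIPLE there, while in an FPR it fits one register and
   gets RELOAD_REG_INDEXED instead.  */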
2965 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2966 addressing. If we allow scalars into Altivec registers,
2967 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2969 if (TARGET_UPDATE
2970 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2971 && msize <= 8
2972 && !VECTOR_MODE_P (m2)
2973 && !FLOAT128_VECTOR_P (m2)
2974 && !complex_p
2975 && !small_int_vsx_p)
2977 addr_mask |= RELOAD_REG_PRE_INCDEC;
2979 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2980 we don't allow PRE_MODIFY for some multi-register
2981 operations. */
2982 switch (m)
2984 default:
2985 addr_mask |= RELOAD_REG_PRE_MODIFY;
2986 break;
2988 case E_DImode:
2989 if (TARGET_POWERPC64)
2990 addr_mask |= RELOAD_REG_PRE_MODIFY;
2991 break;
2993 case E_DFmode:
2994 case E_DDmode:
2995 if (TARGET_DF_INSN)
2996 addr_mask |= RELOAD_REG_PRE_MODIFY;
2997 break;
3002 /* GPR and FPR registers can do REG+OFFSET addressing, except
3003 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3004 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3005 if ((addr_mask != 0) && !indexed_only_p
3006 && msize <= 8
3007 && (rc == RELOAD_REG_GPR
3008 || ((msize == 8 || m2 == SFmode)
3009 && (rc == RELOAD_REG_FPR
3010 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3011 addr_mask |= RELOAD_REG_OFFSET;
3013 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3014 instructions are enabled. The offset for 128-bit VSX registers is
3015 only 12-bits. While GPRs can handle the full offset range, VSX
3016 registers can only handle the restricted range. */
3017 else if ((addr_mask != 0) && !indexed_only_p
3018 && msize == 16 && TARGET_P9_VECTOR
3019 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3020 || (m2 == TImode && TARGET_VSX)))
3022 addr_mask |= RELOAD_REG_OFFSET;
3023 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3024 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3027 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3028 addressing on 128-bit types. */
3029 if (rc == RELOAD_REG_VMX && msize == 16
3030 && (addr_mask & RELOAD_REG_VALID) != 0)
3031 addr_mask |= RELOAD_REG_AND_M16;
3033 reg_addr[m].addr_mask[rc] = addr_mask;
3034 any_addr_mask |= addr_mask;
3037 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
3042 /* Initialize the various global tables that are based on register size. */
3043 static void
3044 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3046 ssize_t r, m, c;
3047 int align64;
3048 int align32;
3050 /* Precalculate REGNO_REG_CLASS. */
3051 rs6000_regno_regclass[0] = GENERAL_REGS;
3052 for (r = 1; r < 32; ++r)
3053 rs6000_regno_regclass[r] = BASE_REGS;
3055 for (r = 32; r < 64; ++r)
3056 rs6000_regno_regclass[r] = FLOAT_REGS;
3058 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3059 rs6000_regno_regclass[r] = NO_REGS;
3061 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3062 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3064 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3065 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3066 rs6000_regno_regclass[r] = CR_REGS;
3068 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3069 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3070 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3071 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3072 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3073 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3074 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3075 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3076 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3077 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3079 /* Precalculate register class to simpler reload register class. We don't
3080 need all of the register classes that are combinations of different
3081 classes, just the simple ones that have constraint letters. */
3082 for (c = 0; c < N_REG_CLASSES; c++)
3083 reg_class_to_reg_type[c] = NO_REG_TYPE;
3085 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3086 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3087 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3088 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3089 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3090 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3091 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3092 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3093 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3094 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3096 if (TARGET_VSX)
3098 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3099 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3101 else
3103 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3104 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3107 /* Precalculate the valid memory formats as well as the vector information;
3108 this must be set up before the rs6000_hard_regno_nregs_internal calls
3109 below. */
3110 gcc_assert ((int)VECTOR_NONE == 0);
3111 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3112 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3114 gcc_assert ((int)CODE_FOR_nothing == 0);
3115 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3117 gcc_assert ((int)NO_REGS == 0);
3118 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3120 /* The VSX hardware allows native alignment for vectors; control whether the
3121 compiler believes it can use native alignment or must still use 128-bit alignment. */
3122 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3124 align64 = 64;
3125 align32 = 32;
3127 else
3129 align64 = 128;
3130 align32 = 128;
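/* Illustrative summary: with VSX and without forced 128-bit alignment,
   V2DFmode below ends up 64-bit aligned and V4SFmode 32-bit aligned;
   otherwise both keep the traditional 128-bit Altivec alignment.  */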
3133 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3134 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3135 if (TARGET_FLOAT128_TYPE)
3137 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3138 rs6000_vector_align[KFmode] = 128;
3140 if (FLOAT128_IEEE_P (TFmode))
3142 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3143 rs6000_vector_align[TFmode] = 128;
3147 /* V2DF mode, VSX only. */
3148 if (TARGET_VSX)
3150 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3151 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3152 rs6000_vector_align[V2DFmode] = align64;
3155 /* V4SF mode, either VSX or Altivec. */
3156 if (TARGET_VSX)
3158 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3159 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3160 rs6000_vector_align[V4SFmode] = align32;
3162 else if (TARGET_ALTIVEC)
3164 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3165 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3166 rs6000_vector_align[V4SFmode] = align32;
3169 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3170 and stores. */
3171 if (TARGET_ALTIVEC)
3173 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3174 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3175 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3176 rs6000_vector_align[V4SImode] = align32;
3177 rs6000_vector_align[V8HImode] = align32;
3178 rs6000_vector_align[V16QImode] = align32;
3180 if (TARGET_VSX)
3182 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3183 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3184 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3186 else
3188 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3189 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3190 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3194 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3195 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3196 if (TARGET_VSX)
3198 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3199 rs6000_vector_unit[V2DImode]
3200 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3201 rs6000_vector_align[V2DImode] = align64;
3203 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3204 rs6000_vector_unit[V1TImode]
3205 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3206 rs6000_vector_align[V1TImode] = 128;
3209 /* DFmode, see if we want to use the VSX unit. Memory is handled
3210 differently, so don't set rs6000_vector_mem. */
3211 if (TARGET_VSX)
3213 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3214 rs6000_vector_align[DFmode] = 64;
3217 /* SFmode, see if we want to use the VSX unit. */
3218 if (TARGET_P8_VECTOR)
3220 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3221 rs6000_vector_align[SFmode] = 32;
3224 /* Allow TImode in VSX register and set the VSX memory macros. */
3225 if (TARGET_VSX)
3227 rs6000_vector_mem[TImode] = VECTOR_VSX;
3228 rs6000_vector_align[TImode] = align64;
3231 /* TODO add paired floating point vector support. */
3233 /* Register class constraints for the constraints that depend on compile
3234 switches. When the VSX code was added, different constraints were added
3235 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3236 of the VSX registers are used. The register classes for scalar floating
3237 point types are set based on whether we allow that type into the upper
3238 (Altivec) registers. GCC has register classes to target the Altivec
3239 registers for load/store operations, to select using a VSX memory
3240 operation instead of the traditional floating point operation. The
3241 constraints are:
3243 d - Register class to use with traditional DFmode instructions.
3244 f - Register class to use with traditional SFmode instructions.
3245 v - Altivec register.
3246 wa - Any VSX register.
3247 wc - Reserved to represent individual CR bits (used in LLVM).
3248 wd - Preferred register class for V2DFmode.
3249 wf - Preferred register class for V4SFmode.
3250 wg - Float register for power6x move insns.
3251 wh - FP register for direct move instructions.
3252 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3253 wj - FP or VSX register to hold 64-bit integers for direct moves.
3254 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3255 wl - Float register if we can do 32-bit signed int loads.
3256 wm - VSX register for ISA 2.07 direct move operations.
3257 wn - always NO_REGS.
3258 wr - GPR if 64-bit mode is permitted.
3259 ws - Register class to do ISA 2.06 DF operations.
3260 wt - VSX register for TImode in VSX registers.
3261 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3262 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3263 ww - Register class to do SF conversions in with VSX operations.
3264 wx - Float register if we can do 32-bit int stores.
3265 wy - Register class to do ISA 2.07 SF operations.
3266 wz - Float register if we can do 32-bit unsigned int loads.
3267 wH - Altivec register if SImode is allowed in VSX registers.
3268 wI - VSX register if SImode is allowed in VSX registers.
3269 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3270 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3272 if (TARGET_HARD_FLOAT)
3273 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3275 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3276 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3278 if (TARGET_VSX)
3280 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3281 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3282 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3283 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3284 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3285 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3286 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3289 /* Add conditional constraints based on various options, to allow us to
3290 collapse multiple insn patterns. */
3291 if (TARGET_ALTIVEC)
3292 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3294 if (TARGET_MFPGPR) /* DFmode */
3295 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3297 if (TARGET_LFIWAX)
3298 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3300 if (TARGET_DIRECT_MOVE)
3302 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3303 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3304 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3305 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3306 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3307 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3310 if (TARGET_POWERPC64)
3312 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3313 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3316 if (TARGET_P8_VECTOR) /* SFmode */
3318 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3319 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3320 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3322 else if (TARGET_VSX)
3323 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3325 if (TARGET_STFIWX)
3326 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3328 if (TARGET_LFIWZX)
3329 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3331 if (TARGET_FLOAT128_TYPE)
3333 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3334 if (FLOAT128_IEEE_P (TFmode))
3335 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3338 if (TARGET_P9_VECTOR)
3340 /* Support for new D-form instructions. */
3341 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3343 /* Support for ISA 3.0 (power9) vectors. */
3344 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3347 /* Support for new direct moves (ISA 3.0 + 64bit). */
3348 if (TARGET_DIRECT_MOVE_128)
3349 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3351 /* Support small integers in VSX registers. */
3352 if (TARGET_P8_VECTOR)
3354 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3355 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3356 if (TARGET_P9_VECTOR)
3358 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3359 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3363 /* Set up the reload helper and direct move functions. */
3364 if (TARGET_VSX || TARGET_ALTIVEC)
3366 if (TARGET_64BIT)
3368 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3369 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3370 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3371 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3372 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3373 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3374 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3375 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3376 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3377 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3378 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3379 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3380 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3381 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3382 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3383 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3384 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3385 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3386 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3387 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3389 if (FLOAT128_VECTOR_P (KFmode))
3391 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3392 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3395 if (FLOAT128_VECTOR_P (TFmode))
3397 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3398 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3401 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3402 available. */
3403 if (TARGET_NO_SDMODE_STACK)
3405 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3406 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3409 if (TARGET_VSX)
3411 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3412 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3415 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3417 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3418 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3419 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3420 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3421 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3422 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3423 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3424 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3425 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3427 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3428 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3429 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3430 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3431 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3432 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3433 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3434 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3435 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3437 if (FLOAT128_VECTOR_P (KFmode))
3439 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3440 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3443 if (FLOAT128_VECTOR_P (TFmode))
3445 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3446 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3450 else
3452 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3453 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3454 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3455 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3456 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3457 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3458 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3459 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3460 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3461 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3462 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3463 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3464 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3465 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3466 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3467 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3468 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3469 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3470 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3471 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3473 if (FLOAT128_VECTOR_P (KFmode))
3475 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3476 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3479 if (FLOAT128_IEEE_P (TFmode))
3481 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3482 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3485 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3486 available. */
3487 if (TARGET_NO_SDMODE_STACK)
3489 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3490 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3493 if (TARGET_VSX)
3495 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3496 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3499 if (TARGET_DIRECT_MOVE)
3501 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3502 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3503 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3507 reg_addr[DFmode].scalar_in_vmx_p = true;
3508 reg_addr[DImode].scalar_in_vmx_p = true;
3510 if (TARGET_P8_VECTOR)
3512 reg_addr[SFmode].scalar_in_vmx_p = true;
3513 reg_addr[SImode].scalar_in_vmx_p = true;
3515 if (TARGET_P9_VECTOR)
3517 reg_addr[HImode].scalar_in_vmx_p = true;
3518 reg_addr[QImode].scalar_in_vmx_p = true;
3523 /* Setup the fusion operations. */
3524 if (TARGET_P8_FUSION)
3526 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3527 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3528 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3529 if (TARGET_64BIT)
3530 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
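/* Illustration (assumed instruction sequence, added commentary -- not
   emitted by this code): power8 can fuse an addis that forms the high
   part of an address with the dependent load, so the pair issues as a
   single operation, e.g.:

	addis  r9,r2,sym@toc@ha
	lwz    r10,sym@toc@l(r9)

   The CODE_FOR_fusion_gpr_load_* patterns registered above keep such
   addis/load pairs adjacent so the hardware can fuse them.  */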
3533 if (TARGET_P9_FUSION)
3535 struct fuse_insns {
3536 enum machine_mode mode; /* mode of the fused type. */
3537 enum machine_mode pmode; /* pointer mode. */
3538 enum rs6000_reload_reg_type rtype; /* register type. */
3539 enum insn_code load; /* load insn. */
3540 enum insn_code store; /* store insn. */
3543 static const struct fuse_insns addis_insns[] = {
3544 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3545 CODE_FOR_fusion_vsx_di_sf_load,
3546 CODE_FOR_fusion_vsx_di_sf_store },
3548 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3549 CODE_FOR_fusion_vsx_si_sf_load,
3550 CODE_FOR_fusion_vsx_si_sf_store },
3552 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3553 CODE_FOR_fusion_vsx_di_df_load,
3554 CODE_FOR_fusion_vsx_di_df_store },
3556 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3557 CODE_FOR_fusion_vsx_si_df_load,
3558 CODE_FOR_fusion_vsx_si_df_store },
3560 { E_DImode, E_DImode, RELOAD_REG_FPR,
3561 CODE_FOR_fusion_vsx_di_di_load,
3562 CODE_FOR_fusion_vsx_di_di_store },
3564 { E_DImode, E_SImode, RELOAD_REG_FPR,
3565 CODE_FOR_fusion_vsx_si_di_load,
3566 CODE_FOR_fusion_vsx_si_di_store },
3568 { E_QImode, E_DImode, RELOAD_REG_GPR,
3569 CODE_FOR_fusion_gpr_di_qi_load,
3570 CODE_FOR_fusion_gpr_di_qi_store },
3572 { E_QImode, E_SImode, RELOAD_REG_GPR,
3573 CODE_FOR_fusion_gpr_si_qi_load,
3574 CODE_FOR_fusion_gpr_si_qi_store },
3576 { E_HImode, E_DImode, RELOAD_REG_GPR,
3577 CODE_FOR_fusion_gpr_di_hi_load,
3578 CODE_FOR_fusion_gpr_di_hi_store },
3580 { E_HImode, E_SImode, RELOAD_REG_GPR,
3581 CODE_FOR_fusion_gpr_si_hi_load,
3582 CODE_FOR_fusion_gpr_si_hi_store },
3584 { E_SImode, E_DImode, RELOAD_REG_GPR,
3585 CODE_FOR_fusion_gpr_di_si_load,
3586 CODE_FOR_fusion_gpr_di_si_store },
3588 { E_SImode, E_SImode, RELOAD_REG_GPR,
3589 CODE_FOR_fusion_gpr_si_si_load,
3590 CODE_FOR_fusion_gpr_si_si_store },
3592 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3593 CODE_FOR_fusion_gpr_di_sf_load,
3594 CODE_FOR_fusion_gpr_di_sf_store },
3596 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3597 CODE_FOR_fusion_gpr_si_sf_load,
3598 CODE_FOR_fusion_gpr_si_sf_store },
3600 { E_DImode, E_DImode, RELOAD_REG_GPR,
3601 CODE_FOR_fusion_gpr_di_di_load,
3602 CODE_FOR_fusion_gpr_di_di_store },
3604 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3605 CODE_FOR_fusion_gpr_di_df_load,
3606 CODE_FOR_fusion_gpr_di_df_store },
3609 machine_mode cur_pmode = Pmode;
3610 size_t i;
3612 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3614 machine_mode xmode = addis_insns[i].mode;
3615 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3617 if (addis_insns[i].pmode != cur_pmode)
3618 continue;
3620 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3621 continue;
3623 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3624 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3626 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3628 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3629 = addis_insns[i].load;
3630 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3631 = addis_insns[i].store;
3636 /* Note which types support fusing a TOC setup plus a memory insn.  We only
3637 do fused TOCs for medium/large code models. */
3638 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3639 && (TARGET_CMODEL != CMODEL_SMALL))
3641 reg_addr[QImode].fused_toc = true;
3642 reg_addr[HImode].fused_toc = true;
3643 reg_addr[SImode].fused_toc = true;
3644 reg_addr[DImode].fused_toc = true;
3645 if (TARGET_HARD_FLOAT)
3647 if (TARGET_SINGLE_FLOAT)
3648 reg_addr[SFmode].fused_toc = true;
3649 if (TARGET_DOUBLE_FLOAT)
3650 reg_addr[DFmode].fused_toc = true;
3654 /* Precalculate HARD_REGNO_NREGS. */
3655 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3656 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3657 rs6000_hard_regno_nregs[m][r]
3658 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3660 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3661 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3662 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3663 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3664 rs6000_hard_regno_mode_ok_p[m][r] = true;
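/* A minimal sketch (illustrative only, not part of the port) of how the
   precomputed tables above are meant to be consumed: hot queries become
   O(1) array lookups instead of repeated recomputation.  */
#if 0 /* example only */
static inline bool
example_hard_regno_mode_ok (int regno, machine_mode mode)
{
  /* Equivalent to rs6000_hard_regno_mode_ok_uncached (regno, mode),
     but served from the table filled in above.  */
  return rs6000_hard_regno_mode_ok_p[mode][regno];
}
#endif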
3666 /* Precalculate CLASS_MAX_NREGS sizes. */
3667 for (c = 0; c < LIM_REG_CLASSES; ++c)
3669 int reg_size;
3671 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3672 reg_size = UNITS_PER_VSX_WORD;
3674 else if (c == ALTIVEC_REGS)
3675 reg_size = UNITS_PER_ALTIVEC_WORD;
3677 else if (c == FLOAT_REGS)
3678 reg_size = UNITS_PER_FP_WORD;
3680 else
3681 reg_size = UNITS_PER_WORD;
3683 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3685 machine_mode m2 = (machine_mode)m;
3686 int reg_size2 = reg_size;
3688 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3689 in VSX. */
3690 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3691 reg_size2 = UNITS_PER_FP_WORD;
3693 rs6000_class_max_nregs[m][c]
3694 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
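/* Worked examples (illustrative, assuming 8-byte FP and 16-byte VSX
   register words) of the ceiling division above:

     DFmode   (8 bytes)  in FLOAT_REGS: (8 + 8 - 1) / 8   == 1 register
     V2DFmode (16 bytes) in VSX_REGS:   (16 + 16 - 1) / 16 == 1 register
     IFmode   (16 bytes) in VSX_REGS uses reg_size2 == 8, giving
              (16 + 8 - 1) / 8 == 2 registers, matching the comment that
              IBM 128-bit floats always take two registers.  */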
3698 /* Calculate the modes for which to automatically generate code using the
3699 reciprocal divide and square root instructions.  In the future, possibly
3700 automatically generate the instructions even if the user did not specify
3701 -mrecip.  The older machines' double precision reciprocal sqrt estimate is
3702 not accurate enough. */
3703 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3704 if (TARGET_FRES)
3705 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3706 if (TARGET_FRE)
3707 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3708 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3709 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3710 if (VECTOR_UNIT_VSX_P (V2DFmode))
3711 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3713 if (TARGET_FRSQRTES)
3714 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3715 if (TARGET_FRSQRTE)
3716 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3717 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3718 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3719 if (VECTOR_UNIT_VSX_P (V2DFmode))
3720 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3722 if (rs6000_recip_control)
3724 if (!flag_finite_math_only)
3725 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3726 "-ffast-math");
3727 if (flag_trapping_math)
3728 warning (0, "%qs requires %qs or %qs", "-mrecip",
3729 "-fno-trapping-math", "-ffast-math");
3730 if (!flag_reciprocal_math)
3731 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3732 "-ffast-math");
3733 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3735 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3736 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3737 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3739 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3740 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3741 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3743 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3744 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3745 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3747 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3748 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3749 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3751 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3752 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3753 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3755 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3756 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3757 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3759 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3760 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3761 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3763 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3764 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3765 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
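/* Sketch (illustrative only) of why the RE/RSQRTE "have" bits matter:
   the hardware instructions produce an *estimate*, which -mrecip code
   refines with Newton-Raphson steps; one step for 1/a from estimate x
   is x * (2 - a * x).  */
#if 0 /* example only */
static double
example_recip_refine (double a, double x)
{
  /* One Newton-Raphson refinement; fre/xvredp would supply X.  */
  return x * (2.0 - a * x);
}
#endif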
3769 /* Update the addr mask bits in reg_addr to help the secondary reload and
3770 legitimate address support figure out the appropriate addressing to
3771 use. */
3772 rs6000_setup_reg_addr_masks ();
3774 if (global_init_p || TARGET_DEBUG_TARGET)
3776 if (TARGET_DEBUG_REG)
3777 rs6000_debug_reg_global ();
3779 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3780 fprintf (stderr,
3781 "SImode variable mult cost = %d\n"
3782 "SImode constant mult cost = %d\n"
3783 "SImode short constant mult cost = %d\n"
3784 "DImode multipliciation cost = %d\n"
3785 "SImode division cost = %d\n"
3786 "DImode division cost = %d\n"
3787 "Simple fp operation cost = %d\n"
3788 "DFmode multiplication cost = %d\n"
3789 "SFmode division cost = %d\n"
3790 "DFmode division cost = %d\n"
3791 "cache line size = %d\n"
3792 "l1 cache size = %d\n"
3793 "l2 cache size = %d\n"
3794 "simultaneous prefetches = %d\n"
3795 "\n",
3796 rs6000_cost->mulsi,
3797 rs6000_cost->mulsi_const,
3798 rs6000_cost->mulsi_const9,
3799 rs6000_cost->muldi,
3800 rs6000_cost->divsi,
3801 rs6000_cost->divdi,
3802 rs6000_cost->fp,
3803 rs6000_cost->dmul,
3804 rs6000_cost->sdiv,
3805 rs6000_cost->ddiv,
3806 rs6000_cost->cache_line_size,
3807 rs6000_cost->l1_cache_size,
3808 rs6000_cost->l2_cache_size,
3809 rs6000_cost->simultaneous_prefetches);
3813 #if TARGET_MACHO
3814 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3816 static void
3817 darwin_rs6000_override_options (void)
3819 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3820 off. */
3821 rs6000_altivec_abi = 1;
3822 TARGET_ALTIVEC_VRSAVE = 1;
3823 rs6000_current_abi = ABI_DARWIN;
3825 if (DEFAULT_ABI == ABI_DARWIN
3826 && TARGET_64BIT)
3827 darwin_one_byte_bool = 1;
3829 if (TARGET_64BIT && ! TARGET_POWERPC64)
3831 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3832 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3834 if (flag_mkernel)
3836 rs6000_default_long_calls = 1;
3837 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3840 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3841 Altivec. */
3842 if (!flag_mkernel && !flag_apple_kext
3843 && TARGET_64BIT
3844 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3845 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3847 /* Unless the user (not the configurer) has explicitly overridden
3848 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to the
3849 G4 unless targeting the kernel. */
3850 if (!flag_mkernel
3851 && !flag_apple_kext
3852 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3853 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3854 && ! global_options_set.x_rs6000_cpu_index)
3856 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3859 #endif
3861 /* If not otherwise specified by a target, make 'long double' equivalent to
3862 'double'. */
3864 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3865 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3866 #endif
3868 /* Return the builtin mask of the various options that could affect which
3869 builtins are enabled.  In the past we used target_flags, but we've run out of
3870 bits, and some options like PAIRED are no longer in target_flags. */
3872 HOST_WIDE_INT
3873 rs6000_builtin_mask_calculate (void)
3875 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3876 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3877 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3878 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3879 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3880 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3881 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3882 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3883 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3884 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3885 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3886 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3887 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3888 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3889 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3890 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3891 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3892 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3893 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3894 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3895 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0));
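/* Sketch (illustrative only) of how the mask is consumed: each built-in
   records the RS6000_BTM_* bits it needs, and it is usable only when
   all of them are present in the mask computed above.  */
#if 0 /* example only */
static bool
example_builtin_enabled_p (HOST_WIDE_INT builtin_mask,
			   HOST_WIDE_INT required)
{
  /* E.g. a VSX built-in would pass RS6000_BTM_VSX as REQUIRED.  */
  return (builtin_mask & required) == required;
}
#endif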
3898 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3899 to clobber the XER[CA] bit because clobbering that bit without telling
3900 the compiler worked just fine with versions of GCC before GCC 5, and
3901 breaking a lot of older code in ways that are hard to track down is
3902 not such a great idea. */
3904 static rtx_insn *
3905 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3906 vec<const char *> &/*constraints*/,
3907 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3909 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3910 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3911 return NULL;
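/* Example (hypothetical user code) of why the blanket clobber above is
   needed: addic writes XER[CA] and addze reads it, yet neither fact is
   visible in the asm's constraints.  */
#if 0 /* example only */
static long
example_uses_carry (long a)
{
  long r;
  /* Pre-GCC 5 code like this relied on the implicit CA clobber.  */
  __asm__ ("addic %0,%1,1\n\taddze %0,%0" : "=&r" (r) : "r" (a));
  return r;
}
#endif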
3914 /* Override command line options.
3916 Combine build-specific configuration information with options
3917 specified on the command line to set various state variables which
3918 influence code generation, optimization, and expansion of built-in
3919 functions. Assure that command-line configuration preferences are
3920 compatible with each other and with the build configuration; issue
3921 warnings while adjusting configuration or error messages while
3922 rejecting configuration.
3924 Upon entry to this function:
3926 This function is called once at the beginning of
3927 compilation, and then again at the start and end of compiling
3928 each section of code that has a different configuration, as
3929 indicated, for example, by adding the
3931 __attribute__((__target__("cpu=power9")))
3933 qualifier to a function definition or, for example, by bracketing
3934 code between
3936 #pragma GCC target("altivec")
3938 and
3940 #pragma GCC reset_options
3942 directives. Parameter global_init_p is true for the initial
3943 invocation, which initializes global variables, and false for all
3944 subsequent invocations.
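   For illustration (hypothetical user code), the two per-function
   override mechanisms referred to above look like:

     __attribute__ ((__target__ ("cpu=power9")))
     int example_power9_func (int x) { return x * 3; }

     #pragma GCC target ("altivec")
     int example_altivec_func (int x) { return x + 1; }
     #pragma GCC reset_options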
3947 Various global state information is assumed to be valid. This
3948 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3949 default CPU specified at build configure time, TARGET_DEFAULT,
3950 representing the default set of option flags for the default
3951 target, and global_options_set.x_rs6000_isa_flags, representing
3952 which options were requested on the command line.
3954 Upon return from this function:
3956 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3957 was set by name on the command line. Additionally, if certain
3958 attributes are automatically enabled or disabled by this function
3959 in order to assure compatibility between options and
3960 configuration, the flags associated with those attributes are
3961 also set. By setting these "explicit bits", we avoid the risk
3962 that other code might accidentally overwrite these particular
3963 attributes with "default values".
3965 The various bits of rs6000_isa_flags are set to indicate the
3966 target options that have been selected for the most current
3967 compilation efforts. This has the effect of also turning on the
3968 associated TARGET_XXX values since these are macros which are
3969 generally defined to test the corresponding bit of the
3970 rs6000_isa_flags variable.
3972 The variable rs6000_builtin_mask is set to represent the target
3973 options for the most current compilation efforts, consistent with
3974 the current contents of rs6000_isa_flags. This variable controls
3975 expansion of built-in functions.
3977 Various other global variables and fields of global structures
3978 (over 50 in all) are initialized to reflect the desired options
3979 for the most current compilation efforts. */
3981 static bool
3982 rs6000_option_override_internal (bool global_init_p)
3984 bool ret = true;
3985 bool have_cpu = false;
3987 /* The default cpu requested at configure time, if any. */
3988 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3990 HOST_WIDE_INT set_masks;
3991 HOST_WIDE_INT ignore_masks;
3992 int cpu_index;
3993 int tune_index;
3994 struct cl_target_option *main_target_opt
3995 = ((global_init_p || target_option_default_node == NULL)
3996 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3998 /* Print defaults. */
3999 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4000 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4002 /* Remember the explicit arguments. */
4003 if (global_init_p)
4004 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4006 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4007 library functions, so warn about it. The flag may be useful for
4008 performance studies from time to time though, so don't disable it
4009 entirely. */
4010 if (global_options_set.x_rs6000_alignment_flags
4011 && rs6000_alignment_flags == MASK_ALIGN_POWER
4012 && DEFAULT_ABI == ABI_DARWIN
4013 && TARGET_64BIT)
4014 warning (0, "%qs is not supported for 64-bit Darwin;"
4015 " it is incompatible with the installed C and C++ libraries",
4016 "-malign-power");
4018 /* Numerous experiments show that IRA based loop pressure
4019 calculation works better for RTL loop invariant motion on targets
4020 with enough (>= 32) registers.  It is an expensive optimization,
4021 so it is enabled only when optimizing for peak performance. */
4022 if (optimize >= 3 && global_init_p
4023 && !global_options_set.x_flag_ira_loop_pressure)
4024 flag_ira_loop_pressure = 1;
4026 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4027 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
4028 options were already specified. */
4029 if (flag_sanitize & SANITIZE_USER_ADDRESS
4030 && !global_options_set.x_flag_asynchronous_unwind_tables)
4031 flag_asynchronous_unwind_tables = 1;
4033 /* Set the pointer size. */
4034 if (TARGET_64BIT)
4036 rs6000_pmode = DImode;
4037 rs6000_pointer_size = 64;
4039 else
4041 rs6000_pmode = SImode;
4042 rs6000_pointer_size = 32;
4045 /* Some OSs don't support saving the high part of 64-bit registers on context
4046 switch. Other OSs don't support saving Altivec registers. On those OSs,
4047 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4048 if the user wants either, the user must explicitly specify them and we
4049 won't interfere with the user's specification. */
4051 set_masks = POWERPC_MASKS;
4052 #ifdef OS_MISSING_POWERPC64
4053 if (OS_MISSING_POWERPC64)
4054 set_masks &= ~OPTION_MASK_POWERPC64;
4055 #endif
4056 #ifdef OS_MISSING_ALTIVEC
4057 if (OS_MISSING_ALTIVEC)
4058 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4059 | OTHER_VSX_VECTOR_MASKS);
4060 #endif
4062 /* Don't override by the processor default if given explicitly. */
4063 set_masks &= ~rs6000_isa_flags_explicit;
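/* Worked example (values illustrative): if the user passed -mno-altivec,
   OPTION_MASK_ALTIVEC is set in rs6000_isa_flags_explicit, so the line
   above clears it from set_masks and the -mcpu processing below can
   never turn AltiVec back on:

     set_masks  = ... | OPTION_MASK_ALTIVEC | ...
     explicit   =       OPTION_MASK_ALTIVEC
     set_masks &= ~explicit  -->  AltiVec bit now clear in set_masks  */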
4065 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments.  If the user changed
4066 the cpu in a target attribute or pragma, but did not specify a tuning
4067 option, use the cpu for the tuning option rather than the option specified
4068 with -mtune on the command line. Process a '--with-cpu' configuration
4069 request as an implicit --cpu. */
4070 if (rs6000_cpu_index >= 0)
4072 cpu_index = rs6000_cpu_index;
4073 have_cpu = true;
4075 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4077 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
4078 have_cpu = true;
4080 else if (implicit_cpu)
4082 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
4083 have_cpu = true;
4085 else
4087 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4088 const char *default_cpu = ((!TARGET_POWERPC64)
4089 ? "powerpc"
4090 : ((BYTES_BIG_ENDIAN)
4091 ? "powerpc64"
4092 : "powerpc64le"));
4094 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4095 have_cpu = false;
4098 gcc_assert (cpu_index >= 0);
4100 if (have_cpu)
4102 #ifndef HAVE_AS_POWER9
4103 if (processor_target_table[rs6000_cpu_index].processor
4104 == PROCESSOR_POWER9)
4106 have_cpu = false;
4107 warning (0, "will not generate power9 instructions because "
4108 "assembler lacks power9 support");
4110 #endif
4111 #ifndef HAVE_AS_POWER8
4112 if (processor_target_table[rs6000_cpu_index].processor
4113 == PROCESSOR_POWER8)
4115 have_cpu = false;
4116 warning (0, "will not generate power8 instructions because "
4117 "assembler lacks power8 support");
4119 #endif
4120 #ifndef HAVE_AS_POPCNTD
4121 if (processor_target_table[rs6000_cpu_index].processor
4122 == PROCESSOR_POWER7)
4124 have_cpu = false;
4125 warning (0, "will not generate power7 instructions because "
4126 "assembler lacks power7 support");
4128 #endif
4129 #ifndef HAVE_AS_DFP
4130 if (processor_target_table[rs6000_cpu_index].processor
4131 == PROCESSOR_POWER6)
4133 have_cpu = false;
4134 warning (0, "will not generate power6 instructions because "
4135 "assembler lacks power6 support");
4137 #endif
4138 #ifndef HAVE_AS_POPCNTB
4139 if (processor_target_table[rs6000_cpu_index].processor
4140 == PROCESSOR_POWER5)
4142 have_cpu = false;
4143 warning (0, "will not generate power5 instructions because "
4144 "assembler lacks power5 support");
4146 #endif
4148 if (!have_cpu)
4150 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4151 const char *default_cpu = (!TARGET_POWERPC64
4152 ? "powerpc"
4153 : (BYTES_BIG_ENDIAN
4154 ? "powerpc64"
4155 : "powerpc64le"));
4157 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4161 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4162 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4163 with those from the cpu, except for options that were explicitly set. If
4164 we don't have a cpu, do not override the target bits set in
4165 TARGET_DEFAULT. */
4166 if (have_cpu)
4168 rs6000_isa_flags &= ~set_masks;
4169 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4170 & set_masks);
4172 else
4174 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4175 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4176 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4177 to using rs6000_isa_flags, we need to do the initialization here.
4179 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4180 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4181 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
4182 : processor_target_table[cpu_index].target_enable);
4183 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4186 if (rs6000_tune_index >= 0)
4187 tune_index = rs6000_tune_index;
4188 else if (have_cpu)
4189 rs6000_tune_index = tune_index = cpu_index;
4190 else
4192 size_t i;
4193 enum processor_type tune_proc
4194 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4196 tune_index = -1;
4197 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4198 if (processor_target_table[i].processor == tune_proc)
4200 rs6000_tune_index = tune_index = i;
4201 break;
4205 gcc_assert (tune_index >= 0);
4206 rs6000_cpu = processor_target_table[tune_index].processor;
4208 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4209 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4210 || rs6000_cpu == PROCESSOR_PPCE5500)
4212 if (TARGET_ALTIVEC)
4213 error ("AltiVec not supported in this target");
4216 /* If we are optimizing big endian systems for space, use the load/store
4217 multiple and string instructions. */
4218 if (BYTES_BIG_ENDIAN && optimize_size)
4219 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4220 | OPTION_MASK_STRING);
4222 /* Don't allow -mmultiple or -mstring on little endian systems
4223 unless the cpu is a 750, because the hardware doesn't support the
4224 instructions used in little endian mode, and they cause an alignment
4225 trap.  The 750 does not cause an alignment trap (except when the
4226 target is unaligned). */
4228 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4230 if (TARGET_MULTIPLE)
4232 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4233 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4234 warning (0, "%qs is not supported on little endian systems",
4235 "-mmultiple");
4238 if (TARGET_STRING)
4240 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4241 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4242 warning (0, "%qs is not supported on little endian systems",
4243 "-mstring");
4247 /* If little-endian, default to -mstrict-align on older processors.
4248 Testing for htm matches power8 and later. */
4249 if (!BYTES_BIG_ENDIAN
4250 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4251 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4253 /* -maltivec={le,be} implies -maltivec. */
4254 if (rs6000_altivec_element_order != 0)
4255 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4257 /* Disallow -maltivec=le in big endian mode for now. This is not
4258 known to be useful for anyone. */
4259 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4261 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4262 rs6000_altivec_element_order = 0;
4265 if (!rs6000_fold_gimple)
4266 fprintf (stderr,
4267 "gimple folding of rs6000 builtins has been disabled.\n");
4269 /* Add some warnings for VSX. */
4270 if (TARGET_VSX)
4272 const char *msg = NULL;
4273 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4275 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4276 msg = N_("-mvsx requires hardware floating point");
4277 else
4279 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4280 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4283 else if (TARGET_PAIRED_FLOAT)
4284 msg = N_("-mvsx and -mpaired are incompatible");
4285 else if (TARGET_AVOID_XFORM > 0)
4286 msg = N_("-mvsx needs indexed addressing");
4287 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4288 & OPTION_MASK_ALTIVEC))
4290 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4291 msg = N_("-mvsx and -mno-altivec are incompatible");
4292 else
4293 msg = N_("-mno-altivec disables vsx");
4296 if (msg)
4298 warning (0, msg);
4299 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4300 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4304 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4305 the -mcpu setting to enable options that conflict. */
4306 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4307 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4308 | OPTION_MASK_ALTIVEC
4309 | OPTION_MASK_VSX)) != 0)
4310 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4311 | OPTION_MASK_DIRECT_MOVE)
4312 & ~rs6000_isa_flags_explicit);
4314 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4315 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4317 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4318 off all of the options that depend on those flags. */
4319 ignore_masks = rs6000_disable_incompatible_switches ();
4321 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4322 unless the user explicitly used the -mno-<option> to disable the code. */
4323 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4324 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4325 else if (TARGET_P9_MINMAX)
4327 if (have_cpu)
4329 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4331 /* legacy behavior: allow -mcpu=power9 with certain
4332 capabilities explicitly disabled. */
4333 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4335 else
4336 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4337 "for <xxx> less than power9", "-mcpu");
4339 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4340 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4341 & rs6000_isa_flags_explicit))
4342 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4343 were explicitly cleared. */
4344 error ("%qs incompatible with explicitly disabled options",
4345 "-mpower9-minmax");
4346 else
4347 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4349 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4350 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4351 else if (TARGET_VSX)
4352 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4353 else if (TARGET_POPCNTD)
4354 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4355 else if (TARGET_DFP)
4356 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4357 else if (TARGET_CMPB)
4358 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4359 else if (TARGET_FPRND)
4360 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4361 else if (TARGET_POPCNTB)
4362 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4363 else if (TARGET_ALTIVEC)
4364 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4366 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4368 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4369 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4370 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4373 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4375 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4376 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4377 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4380 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4382 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4383 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4384 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4387 if (TARGET_P8_VECTOR && !TARGET_VSX)
4389 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4390 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4391 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4392 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4394 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4395 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4396 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4398 else
4400 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4401 not explicit. */
4402 rs6000_isa_flags |= OPTION_MASK_VSX;
4403 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4407 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4409 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4410 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4411 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4414 /* The quad memory instructions only works in 64-bit mode. In 32-bit mode,
4415 silently turn off quad memory mode. */
4416 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4418 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4419 warning (0, N_("-mquad-memory requires 64-bit mode"));
4421 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4422 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4424 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4425 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4428 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4429 the words are reversed, but atomic operations can still be done by
4430 swapping the words. */
4431 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4433 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4434 warning (0, N_("-mquad-memory is not available in little endian "
4435 "mode"));
4437 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4440 /* Assume that if the user asked for normal quad memory instructions, they
4441 want the atomic versions as well, unless they explicitly told us not to use
4442 quad word atomic instructions. */
4443 if (TARGET_QUAD_MEMORY
4444 && !TARGET_QUAD_MEMORY_ATOMIC
4445 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4446 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4448 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4449 generating power8 instructions. */
4450 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4451 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4452 & OPTION_MASK_P8_FUSION);
4454 /* Setting additional fusion flags turns on base fusion. */
4455 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4457 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4459 if (TARGET_P8_FUSION_SIGN)
4460 error ("%qs requires %qs", "-mpower8-fusion-sign",
4461 "-mpower8-fusion");
4463 if (TARGET_TOC_FUSION)
4464 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4466 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4468 else
4469 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4472 /* Power9 fusion is a superset over power8 fusion. */
4473 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4475 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4477 /* We prefer to not mention undocumented options in
4478 error messages. However, if users have managed to select
4479 power9-fusion without selecting power8-fusion, they
4480 already know about undocumented flags. */
4481 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4482 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4484 else
4485 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4488 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4489 generating power9 instructions. */
4490 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4491 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4492 & OPTION_MASK_P9_FUSION);
4494 /* Power8 does not fuse sign extended loads with the addis. If we are
4495 optimizing at high levels for speed, convert a sign extended load into a
4496 zero extending load, and an explicit sign extension. */
4497 if (TARGET_P8_FUSION
4498 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4499 && optimize_function_for_speed_p (cfun)
4500 && optimize >= 3)
4501 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
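/* Illustration (assumed asm, not emitted here): under P8_FUSION_SIGN a
   sign-extending load such as

	lha  r9,0(r3)

   is generated instead as a fusable zero-extending load plus an
   explicit sign extension:

	lhz   r9,0(r3)
	extsh r9,r9

   since power8 only fuses addis with the zero-extending load forms.  */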
4503 /* TOC fusion requires 64-bit and medium/large code model. */
4504 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4506 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4507 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4508 warning (0, N_("-mtoc-fusion requires 64-bit"));
4511 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4513 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4514 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4515 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4518 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4519 model. */
4520 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4521 && (TARGET_CMODEL != CMODEL_SMALL)
4522 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4523 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4525 /* ISA 3.0 vector instructions include ISA 2.07. */
4526 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4528 /* We prefer to not mention undocumented options in
4529 error messages. However, if users have managed to select
4530 power9-vector without selecting power8-vector, they
4531 already know about undocumented flags. */
4532 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4533 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4534 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4535 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4537 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4538 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4539 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4541 else
4543 /* OPTION_MASK_P9_VECTOR is explicit and
4544 OPTION_MASK_P8_VECTOR is not explicit. */
4545 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4546 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4550 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4551 support.  If we only have ISA 2.06 support, and the user did not specify
4552 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4553 but we don't enable the full vectorization support. */
4554 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4555 TARGET_ALLOW_MOVMISALIGN = 1;
4557 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4559 if (TARGET_ALLOW_MOVMISALIGN > 0
4560 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4561 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4563 TARGET_ALLOW_MOVMISALIGN = 0;
4566 /* Determine when unaligned vector accesses are permitted, and when
4567 they are preferred over masked Altivec loads. Note that if
4568 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4569 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4570 not true. */
4571 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4573 if (!TARGET_VSX)
4575 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4576 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4578 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4581 else if (!TARGET_ALLOW_MOVMISALIGN)
4583 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4584 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4585 "-mallow-movmisalign");
4587 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4591 /* Set long double size before the IEEE 128-bit tests. */
4592 if (!global_options_set.x_rs6000_long_double_type_size)
4594 if (main_target_opt != NULL
4595 && (main_target_opt->x_rs6000_long_double_type_size
4596 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4597 error ("target attribute or pragma changes long double size");
4598 else
4599 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4602 /* Set -mabi=ieeelongdouble on some old targets. Note, AIX and Darwin
4603 explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
4604 pick up this default. */
4605 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4606 if (!global_options_set.x_rs6000_ieeequad)
4607 rs6000_ieeequad = 1;
4608 #endif
4610 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4611 systems, but don't enable the __float128 keyword. */
4612 if (TARGET_VSX && TARGET_LONG_DOUBLE_128
4613 && (TARGET_FLOAT128_ENABLE_TYPE || TARGET_IEEEQUAD)
4614 && ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) == 0))
4615 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4617 /* IEEE 128-bit floating point requires VSX support. */
4618 if (!TARGET_VSX)
4620 if (TARGET_FLOAT128_KEYWORD)
4622 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4623 error ("%qs requires VSX support", "-mfloat128");
4625 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4626 | OPTION_MASK_FLOAT128_KEYWORD
4627 | OPTION_MASK_FLOAT128_HW);
4630 else if (TARGET_FLOAT128_TYPE)
4632 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) != 0)
4633 error ("%qs requires VSX support", "-mfloat128-type");
4635 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4636 | OPTION_MASK_FLOAT128_KEYWORD
4637 | OPTION_MASK_FLOAT128_HW);
4641 /* -mfloat128 and -mfloat128-hardware internally require the underlying IEEE
4642 128-bit floating point support to be enabled. */
4643 if (!TARGET_FLOAT128_TYPE)
4645 if (TARGET_FLOAT128_KEYWORD)
4647 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4649 error ("%qs requires %qs", "-mfloat128", "-mfloat128-type");
4650 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4651 | OPTION_MASK_FLOAT128_KEYWORD
4652 | OPTION_MASK_FLOAT128_HW);
4654 else
4655 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4658 if (TARGET_FLOAT128_HW)
4660 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4662 error ("%qs requires %qs", "-mfloat128-hardware",
4663 "-mfloat128-type");
4664 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4666 else
4667 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4668 | OPTION_MASK_FLOAT128_KEYWORD
4669 | OPTION_MASK_FLOAT128_HW);
4673 /* If we have -mfloat128-type and full ISA 3.0 support, enable
4674 -mfloat128-hardware by default. However, don't enable the __float128
4675 keyword. If the user explicitly turned on -mfloat128-hardware, enable the
4676 -mfloat128 option as well if it was not already set. */
4677 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW
4678 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4679 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4680 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4682 if (TARGET_FLOAT128_HW
4683 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4685 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4686 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4688 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4691 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4693 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4694 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4696 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4699 if (TARGET_FLOAT128_HW && !TARGET_FLOAT128_KEYWORD
4700 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0
4701 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4702 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
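/* Example (hypothetical user code): once OPTION_MASK_FLOAT128_KEYWORD is
   in effect, the __float128 keyword becomes available; with
   -mfloat128-hardware the addition below can use ISA 3.0 quad-precision
   instructions such as xsaddqp, otherwise it falls back to a libcall.  */
#if 0 /* example only */
static __float128
example_f128_add (__float128 a, __float128 b)
{
  return a + b;
}
#endif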
4704 /* Print the options after updating the defaults. */
4705 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4706 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4708 /* E500mc does "better" if we inline more aggressively. Respect the
4709 user's opinion, though. */
4710 if (rs6000_block_move_inline_limit == 0
4711 && (rs6000_cpu == PROCESSOR_PPCE500MC
4712 || rs6000_cpu == PROCESSOR_PPCE500MC64
4713 || rs6000_cpu == PROCESSOR_PPCE5500
4714 || rs6000_cpu == PROCESSOR_PPCE6500))
4715 rs6000_block_move_inline_limit = 128;
4717 /* store_one_arg depends on expand_block_move to handle at least the
4718 size of reg_parm_stack_space. */
4719 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4720 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
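/* Example (hypothetical user code): with the limit raised to 128 above
   for the e500mc family, a fixed-size copy like this one is expanded
   inline rather than becoming a memcpy call.  */
#if 0 /* example only */
static void
example_copy (char *dst, const char *src)
{
  __builtin_memcpy (dst, src, 96);	/* 96 <= 128: expanded inline.  */
}
#endif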
4722 if (global_init_p)
4724 /* If the appropriate debug option is enabled, replace the target hooks
4725 with debug versions that call the real version and then prints
4726 debugging information. */
4727 if (TARGET_DEBUG_COST)
4729 targetm.rtx_costs = rs6000_debug_rtx_costs;
4730 targetm.address_cost = rs6000_debug_address_cost;
4731 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4734 if (TARGET_DEBUG_ADDR)
4736 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4737 targetm.legitimize_address = rs6000_debug_legitimize_address;
4738 rs6000_secondary_reload_class_ptr
4739 = rs6000_debug_secondary_reload_class;
4740 rs6000_secondary_memory_needed_ptr
4741 = rs6000_debug_secondary_memory_needed;
4742 rs6000_cannot_change_mode_class_ptr
4743 = rs6000_debug_cannot_change_mode_class;
4744 rs6000_preferred_reload_class_ptr
4745 = rs6000_debug_preferred_reload_class;
4746 rs6000_legitimize_reload_address_ptr
4747 = rs6000_debug_legitimize_reload_address;
4748 rs6000_mode_dependent_address_ptr
4749 = rs6000_debug_mode_dependent_address;
4752 if (rs6000_veclibabi_name)
4754 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4755 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4756 else
4758 error ("unknown vectorization library ABI type (%qs) for "
4759 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4760 ret = false;
4765 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4766 target attribute or pragma which automatically enables both options,
4767 unless the altivec ABI was set. This is set by default for 64-bit, but
4768 not for 32-bit. */
4769 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4770 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4771 | OPTION_MASK_FLOAT128_TYPE
4772 | OPTION_MASK_FLOAT128_KEYWORD)
4773 & ~rs6000_isa_flags_explicit);
4775 /* Enable Altivec ABI for AIX -maltivec. */
4776 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4778 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4779 error ("target attribute or pragma changes AltiVec ABI");
4780 else
4781 rs6000_altivec_abi = 1;
4784 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4785 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4786 be explicitly overridden in either case. */
4787 if (TARGET_ELF)
4789 if (!global_options_set.x_rs6000_altivec_abi
4790 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4792 if (main_target_opt != NULL
4793 && !main_target_opt->x_rs6000_altivec_abi)
4794 error ("target attribute or pragma changes AltiVec ABI");
4795 else
4796 rs6000_altivec_abi = 1;
4800 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4801 So far, the only darwin64 targets are also MACH-O. */
4802 if (TARGET_MACHO
4803 && DEFAULT_ABI == ABI_DARWIN
4804 && TARGET_64BIT)
4806 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4807 error ("target attribute or pragma changes darwin64 ABI");
4808 else
4810 rs6000_darwin64_abi = 1;
4811 /* Default to natural alignment, for better performance. */
4812 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4816 /* Place FP constants in the constant pool instead of TOC
4817 if section anchors enabled. */
4818 if (flag_section_anchors
4819 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4820 TARGET_NO_FP_IN_TOC = 1;
4822 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4823 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4825 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4826 SUBTARGET_OVERRIDE_OPTIONS;
4827 #endif
4828 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4829 SUBSUBTARGET_OVERRIDE_OPTIONS;
4830 #endif
4831 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4832 SUB3TARGET_OVERRIDE_OPTIONS;
4833 #endif
4835 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4836 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4838 /* For the E500 family of cores, reset the single/double FP flags to let us
4839 check that they remain constant across attributes or pragmas.  Also,
4840 clear a possible request for string instructions, which are not supported
4841 and which we might have silently enabled above for -Os.
4843 For other families, clear ISEL in case it was set implicitly. */
4846 switch (rs6000_cpu)
4848 case PROCESSOR_PPC8540:
4849 case PROCESSOR_PPC8548:
4850 case PROCESSOR_PPCE500MC:
4851 case PROCESSOR_PPCE500MC64:
4852 case PROCESSOR_PPCE5500:
4853 case PROCESSOR_PPCE6500:
4855 rs6000_single_float = 0;
4856 rs6000_double_float = 0;
4858 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4860 break;
4862 default:
4864 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4865 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4867 break;
4870 if (main_target_opt)
4872 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4873 error ("target attribute or pragma changes single precision floating "
4874 "point");
4875 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4876 error ("target attribute or pragma changes double precision floating "
4877 "point");
4880 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4881 && rs6000_cpu != PROCESSOR_POWER5
4882 && rs6000_cpu != PROCESSOR_POWER6
4883 && rs6000_cpu != PROCESSOR_POWER7
4884 && rs6000_cpu != PROCESSOR_POWER8
4885 && rs6000_cpu != PROCESSOR_POWER9
4886 && rs6000_cpu != PROCESSOR_PPCA2
4887 && rs6000_cpu != PROCESSOR_CELL
4888 && rs6000_cpu != PROCESSOR_PPC476);
4889 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4890 || rs6000_cpu == PROCESSOR_POWER5
4891 || rs6000_cpu == PROCESSOR_POWER7
4892 || rs6000_cpu == PROCESSOR_POWER8);
4893 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4894 || rs6000_cpu == PROCESSOR_POWER5
4895 || rs6000_cpu == PROCESSOR_POWER6
4896 || rs6000_cpu == PROCESSOR_POWER7
4897 || rs6000_cpu == PROCESSOR_POWER8
4898 || rs6000_cpu == PROCESSOR_POWER9
4899 || rs6000_cpu == PROCESSOR_PPCE500MC
4900 || rs6000_cpu == PROCESSOR_PPCE500MC64
4901 || rs6000_cpu == PROCESSOR_PPCE5500
4902 || rs6000_cpu == PROCESSOR_PPCE6500);
4904 /* Allow debug switches to override the above settings. These are set to -1
4905 in rs6000.opt to indicate the user hasn't directly set the switch. */
4906 if (TARGET_ALWAYS_HINT >= 0)
4907 rs6000_always_hint = TARGET_ALWAYS_HINT;
4909 if (TARGET_SCHED_GROUPS >= 0)
4910 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4912 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4913 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4915 rs6000_sched_restricted_insns_priority
4916 = (rs6000_sched_groups ? 1 : 0);
4918 /* Handle -msched-costly-dep option. */
4919 rs6000_sched_costly_dep
4920 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4922 if (rs6000_sched_costly_dep_str)
4924 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4925 rs6000_sched_costly_dep = no_dep_costly;
4926 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4927 rs6000_sched_costly_dep = all_deps_costly;
4928 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4929 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4930 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4931 rs6000_sched_costly_dep = store_to_load_dep_costly;
4932 else
4933 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4934 atoi (rs6000_sched_costly_dep_str));
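/* Usage examples (command lines, for illustration):

     -msched-costly-dep=no                  no dependence is costly
     -msched-costly-dep=true_store_to_load  true store-to-load deps costly
     -msched-costly-dep=20                  (assumption, via the atoi
                                            fallback above) treat deps with
                                            cost of at least 20 as costly  */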
4937 /* Handle -minsert-sched-nops option. */
4938 rs6000_sched_insert_nops
4939 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4941 if (rs6000_sched_insert_nops_str)
4943 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4944 rs6000_sched_insert_nops = sched_finish_none;
4945 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4946 rs6000_sched_insert_nops = sched_finish_pad_groups;
4947 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4948 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4949 else
4950 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4951 atoi (rs6000_sched_insert_nops_str));
4954 /* Handle stack protector.  */
4955 if (!global_options_set.x_rs6000_stack_protector_guard)
4956 #ifdef TARGET_THREAD_SSP_OFFSET
4957 rs6000_stack_protector_guard = SSP_TLS;
4958 #else
4959 rs6000_stack_protector_guard = SSP_GLOBAL;
4960 #endif
4962 #ifdef TARGET_THREAD_SSP_OFFSET
4963 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4964 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4965 #endif
4967 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4969 char *endp;
4970 const char *str = rs6000_stack_protector_guard_offset_str;
4972 errno = 0;
4973 long offset = strtol (str, &endp, 0);
4974 if (!*str || *endp || errno)
4975 error ("%qs is not a valid number in %qs", str,
4976 "-mstack-protector-guard-offset=");
4978 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4979 || (TARGET_64BIT && (offset & 3)))
4980 error ("%qs is not a valid offset in %qs", str,
4981 "-mstack-protector-guard-offset=");
4983 rs6000_stack_protector_guard_offset = offset;
4986 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4988 const char *str = rs6000_stack_protector_guard_reg_str;
4989 int reg = decode_reg_name (str);
4991 if (!IN_RANGE (reg, 1, 31))
4992 error ("%qs is not a valid base register in %qs", str,
4993 "-mstack-protector-guard-reg=");
4995 rs6000_stack_protector_guard_reg = reg;
4998 if (rs6000_stack_protector_guard == SSP_TLS
4999 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
5000 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
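/* Usage example (command line, for illustration; the offset value is an
   assumption, not a documented default): select the TLS-relative guard
   explicitly on a 64-bit target, where r13 is the TCB pointer:

     -mstack-protector-guard=tls
     -mstack-protector-guard-reg=r13
     -mstack-protector-guard-offset=0x7010  */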
5002 if (global_init_p)
5004 #ifdef TARGET_REGNAMES
5005 /* If the user desires alternate register names, copy in the
5006 alternate names now. */
5007 if (TARGET_REGNAMES)
5008 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
5009 #endif
5011 /* Set aix_struct_return last, after the ABI is determined.
5012 If -maix-struct-return or -msvr4-struct-return was explicitly
5013 used, don't override with the ABI default. */
5014 if (!global_options_set.x_aix_struct_return)
5015 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
5017 #if 0
5018 /* IBM XL compiler defaults to unsigned bitfields. */
5019 if (TARGET_XL_COMPAT)
5020 flag_signed_bitfields = 0;
5021 #endif
5023 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
5024 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
5026 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
5028 /* We can only guarantee the availability of DI pseudo-ops when
5029 assembling for 64-bit targets. */
5030 if (!TARGET_64BIT)
5032 targetm.asm_out.aligned_op.di = NULL;
5033 targetm.asm_out.unaligned_op.di = NULL;
5037 /* Set branch target alignment, if not optimizing for size. */
5038 if (!optimize_size)
5040 /* Cell wants to be aligned 8-byte for dual issue.  Titan wants to be
5041 aligned 8-byte to avoid misprediction by the branch predictor.  */
5042 if (rs6000_cpu == PROCESSOR_TITAN
5043 || rs6000_cpu == PROCESSOR_CELL)
5045 if (align_functions <= 0)
5046 align_functions = 8;
5047 if (align_jumps <= 0)
5048 align_jumps = 8;
5049 if (align_loops <= 0)
5050 align_loops = 8;
5052 if (rs6000_align_branch_targets)
5054 if (align_functions <= 0)
5055 align_functions = 16;
5056 if (align_jumps <= 0)
5057 align_jumps = 16;
5058 if (align_loops <= 0)
5060 can_override_loop_align = 1;
5061 align_loops = 16;
5064 if (align_jumps_max_skip <= 0)
5065 align_jumps_max_skip = 15;
5066 if (align_loops_max_skip <= 0)
5067 align_loops_max_skip = 15;
5070 /* Arrange to save and restore machine status around nested functions. */
5071 init_machine_status = rs6000_init_machine_status;
5073 /* We should always be splitting complex arguments, but we can't break
5074 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5075 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5076 targetm.calls.split_complex_arg = NULL;
5078 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5079 if (DEFAULT_ABI == ABI_AIX)
5080 targetm.calls.custom_function_descriptors = 0;
5083 /* Initialize rs6000_cost with the appropriate target costs. */
5084 if (optimize_size)
5085 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5086 else
5087 switch (rs6000_cpu)
5089 case PROCESSOR_RS64A:
5090 rs6000_cost = &rs64a_cost;
5091 break;
5093 case PROCESSOR_MPCCORE:
5094 rs6000_cost = &mpccore_cost;
5095 break;
5097 case PROCESSOR_PPC403:
5098 rs6000_cost = &ppc403_cost;
5099 break;
5101 case PROCESSOR_PPC405:
5102 rs6000_cost = &ppc405_cost;
5103 break;
5105 case PROCESSOR_PPC440:
5106 rs6000_cost = &ppc440_cost;
5107 break;
5109 case PROCESSOR_PPC476:
5110 rs6000_cost = &ppc476_cost;
5111 break;
5113 case PROCESSOR_PPC601:
5114 rs6000_cost = &ppc601_cost;
5115 break;
5117 case PROCESSOR_PPC603:
5118 rs6000_cost = &ppc603_cost;
5119 break;
5121 case PROCESSOR_PPC604:
5122 rs6000_cost = &ppc604_cost;
5123 break;
5125 case PROCESSOR_PPC604e:
5126 rs6000_cost = &ppc604e_cost;
5127 break;
5129 case PROCESSOR_PPC620:
5130 rs6000_cost = &ppc620_cost;
5131 break;
5133 case PROCESSOR_PPC630:
5134 rs6000_cost = &ppc630_cost;
5135 break;
5137 case PROCESSOR_CELL:
5138 rs6000_cost = &ppccell_cost;
5139 break;
5141 case PROCESSOR_PPC750:
5142 case PROCESSOR_PPC7400:
5143 rs6000_cost = &ppc750_cost;
5144 break;
5146 case PROCESSOR_PPC7450:
5147 rs6000_cost = &ppc7450_cost;
5148 break;
5150 case PROCESSOR_PPC8540:
5151 case PROCESSOR_PPC8548:
5152 rs6000_cost = &ppc8540_cost;
5153 break;
5155 case PROCESSOR_PPCE300C2:
5156 case PROCESSOR_PPCE300C3:
5157 rs6000_cost = &ppce300c2c3_cost;
5158 break;
5160 case PROCESSOR_PPCE500MC:
5161 rs6000_cost = &ppce500mc_cost;
5162 break;
5164 case PROCESSOR_PPCE500MC64:
5165 rs6000_cost = &ppce500mc64_cost;
5166 break;
5168 case PROCESSOR_PPCE5500:
5169 rs6000_cost = &ppce5500_cost;
5170 break;
5172 case PROCESSOR_PPCE6500:
5173 rs6000_cost = &ppce6500_cost;
5174 break;
5176 case PROCESSOR_TITAN:
5177 rs6000_cost = &titan_cost;
5178 break;
5180 case PROCESSOR_POWER4:
5181 case PROCESSOR_POWER5:
5182 rs6000_cost = &power4_cost;
5183 break;
5185 case PROCESSOR_POWER6:
5186 rs6000_cost = &power6_cost;
5187 break;
5189 case PROCESSOR_POWER7:
5190 rs6000_cost = &power7_cost;
5191 break;
5193 case PROCESSOR_POWER8:
5194 rs6000_cost = &power8_cost;
5195 break;
5197 case PROCESSOR_POWER9:
5198 rs6000_cost = &power9_cost;
5199 break;
5201 case PROCESSOR_PPCA2:
5202 rs6000_cost = &ppca2_cost;
5203 break;
5205 default:
5206 gcc_unreachable ();
5209 if (global_init_p)
5211 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5212 rs6000_cost->simultaneous_prefetches,
5213 global_options.x_param_values,
5214 global_options_set.x_param_values);
5215 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5216 global_options.x_param_values,
5217 global_options_set.x_param_values);
5218 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5219 rs6000_cost->cache_line_size,
5220 global_options.x_param_values,
5221 global_options_set.x_param_values);
5222 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5223 global_options.x_param_values,
5224 global_options_set.x_param_values);
5226 /* Increase loop peeling limits based on performance analysis. */
5227 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5228 global_options.x_param_values,
5229 global_options_set.x_param_values);
5230 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5231 global_options.x_param_values,
5232 global_options_set.x_param_values);
5234 /* Use the 'model' -fsched-pressure algorithm by default. */
5235 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5236 SCHED_PRESSURE_MODEL,
5237 global_options.x_param_values,
5238 global_options_set.x_param_values);
5240 /* If using typedef char *va_list, signal that
5241 __builtin_va_start (&ap, 0) can be optimized to
5242 ap = __builtin_next_arg (0). */
5243 if (DEFAULT_ABI != ABI_V4)
5244 targetm.expand_builtin_va_start = NULL;
5247 /* Set up single/double float flags.
5248 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5249 then set both flags. */
5250 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5251 rs6000_single_float = rs6000_double_float = 1;
5253 /* If not explicitly specified via option, decide whether to generate indexed
5254 load/store instructions. A value of -1 indicates that the
5255 initial value of this variable has not been overwritten. During
5256 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5257 if (TARGET_AVOID_XFORM == -1)
5258 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5259 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5260 need indexed accesses and the type used is the scalar type of the element
5261 being loaded or stored. */
5262 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5263 && !TARGET_ALTIVEC);
5265 /* Set the -mrecip options. */
5266 if (rs6000_recip_name)
5268 char *p = ASTRDUP (rs6000_recip_name);
5269 char *q;
5270 unsigned int mask, i;
5271 bool invert;
5273 while ((q = strtok (p, ",")) != NULL)
5275 p = NULL;
5276 if (*q == '!')
5278 invert = true;
5279 q++;
5281 else
5282 invert = false;
5284 if (!strcmp (q, "default"))
5285 mask = ((TARGET_RECIP_PRECISION)
5286 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5287 else
5289 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5290 if (!strcmp (q, recip_options[i].string))
5292 mask = recip_options[i].mask;
5293 break;
5296 if (i == ARRAY_SIZE (recip_options))
5298 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5299 invert = false;
5300 mask = 0;
5301 ret = false;
5305 if (invert)
5306 rs6000_recip_control &= ~mask;
5307 else
5308 rs6000_recip_control |= mask;
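/* A standalone sketch (not part of GCC) of the -mrecip= parsing above:
   each comma-separated token ORs bits into a control mask, and a leading
   '!' clears them instead, so "-mrecip=all,!divd" enables every estimate
   and then turns the double-precision divide estimate back off.  The mask
   names and bit values here are illustrative, not GCC's.  */
#if 0
#include <string.h>

#define RECIP_DIVF   0x01      /* illustrative bit assignments */
#define RECIP_DIVD   0x02
#define RECIP_RSQRTF 0x04
#define RECIP_RSQRTD 0x08
#define RECIP_ALL    0x0f

static unsigned int
parse_recip (char *p)          /* P must be writable for strtok.  */
{
  unsigned int control = 0;
  char *q;

  while ((q = strtok (p, ",")) != NULL)
    {
      int invert = (*q == '!');
      unsigned int mask = 0;

      p = NULL;                /* continue scanning the same string */
      if (invert)
        q++;
      if (!strcmp (q, "all"))
        mask = RECIP_ALL;
      else if (!strcmp (q, "divf"))
        mask = RECIP_DIVF;
      else if (!strcmp (q, "divd"))
        mask = RECIP_DIVD;

      if (invert)
        control &= ~mask;
      else
        control |= mask;
    }
  return control;
}
#endif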
5312 /* Set the builtin mask for the various options that can affect which
5313 builtins are available. In the past we used target_flags, but we've run
5314 out of bits, and some options like PAIRED are no longer in target_flags. */
5315 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5316 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5317 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5318 rs6000_builtin_mask);
5320 /* Initialize all of the registers. */
5321 rs6000_init_hard_regno_mode_ok (global_init_p);
5323 /* Save the initial options in case the user does function specific options */
5324 if (global_init_p)
5325 target_option_default_node = target_option_current_node
5326 = build_target_option_node (&global_options);
5328 /* If not explicitly specified via option, decide whether to generate the
5329 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
5330 if (TARGET_LINK_STACK == -1)
5331 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5333 return ret;
5336 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5337 define the target cpu type. */
5339 static void
5340 rs6000_option_override (void)
5342 (void) rs6000_option_override_internal (true);
5346 /* Implement targetm.vectorize.builtin_mask_for_load. */
5347 static tree
5348 rs6000_builtin_mask_for_load (void)
5350 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5351 if ((TARGET_ALTIVEC && !TARGET_VSX)
5352 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5353 return altivec_builtin_mask_for_load;
5354 else
5355 return 0;
5358 /* Implement LOOP_ALIGN. */
5359 int
5360 rs6000_loop_align (rtx label)
5362 basic_block bb;
5363 int ninsns;
5365 /* Don't override loop alignment if -falign-loops was specified. */
5366 if (!can_override_loop_align)
5367 return align_loops_log;
5369 bb = BLOCK_FOR_INSN (label);
5370 ninsns = num_loop_insns (bb->loop_father);
5372 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5373 if (ninsns > 4 && ninsns <= 8
5374 && (rs6000_cpu == PROCESSOR_POWER4
5375 || rs6000_cpu == PROCESSOR_POWER5
5376 || rs6000_cpu == PROCESSOR_POWER6
5377 || rs6000_cpu == PROCESSOR_POWER7
5378 || rs6000_cpu == PROCESSOR_POWER8
5379 || rs6000_cpu == PROCESSOR_POWER9))
5380 return 5;
5381 else
5382 return align_loops_log;
5385 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5386 static int
5387 rs6000_loop_align_max_skip (rtx_insn *label)
5389 return (1 << rs6000_loop_align (label)) - 1;
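/* Worked example (illustrative): a return value of 5 from rs6000_loop_align
   requests a 2**5 == 32 byte boundary, and rs6000_loop_align_max_skip then
   allows (1 << 5) - 1 == 31 bytes of padding to reach it; if more padding
   than that would be needed, the alignment is not performed.  */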
5392 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5393 after applying N iterations. This routine does not determine how many
5394 iterations are required to reach the desired alignment. */
5396 static bool
5397 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5399 if (is_packed)
5400 return false;
5402 if (TARGET_32BIT)
5404 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5405 return true;
5407 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5408 return true;
5410 return false;
5412 else
5414 if (TARGET_MACHO)
5415 return false;
5417 /* Assume that all other types are naturally aligned. CHECKME! */
5418 return true;
5422 /* Return true if the vector misalignment factor is supported by the
5423 target. */
5424 static bool
5425 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5426 const_tree type,
5427 int misalignment,
5428 bool is_packed)
5430 if (TARGET_VSX)
5432 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5433 return true;
5435 /* Return false if the movmisalign pattern is not supported for this mode. */
5436 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5437 return false;
5439 if (misalignment == -1)
5441 /* Misalignment factor is unknown at compile time but we know
5442 it's word aligned. */
5443 if (rs6000_vector_alignment_reachable (type, is_packed))
5445 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5447 if (element_size == 64 || element_size == 32)
5448 return true;
5451 return false;
5454 /* VSX supports word-aligned vectors. */
5455 if (misalignment % 4 == 0)
5456 return true;
5458 return false;
5461 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5462 static int
5463 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5464 tree vectype, int misalign)
5466 unsigned elements;
5467 tree elem_type;
5469 switch (type_of_cost)
5471 case scalar_stmt:
5472 case scalar_load:
5473 case scalar_store:
5474 case vector_stmt:
5475 case vector_load:
5476 case vector_store:
5477 case vec_to_scalar:
5478 case scalar_to_vec:
5479 case cond_branch_not_taken:
5480 return 1;
5482 case vec_perm:
5483 if (TARGET_VSX)
5484 return 3;
5485 else
5486 return 1;
5488 case vec_promote_demote:
5489 if (TARGET_VSX)
5490 return 4;
5491 else
5492 return 1;
5494 case cond_branch_taken:
5495 return 3;
5497 case unaligned_load:
5498 if (TARGET_P9_VECTOR)
5499 return 3;
5501 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5502 return 1;
5504 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5506 elements = TYPE_VECTOR_SUBPARTS (vectype);
5507 if (elements == 2)
5508 /* Double word aligned. */
5509 return 2;
5511 if (elements == 4)
5513 switch (misalign)
5515 case 8:
5516 /* Double word aligned. */
5517 return 2;
5519 case -1:
5520 /* Unknown misalignment. */
5521 case 4:
5522 case 12:
5523 /* Word aligned. */
5524 return 22;
5526 default:
5527 gcc_unreachable ();
5532 if (TARGET_ALTIVEC)
5533 /* Misaligned loads are not supported. */
5534 gcc_unreachable ();
5536 return 2;
5538 case unaligned_store:
5539 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5540 return 1;
5542 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5544 elements = TYPE_VECTOR_SUBPARTS (vectype);
5545 if (elements == 2)
5546 /* Double word aligned. */
5547 return 2;
5549 if (elements == 4)
5551 switch (misalign)
5553 case 8:
5554 /* Double word aligned. */
5555 return 2;
5557 case -1:
5558 /* Unknown misalignment. */
5559 case 4:
5560 case 12:
5561 /* Word aligned. */
5562 return 23;
5564 default:
5565 gcc_unreachable ();
5570 if (TARGET_ALTIVEC)
5571 /* Misaligned stores are not supported. */
5572 gcc_unreachable ();
5574 return 2;
5576 case vec_construct:
5577 /* This is a rough approximation assuming non-constant elements
5578 constructed into a vector via element insertion. FIXME:
5579 vec_construct is not granular enough for uniformly good
5580 decisions. If the initialization is a splat, this is
5581 cheaper than we estimate. Improve this someday. */
5582 elem_type = TREE_TYPE (vectype);
5583 /* 32-bit vectors loaded into registers are stored as double
5584 precision, so we need 2 permutes, 2 converts, and 1 merge
5585 to construct a vector of short floats from them. */
5586 if (SCALAR_FLOAT_TYPE_P (elem_type)
5587 && TYPE_PRECISION (elem_type) == 32)
5588 return 5;
5589 /* On POWER9, integer vector types are built up in GPRs and then
5590 use a direct move (2 cycles). For POWER8 this is even worse,
5591 as we need two direct moves and a merge, and the direct moves
5592 are five cycles. */
5593 else if (INTEGRAL_TYPE_P (elem_type))
5595 if (TARGET_P9_VECTOR)
5596 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5597 else
5598 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5600 else
5601 /* V2DFmode doesn't need a direct move. */
5602 return 2;
5604 default:
5605 gcc_unreachable ();
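/* Worked example (illustrative, reading the tables above): with VSX and
   -mallow-movmisalign but without TARGET_EFFICIENT_UNALIGNED_VSX, an
   unaligned V4SF load that is merely word aligned (misalign 4 or 12, or
   unknown) costs 22, while a double-word aligned one (misalign 8) costs
   only 2, so the vectorizer works hard to establish 8-byte alignment.  */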
5609 /* Implement targetm.vectorize.preferred_simd_mode. */
5611 static machine_mode
5612 rs6000_preferred_simd_mode (scalar_mode mode)
5614 if (TARGET_VSX)
5615 switch (mode)
5617 case E_DFmode:
5618 return V2DFmode;
5619 default:;
5621 if (TARGET_ALTIVEC || TARGET_VSX)
5622 switch (mode)
5624 case E_SFmode:
5625 return V4SFmode;
5626 case E_TImode:
5627 return V1TImode;
5628 case E_DImode:
5629 return V2DImode;
5630 case E_SImode:
5631 return V4SImode;
5632 case E_HImode:
5633 return V8HImode;
5634 case E_QImode:
5635 return V16QImode;
5636 default:;
5638 if (TARGET_PAIRED_FLOAT
5639 && mode == SFmode)
5640 return V2SFmode;
5641 return word_mode;
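/* Worked example (illustrative): with VSX enabled, a loop over double is
   vectorized in V2DFmode; float, long long, int, short and char map to
   V4SFmode, V2DImode, V4SImode, V8HImode and V16QImode via the AltiVec
   table; with no vector unit at all the hook falls back to word_mode,
   which leaves nothing for the vectorizer to use.  */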
5644 typedef struct _rs6000_cost_data
5646 struct loop *loop_info;
5647 unsigned cost[3];
5648 } rs6000_cost_data;
5650 /* Test for likely overcommitment of vector hardware resources. If a
5651 loop iteration is relatively large, and too large a percentage of
5652 instructions in the loop are vectorized, the cost model may not
5653 adequately reflect delays from unavailable vector resources.
5654 Penalize the loop body cost for this case. */
5656 static void
5657 rs6000_density_test (rs6000_cost_data *data)
5659 const int DENSITY_PCT_THRESHOLD = 85;
5660 const int DENSITY_SIZE_THRESHOLD = 70;
5661 const int DENSITY_PENALTY = 10;
5662 struct loop *loop = data->loop_info;
5663 basic_block *bbs = get_loop_body (loop);
5664 int nbbs = loop->num_nodes;
5665 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5666 int i, density_pct;
5668 for (i = 0; i < nbbs; i++)
5670 basic_block bb = bbs[i];
5671 gimple_stmt_iterator gsi;
5673 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5675 gimple *stmt = gsi_stmt (gsi);
5676 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5678 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5679 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5680 not_vec_cost++;
5684 free (bbs);
5685 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5687 if (density_pct > DENSITY_PCT_THRESHOLD
5688 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5690 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5691 if (dump_enabled_p ())
5692 dump_printf_loc (MSG_NOTE, vect_location,
5693 "density %d%%, cost %d exceeds threshold, penalizing "
5694 "loop body cost by %d%%", density_pct,
5695 vec_cost + not_vec_cost, DENSITY_PENALTY);
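/* A standalone sketch (not part of GCC) of the density penalty above,
   with concrete numbers: 90 units of vectorized cost out of 100 total
   gives a density of 90% > 85%, and a body size of 100 > 70, so the body
   cost is scaled to 110%, i.e. from 90 to 99.  */
#if 0
static int
density_penalized_cost (int vec_cost, int not_vec_cost)
{
  const int pct_threshold = 85;
  const int size_threshold = 70;
  const int penalty = 10;
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > pct_threshold
      && vec_cost + not_vec_cost > size_threshold)
    return vec_cost * (100 + penalty) / 100;   /* 90 -> 99 */
  return vec_cost;
}
#endif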
5699 /* Implement targetm.vectorize.init_cost. */
5701 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5702 instruction is needed by the vectorization. */
5703 static bool rs6000_vect_nonmem;
5705 static void *
5706 rs6000_init_cost (struct loop *loop_info)
5708 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5709 data->loop_info = loop_info;
5710 data->cost[vect_prologue] = 0;
5711 data->cost[vect_body] = 0;
5712 data->cost[vect_epilogue] = 0;
5713 rs6000_vect_nonmem = false;
5714 return data;
5717 /* Implement targetm.vectorize.add_stmt_cost. */
5719 static unsigned
5720 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5721 struct _stmt_vec_info *stmt_info, int misalign,
5722 enum vect_cost_model_location where)
5724 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5725 unsigned retval = 0;
5727 if (flag_vect_cost_model)
5729 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5730 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5731 misalign);
5732 /* Statements in an inner loop relative to the loop being
5733 vectorized are weighted more heavily. The value here is
5734 arbitrary and could potentially be improved with analysis. */
5735 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5736 count *= 50; /* FIXME. */
5738 retval = (unsigned) (count * stmt_cost);
5739 cost_data->cost[where] += retval;
5741 /* Check whether we're doing something other than just a copy loop.
5742 Not all such loops may be profitably vectorized; see
5743 rs6000_finish_cost. */
5744 if ((kind == vec_to_scalar || kind == vec_perm
5745 || kind == vec_promote_demote || kind == vec_construct
5746 || kind == scalar_to_vec)
5747 || (where == vect_body && kind == vector_stmt))
5748 rs6000_vect_nonmem = true;
5751 return retval;
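/* Worked example (illustrative): four vector_stmt occurrences of cost 1
   in the body of the loop under vectorization add 4 to the vect_body
   bucket; the same statement inside a nested inner loop is weighted by
   the arbitrary factor of 50 above and adds 200 instead.  */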
5754 /* Implement targetm.vectorize.finish_cost. */
5756 static void
5757 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5758 unsigned *body_cost, unsigned *epilogue_cost)
5760 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5762 if (cost_data->loop_info)
5763 rs6000_density_test (cost_data);
5765 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5766 that require versioning for any reason. The vectorization is at
5767 best a wash inside the loop, and the versioning checks make
5768 profitability highly unlikely and potentially quite harmful. */
5769 if (cost_data->loop_info)
5771 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5772 if (!rs6000_vect_nonmem
5773 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5774 && LOOP_REQUIRES_VERSIONING (vec_info))
5775 cost_data->cost[vect_body] += 10000;
5778 *prologue_cost = cost_data->cost[vect_prologue];
5779 *body_cost = cost_data->cost[vect_body];
5780 *epilogue_cost = cost_data->cost[vect_epilogue];
5783 /* Implement targetm.vectorize.destroy_cost_data. */
5785 static void
5786 rs6000_destroy_cost_data (void *data)
5788 free (data);
5791 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5792 library with vectorized intrinsics. */
5794 static tree
5795 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5796 tree type_in)
5798 char name[32];
5799 const char *suffix = NULL;
5800 tree fntype, new_fndecl, bdecl = NULL_TREE;
5801 int n_args = 1;
5802 const char *bname;
5803 machine_mode el_mode, in_mode;
5804 int n, in_n;
5806 /* Libmass is suitable for unsafe math only as it does not correctly support
5807 parts of IEEE with the required precision such as denormals. Only support
5808 it if we have VSX to use the simd d2 or f4 functions.
5809 XXX: Add variable length support. */
5810 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5811 return NULL_TREE;
5813 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5814 n = TYPE_VECTOR_SUBPARTS (type_out);
5815 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5816 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5817 if (el_mode != in_mode
5818 || n != in_n)
5819 return NULL_TREE;
5821 switch (fn)
5823 CASE_CFN_ATAN2:
5824 CASE_CFN_HYPOT:
5825 CASE_CFN_POW:
5826 n_args = 2;
5827 gcc_fallthrough ();
5829 CASE_CFN_ACOS:
5830 CASE_CFN_ACOSH:
5831 CASE_CFN_ASIN:
5832 CASE_CFN_ASINH:
5833 CASE_CFN_ATAN:
5834 CASE_CFN_ATANH:
5835 CASE_CFN_CBRT:
5836 CASE_CFN_COS:
5837 CASE_CFN_COSH:
5838 CASE_CFN_ERF:
5839 CASE_CFN_ERFC:
5840 CASE_CFN_EXP2:
5841 CASE_CFN_EXP:
5842 CASE_CFN_EXPM1:
5843 CASE_CFN_LGAMMA:
5844 CASE_CFN_LOG10:
5845 CASE_CFN_LOG1P:
5846 CASE_CFN_LOG2:
5847 CASE_CFN_LOG:
5848 CASE_CFN_SIN:
5849 CASE_CFN_SINH:
5850 CASE_CFN_SQRT:
5851 CASE_CFN_TAN:
5852 CASE_CFN_TANH:
5853 if (el_mode == DFmode && n == 2)
5855 bdecl = mathfn_built_in (double_type_node, fn);
5856 suffix = "d2"; /* pow -> powd2 */
5858 else if (el_mode == SFmode && n == 4)
5860 bdecl = mathfn_built_in (float_type_node, fn);
5861 suffix = "4"; /* powf -> powf4 */
5863 else
5864 return NULL_TREE;
5865 if (!bdecl)
5866 return NULL_TREE;
5867 break;
5869 default:
5870 return NULL_TREE;
5873 gcc_assert (suffix != NULL);
5874 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5875 if (!bname)
5876 return NULL_TREE;
5878 strcpy (name, bname + sizeof ("__builtin_") - 1);
5879 strcat (name, suffix);
5881 if (n_args == 1)
5882 fntype = build_function_type_list (type_out, type_in, NULL);
5883 else if (n_args == 2)
5884 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5885 else
5886 gcc_unreachable ();
5888 /* Build a function declaration for the vectorized function. */
5889 new_fndecl = build_decl (BUILTINS_LOCATION,
5890 FUNCTION_DECL, get_identifier (name), fntype);
5891 TREE_PUBLIC (new_fndecl) = 1;
5892 DECL_EXTERNAL (new_fndecl) = 1;
5893 DECL_IS_NOVOPS (new_fndecl) = 1;
5894 TREE_READONLY (new_fndecl) = 1;
5896 return new_fndecl;
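/* A standalone sketch (not part of GCC) of the MASS name construction
   above: strip the "__builtin_" prefix from the scalar builtin's name and
   append the vector suffix, so __builtin_pow becomes powd2 for V2DF and
   __builtin_powf becomes powf4 for V4SF.  */
#if 0
#include <stdio.h>
#include <string.h>

static void
mass_name (char *out, const char *builtin, const char *suffix)
{
  strcpy (out, builtin + sizeof ("__builtin_") - 1);
  strcat (out, suffix);
}

int
main (void)
{
  char name[32];
  mass_name (name, "__builtin_pow", "d2");
  printf ("%s\n", name);       /* prints "powd2" */
  return 0;
}
#endif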
5899 /* Returns a function decl for a vectorized version of the builtin function
5900 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5901 if it is not available. */
5903 static tree
5904 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5905 tree type_in)
5907 machine_mode in_mode, out_mode;
5908 int in_n, out_n;
5910 if (TARGET_DEBUG_BUILTIN)
5911 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5912 combined_fn_name (combined_fn (fn)),
5913 GET_MODE_NAME (TYPE_MODE (type_out)),
5914 GET_MODE_NAME (TYPE_MODE (type_in)));
5916 if (TREE_CODE (type_out) != VECTOR_TYPE
5917 || TREE_CODE (type_in) != VECTOR_TYPE)
5918 return NULL_TREE;
5920 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5921 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5922 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5923 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5925 switch (fn)
5927 CASE_CFN_COPYSIGN:
5928 if (VECTOR_UNIT_VSX_P (V2DFmode)
5929 && out_mode == DFmode && out_n == 2
5930 && in_mode == DFmode && in_n == 2)
5931 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5932 if (VECTOR_UNIT_VSX_P (V4SFmode)
5933 && out_mode == SFmode && out_n == 4
5934 && in_mode == SFmode && in_n == 4)
5935 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5936 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5937 && out_mode == SFmode && out_n == 4
5938 && in_mode == SFmode && in_n == 4)
5939 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5940 break;
5941 CASE_CFN_CEIL:
5942 if (VECTOR_UNIT_VSX_P (V2DFmode)
5943 && out_mode == DFmode && out_n == 2
5944 && in_mode == DFmode && in_n == 2)
5945 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5946 if (VECTOR_UNIT_VSX_P (V4SFmode)
5947 && out_mode == SFmode && out_n == 4
5948 && in_mode == SFmode && in_n == 4)
5949 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5950 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5951 && out_mode == SFmode && out_n == 4
5952 && in_mode == SFmode && in_n == 4)
5953 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5954 break;
5955 CASE_CFN_FLOOR:
5956 if (VECTOR_UNIT_VSX_P (V2DFmode)
5957 && out_mode == DFmode && out_n == 2
5958 && in_mode == DFmode && in_n == 2)
5959 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5960 if (VECTOR_UNIT_VSX_P (V4SFmode)
5961 && out_mode == SFmode && out_n == 4
5962 && in_mode == SFmode && in_n == 4)
5963 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5964 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5965 && out_mode == SFmode && out_n == 4
5966 && in_mode == SFmode && in_n == 4)
5967 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5968 break;
5969 CASE_CFN_FMA:
5970 if (VECTOR_UNIT_VSX_P (V2DFmode)
5971 && out_mode == DFmode && out_n == 2
5972 && in_mode == DFmode && in_n == 2)
5973 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5974 if (VECTOR_UNIT_VSX_P (V4SFmode)
5975 && out_mode == SFmode && out_n == 4
5976 && in_mode == SFmode && in_n == 4)
5977 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5978 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5979 && out_mode == SFmode && out_n == 4
5980 && in_mode == SFmode && in_n == 4)
5981 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5982 break;
5983 CASE_CFN_TRUNC:
5984 if (VECTOR_UNIT_VSX_P (V2DFmode)
5985 && out_mode == DFmode && out_n == 2
5986 && in_mode == DFmode && in_n == 2)
5987 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5988 if (VECTOR_UNIT_VSX_P (V4SFmode)
5989 && out_mode == SFmode && out_n == 4
5990 && in_mode == SFmode && in_n == 4)
5991 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5992 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5993 && out_mode == SFmode && out_n == 4
5994 && in_mode == SFmode && in_n == 4)
5995 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5996 break;
5997 CASE_CFN_NEARBYINT:
5998 if (VECTOR_UNIT_VSX_P (V2DFmode)
5999 && flag_unsafe_math_optimizations
6000 && out_mode == DFmode && out_n == 2
6001 && in_mode == DFmode && in_n == 2)
6002 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
6003 if (VECTOR_UNIT_VSX_P (V4SFmode)
6004 && flag_unsafe_math_optimizations
6005 && out_mode == SFmode && out_n == 4
6006 && in_mode == SFmode && in_n == 4)
6007 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
6008 break;
6009 CASE_CFN_RINT:
6010 if (VECTOR_UNIT_VSX_P (V2DFmode)
6011 && !flag_trapping_math
6012 && out_mode == DFmode && out_n == 2
6013 && in_mode == DFmode && in_n == 2)
6014 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
6015 if (VECTOR_UNIT_VSX_P (V4SFmode)
6016 && !flag_trapping_math
6017 && out_mode == SFmode && out_n == 4
6018 && in_mode == SFmode && in_n == 4)
6019 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
6020 break;
6021 default:
6022 break;
6025 /* Generate calls to libmass if appropriate. */
6026 if (rs6000_veclib_handler)
6027 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
6029 return NULL_TREE;
6032 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
6034 static tree
6035 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
6036 tree type_in)
6038 machine_mode in_mode, out_mode;
6039 int in_n, out_n;
6041 if (TARGET_DEBUG_BUILTIN)
6042 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6043 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6044 GET_MODE_NAME (TYPE_MODE (type_out)),
6045 GET_MODE_NAME (TYPE_MODE (type_in)));
6047 if (TREE_CODE (type_out) != VECTOR_TYPE
6048 || TREE_CODE (type_in) != VECTOR_TYPE)
6049 return NULL_TREE;
6051 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6052 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6053 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6054 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6056 enum rs6000_builtins fn
6057 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6058 switch (fn)
6060 case RS6000_BUILTIN_RSQRTF:
6061 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6062 && out_mode == SFmode && out_n == 4
6063 && in_mode == SFmode && in_n == 4)
6064 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6065 break;
6066 case RS6000_BUILTIN_RSQRT:
6067 if (VECTOR_UNIT_VSX_P (V2DFmode)
6068 && out_mode == DFmode && out_n == 2
6069 && in_mode == DFmode && in_n == 2)
6070 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6071 break;
6072 case RS6000_BUILTIN_RECIPF:
6073 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6074 && out_mode == SFmode && out_n == 4
6075 && in_mode == SFmode && in_n == 4)
6076 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6077 break;
6078 case RS6000_BUILTIN_RECIP:
6079 if (VECTOR_UNIT_VSX_P (V2DFmode)
6080 && out_mode == DFmode && out_n == 2
6081 && in_mode == DFmode && in_n == 2)
6082 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6083 break;
6084 default:
6085 break;
6087 return NULL_TREE;
6090 /* Default CPU string for rs6000*_file_start functions. */
6091 static const char *rs6000_default_cpu;
6093 /* Do anything needed at the start of the asm file. */
6095 static void
6096 rs6000_file_start (void)
6098 char buffer[80];
6099 const char *start = buffer;
6100 FILE *file = asm_out_file;
6102 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6104 default_file_start ();
6106 if (flag_verbose_asm)
6108 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6110 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6112 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6113 start = "";
6116 if (global_options_set.x_rs6000_cpu_index)
6118 fprintf (file, "%s -mcpu=%s", start,
6119 processor_target_table[rs6000_cpu_index].name);
6120 start = "";
6123 if (global_options_set.x_rs6000_tune_index)
6125 fprintf (file, "%s -mtune=%s", start,
6126 processor_target_table[rs6000_tune_index].name);
6127 start = "";
6130 if (PPC405_ERRATUM77)
6132 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6133 start = "";
6136 #ifdef USING_ELFOS_H
6137 switch (rs6000_sdata)
6139 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6140 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6141 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6142 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6145 if (rs6000_sdata && g_switch_value)
6147 fprintf (file, "%s -G %d", start,
6148 g_switch_value);
6149 start = "";
6151 #endif
6153 if (*start == '\0')
6154 putc ('\n', file);
6157 #ifdef USING_ELFOS_H
6158 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6159 && !global_options_set.x_rs6000_cpu_index)
6161 fputs ("\t.machine ", asm_out_file);
6162 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6163 fputs ("power9\n", asm_out_file);
6164 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6165 fputs ("power8\n", asm_out_file);
6166 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6167 fputs ("power7\n", asm_out_file);
6168 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6169 fputs ("power6\n", asm_out_file);
6170 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6171 fputs ("power5\n", asm_out_file);
6172 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6173 fputs ("power4\n", asm_out_file);
6174 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6175 fputs ("ppc64\n", asm_out_file);
6176 else
6177 fputs ("ppc\n", asm_out_file);
6179 #endif
6181 if (DEFAULT_ABI == ABI_ELFv2)
6182 fprintf (file, "\t.abiversion 2\n");
6186 /* Return nonzero if this function is known to have a null epilogue. */
6188 int
6189 direct_return (void)
6191 if (reload_completed)
6193 rs6000_stack_t *info = rs6000_stack_info ();
6195 if (info->first_gp_reg_save == 32
6196 && info->first_fp_reg_save == 64
6197 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6198 && ! info->lr_save_p
6199 && ! info->cr_save_p
6200 && info->vrsave_size == 0
6201 && ! info->push_p)
6202 return 1;
6205 return 0;
6208 /* Return the number of instructions it takes to form a constant in an
6209 integer register. */
6211 int
6212 num_insns_constant_wide (HOST_WIDE_INT value)
6214 /* signed constant loadable with addi */
6215 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6216 return 1;
6218 /* constant loadable with addis */
6219 else if ((value & 0xffff) == 0
6220 && (value >> 31 == -1 || value >> 31 == 0))
6221 return 1;
6223 else if (TARGET_POWERPC64)
6225 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6226 HOST_WIDE_INT high = value >> 31;
6228 if (high == 0 || high == -1)
6229 return 2;
6231 high >>= 1;
6233 if (low == 0)
6234 return num_insns_constant_wide (high) + 1;
6235 else if (high == 0)
6236 return num_insns_constant_wide (low) + 1;
6237 else
6238 return (num_insns_constant_wide (high)
6239 + num_insns_constant_wide (low) + 1);
6242 else
6243 return 2;
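/* Worked examples (illustrative) for the counting above: 0x7fff fits the
   signed 16-bit addi immediate, so 1 insn; 0x12340000 has a zero low
   halfword and a sign-extendable upper part, so a single addis does it;
   0x12345678 needs addis + ori, so 2; and a full 64-bit constant such as
   0x123456789abcdef0 costs the insns for each 32-bit half plus one more
   to combine them.  */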
6246 int
6247 num_insns_constant (rtx op, machine_mode mode)
6249 HOST_WIDE_INT low, high;
6251 switch (GET_CODE (op))
6253 case CONST_INT:
6254 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6255 && rs6000_is_valid_and_mask (op, mode))
6256 return 2;
6257 else
6258 return num_insns_constant_wide (INTVAL (op));
6260 case CONST_WIDE_INT:
6262 int i;
6263 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6264 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6265 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6266 return ins;
6269 case CONST_DOUBLE:
6270 if (mode == SFmode || mode == SDmode)
6272 long l;
6274 if (DECIMAL_FLOAT_MODE_P (mode))
6275 REAL_VALUE_TO_TARGET_DECIMAL32
6276 (*CONST_DOUBLE_REAL_VALUE (op), l);
6277 else
6278 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6279 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6282 long l[2];
6283 if (DECIMAL_FLOAT_MODE_P (mode))
6284 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6285 else
6286 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6287 high = l[WORDS_BIG_ENDIAN == 0];
6288 low = l[WORDS_BIG_ENDIAN != 0];
6290 if (TARGET_32BIT)
6291 return (num_insns_constant_wide (low)
6292 + num_insns_constant_wide (high));
6293 else
6295 if ((high == 0 && low >= 0)
6296 || (high == -1 && low < 0))
6297 return num_insns_constant_wide (low);
6299 else if (rs6000_is_valid_and_mask (op, mode))
6300 return 2;
6302 else if (low == 0)
6303 return num_insns_constant_wide (high) + 1;
6305 else
6306 return (num_insns_constant_wide (high)
6307 + num_insns_constant_wide (low) + 1);
6310 default:
6311 gcc_unreachable ();
6315 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6316 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6317 corresponding element of the vector, but for V4SFmode and V2SFmode,
6318 the corresponding "float" is interpreted as an SImode integer. */
6320 HOST_WIDE_INT
6321 const_vector_elt_as_int (rtx op, unsigned int elt)
6323 rtx tmp;
6325 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6326 gcc_assert (GET_MODE (op) != V2DImode
6327 && GET_MODE (op) != V2DFmode);
6329 tmp = CONST_VECTOR_ELT (op, elt);
6330 if (GET_MODE (op) == V4SFmode
6331 || GET_MODE (op) == V2SFmode)
6332 tmp = gen_lowpart (SImode, tmp);
6333 return INTVAL (tmp);
6336 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6337 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6338 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6339 all items are set to the same value and contain COPIES replicas of the
6340 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6341 operand and the others are set to the value of the operand's msb. */
6343 static bool
6344 vspltis_constant (rtx op, unsigned step, unsigned copies)
6346 machine_mode mode = GET_MODE (op);
6347 machine_mode inner = GET_MODE_INNER (mode);
6349 unsigned i;
6350 unsigned nunits;
6351 unsigned bitsize;
6352 unsigned mask;
6354 HOST_WIDE_INT val;
6355 HOST_WIDE_INT splat_val;
6356 HOST_WIDE_INT msb_val;
6358 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6359 return false;
6361 nunits = GET_MODE_NUNITS (mode);
6362 bitsize = GET_MODE_BITSIZE (inner);
6363 mask = GET_MODE_MASK (inner);
6365 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6366 splat_val = val;
6367 msb_val = val >= 0 ? 0 : -1;
6369 /* Construct the value to be splatted, if possible. If not, return 0. */
6370 for (i = 2; i <= copies; i *= 2)
6372 HOST_WIDE_INT small_val;
6373 bitsize /= 2;
6374 small_val = splat_val >> bitsize;
6375 mask >>= bitsize;
6376 if (splat_val != ((HOST_WIDE_INT)
6377 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6378 | (small_val & mask)))
6379 return false;
6380 splat_val = small_val;
6383 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6384 if (EASY_VECTOR_15 (splat_val))
6387 /* Also check if we can splat, and then add the result to itself. Do so if
6388 the value is positive, or if the splat instruction is using OP's mode;
6389 for splat_val < 0, the splat and the add should use the same mode. */
6390 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6391 && (splat_val >= 0 || (step == 1 && copies == 1)))
6394 /* Also check if we are loading up the most significant bit, which can be done by
6395 loading up -1 and shifting the value left by -1. */
6396 else if (EASY_VECTOR_MSB (splat_val, inner))
6399 else
6400 return false;
6402 /* Check if VAL is present in every STEP-th element, and the
6403 other elements are filled with its most significant bit. */
6404 for (i = 1; i < nunits; ++i)
6406 HOST_WIDE_INT desired_val;
6407 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6408 if ((i & (step - 1)) == 0)
6409 desired_val = val;
6410 else
6411 desired_val = msb_val;
6413 if (desired_val != const_vector_elt_as_int (op, elt))
6414 return false;
6417 return true;
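/* Worked examples (illustrative): the V8HImode constant {5,5,5,5,5,5,5,5}
   fails the vspltisw test (step 2, copies 1) because the odd halfwords
   are not the sign bits of 5, but passes with step == 1, copies == 1 and
   is emitted as vspltish 5.  A V8HImode vector of eight 0x0505 halfwords
   instead matches step == 1, copies == 2: each element is two copies of
   the byte 5, so vspltisb 5 reproduces it.  */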
6420 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6421 instruction, filling in the bottom elements with 0 or -1.
6423 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6424 for the number of zeroes to shift in, or negative for the number of 0xff
6425 bytes to shift in.
6427 OP is a CONST_VECTOR. */
6429 int
6430 vspltis_shifted (rtx op)
6432 machine_mode mode = GET_MODE (op);
6433 machine_mode inner = GET_MODE_INNER (mode);
6435 unsigned i, j;
6436 unsigned nunits;
6437 unsigned mask;
6439 HOST_WIDE_INT val;
6441 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6442 return 0;
6444 /* We need to create pseudo registers to do the shift, so don't recognize
6445 shift vector constants after reload. */
6446 if (!can_create_pseudo_p ())
6447 return 0;
6449 nunits = GET_MODE_NUNITS (mode);
6450 mask = GET_MODE_MASK (inner);
6452 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6454 /* Check if the value can really be the operand of a vspltis[bhw]. */
6455 if (EASY_VECTOR_15 (val))
6458 /* Also check if we are loading up the most significant bit which can be done
6459 by loading up -1 and shifting the value left by -1. */
6460 else if (EASY_VECTOR_MSB (val, inner))
6463 else
6464 return 0;
6466 /* Check if VAL is present in every STEP-th element until we find elements
6467 that are 0 or all 1 bits. */
6468 for (i = 1; i < nunits; ++i)
6470 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6471 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6473 /* If the value isn't the splat value, check for the remaining elements
6474 being 0/-1. */
6475 if (val != elt_val)
6477 if (elt_val == 0)
6479 for (j = i+1; j < nunits; ++j)
6481 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6482 if (const_vector_elt_as_int (op, elt2) != 0)
6483 return 0;
6486 return (nunits - i) * GET_MODE_SIZE (inner);
6489 else if ((elt_val & mask) == mask)
6491 for (j = i+1; j < nunits; ++j)
6493 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6494 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6495 return 0;
6498 return -((nunits - i) * GET_MODE_SIZE (inner));
6501 else
6502 return 0;
6506 /* If all elements are equal, we don't need to do VSLDOI. */
6507 return 0;
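/* Worked examples (illustrative, big-endian element order): the V4SImode
   constant {5, 0, 0, 0} is the splat of 5 followed by three zero words,
   so the routine returns 3 * 4 == 12 zero bytes to shift in and the move
   is split into vspltisw + vsldoi; {5, 5, -1, -1} returns -8, asking for
   eight 0xff bytes instead.  */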
6511 /* Return true if OP is of the given MODE and can be synthesized
6512 with a vspltisb, vspltish or vspltisw. */
6514 bool
6515 easy_altivec_constant (rtx op, machine_mode mode)
6517 unsigned step, copies;
6519 if (mode == VOIDmode)
6520 mode = GET_MODE (op);
6521 else if (mode != GET_MODE (op))
6522 return false;
6524 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6525 constants. */
6526 if (mode == V2DFmode)
6527 return zero_constant (op, mode);
6529 else if (mode == V2DImode)
6531 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6532 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6533 return false;
6535 if (zero_constant (op, mode))
6536 return true;
6538 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6539 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6540 return true;
6542 return false;
6545 /* V1TImode is a special container for TImode. Ignore for now. */
6546 else if (mode == V1TImode)
6547 return false;
6549 /* Start with a vspltisw. */
6550 step = GET_MODE_NUNITS (mode) / 4;
6551 copies = 1;
6553 if (vspltis_constant (op, step, copies))
6554 return true;
6556 /* Then try with a vspltish. */
6557 if (step == 1)
6558 copies <<= 1;
6559 else
6560 step >>= 1;
6562 if (vspltis_constant (op, step, copies))
6563 return true;
6565 /* And finally a vspltisb. */
6566 if (step == 1)
6567 copies <<= 1;
6568 else
6569 step >>= 1;
6571 if (vspltis_constant (op, step, copies))
6572 return true;
6574 if (vspltis_shifted (op) != 0)
6575 return true;
6577 return false;
6580 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6581 result is OP. Abort if it is not possible. */
6583 rtx
6584 gen_easy_altivec_constant (rtx op)
6586 machine_mode mode = GET_MODE (op);
6587 int nunits = GET_MODE_NUNITS (mode);
6588 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6589 unsigned step = nunits / 4;
6590 unsigned copies = 1;
6592 /* Start with a vspltisw. */
6593 if (vspltis_constant (op, step, copies))
6594 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6596 /* Then try with a vspltish. */
6597 if (step == 1)
6598 copies <<= 1;
6599 else
6600 step >>= 1;
6602 if (vspltis_constant (op, step, copies))
6603 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6605 /* And finally a vspltisb. */
6606 if (step == 1)
6607 copies <<= 1;
6608 else
6609 step >>= 1;
6611 if (vspltis_constant (op, step, copies))
6612 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6614 gcc_unreachable ();
6617 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6618 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6620 Return the number of instructions needed (1 or 2) in the location pointed
6621 to by NUM_INSNS_PTR.
6623 Return the constant that is being split via CONSTANT_PTR. */
6625 bool
6626 xxspltib_constant_p (rtx op,
6627 machine_mode mode,
6628 int *num_insns_ptr,
6629 int *constant_ptr)
6631 size_t nunits = GET_MODE_NUNITS (mode);
6632 size_t i;
6633 HOST_WIDE_INT value;
6634 rtx element;
6636 /* Set the returned values to out of bound values. */
6637 *num_insns_ptr = -1;
6638 *constant_ptr = 256;
6640 if (!TARGET_P9_VECTOR)
6641 return false;
6643 if (mode == VOIDmode)
6644 mode = GET_MODE (op);
6646 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6647 return false;
6649 /* Handle (vec_duplicate <constant>). */
6650 if (GET_CODE (op) == VEC_DUPLICATE)
6652 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6653 && mode != V2DImode)
6654 return false;
6656 element = XEXP (op, 0);
6657 if (!CONST_INT_P (element))
6658 return false;
6660 value = INTVAL (element);
6661 if (!IN_RANGE (value, -128, 127))
6662 return false;
6665 /* Handle (const_vector [...]). */
6666 else if (GET_CODE (op) == CONST_VECTOR)
6668 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6669 && mode != V2DImode)
6670 return false;
6672 element = CONST_VECTOR_ELT (op, 0);
6673 if (!CONST_INT_P (element))
6674 return false;
6676 value = INTVAL (element);
6677 if (!IN_RANGE (value, -128, 127))
6678 return false;
6680 for (i = 1; i < nunits; i++)
6682 element = CONST_VECTOR_ELT (op, i);
6683 if (!CONST_INT_P (element))
6684 return false;
6686 if (value != INTVAL (element))
6687 return false;
6691 /* Handle integer constants being loaded into the upper part of the VSX
6692 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6693 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6694 else if (CONST_INT_P (op))
6696 if (!SCALAR_INT_MODE_P (mode))
6697 return false;
6699 value = INTVAL (op);
6700 if (!IN_RANGE (value, -128, 127))
6701 return false;
6703 if (!IN_RANGE (value, -1, 0))
6705 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6706 return false;
6708 if (EASY_VECTOR_15 (value))
6709 return false;
6713 else
6714 return false;
6716 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6717 sign extend. Special case 0/-1 to allow getting any VSX register instead
6718 of an Altivec register. */
6719 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6720 && EASY_VECTOR_15 (value))
6721 return false;
6723 /* Return # of instructions and the constant byte for XXSPLTIB. */
6724 if (mode == V16QImode)
6725 *num_insns_ptr = 1;
6727 else if (IN_RANGE (value, -1, 0))
6728 *num_insns_ptr = 1;
6730 else
6731 *num_insns_ptr = 2;
6733 *constant_ptr = (int) value;
6734 return true;
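/* Worked examples (illustrative): a V16QImode splat of 37 is a single
   xxspltib; a V4SImode splat of 37 needs xxspltib plus a vextsb2w sign
   extension, so *NUM_INSNS_PTR is 2; and a V4SImode splat of 5 is
   rejected, because a single vspltisw 5 is cheaper than xxspltib plus
   a sign extension.  */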
6737 const char *
6738 output_vec_const_move (rtx *operands)
6740 int shift;
6741 machine_mode mode;
6742 rtx dest, vec;
6744 dest = operands[0];
6745 vec = operands[1];
6746 mode = GET_MODE (dest);
6748 if (TARGET_VSX)
6750 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6751 int xxspltib_value = 256;
6752 int num_insns = -1;
6754 if (zero_constant (vec, mode))
6756 if (TARGET_P9_VECTOR)
6757 return "xxspltib %x0,0";
6759 else if (dest_vmx_p)
6760 return "vspltisw %0,0";
6762 else
6763 return "xxlxor %x0,%x0,%x0";
6766 if (all_ones_constant (vec, mode))
6768 if (TARGET_P9_VECTOR)
6769 return "xxspltib %x0,255";
6771 else if (dest_vmx_p)
6772 return "vspltisw %0,-1";
6774 else if (TARGET_P8_VECTOR)
6775 return "xxlorc %x0,%x0,%x0";
6777 else
6778 gcc_unreachable ();
6781 if (TARGET_P9_VECTOR
6782 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6784 if (num_insns == 1)
6786 operands[2] = GEN_INT (xxspltib_value & 0xff);
6787 return "xxspltib %x0,%2";
6790 return "#";
6794 if (TARGET_ALTIVEC)
6796 rtx splat_vec;
6798 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6799 if (zero_constant (vec, mode))
6800 return "vspltisw %0,0";
6802 if (all_ones_constant (vec, mode))
6803 return "vspltisw %0,-1";
6805 /* Do we need to construct a value using VSLDOI? */
6806 shift = vspltis_shifted (vec);
6807 if (shift != 0)
6808 return "#";
6810 splat_vec = gen_easy_altivec_constant (vec);
6811 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6812 operands[1] = XEXP (splat_vec, 0);
6813 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6814 return "#";
6816 switch (GET_MODE (splat_vec))
6818 case E_V4SImode:
6819 return "vspltisw %0,%1";
6821 case E_V8HImode:
6822 return "vspltish %0,%1";
6824 case E_V16QImode:
6825 return "vspltisb %0,%1";
6827 default:
6828 gcc_unreachable ();
6832 gcc_unreachable ();
6835 /* Initialize the PAIRED vector TARGET to VALS. */
6837 void
6838 paired_expand_vector_init (rtx target, rtx vals)
6840 machine_mode mode = GET_MODE (target);
6841 int n_elts = GET_MODE_NUNITS (mode);
6842 int n_var = 0;
6843 rtx x, new_rtx, tmp, constant_op, op1, op2;
6844 int i;
6846 for (i = 0; i < n_elts; ++i)
6848 x = XVECEXP (vals, 0, i);
6849 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6850 ++n_var;
6852 if (n_var == 0)
6854 /* Load from constant pool. */
6855 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6856 return;
6859 if (n_var == 2)
6861 /* The vector is initialized only with non-constants. */
6862 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6863 XVECEXP (vals, 0, 1));
6865 emit_move_insn (target, new_rtx);
6866 return;
6869 /* One field is non-constant and the other one is a constant. Load the
6870 constant from the constant pool and use ps_merge instruction to
6871 construct the whole vector. */
6872 op1 = XVECEXP (vals, 0, 0);
6873 op2 = XVECEXP (vals, 0, 1);
6875 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6877 tmp = gen_reg_rtx (GET_MODE (constant_op));
6878 emit_move_insn (tmp, constant_op);
6880 if (CONSTANT_P (op1))
6881 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6882 else
6883 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6885 emit_move_insn (target, new_rtx);
6888 void
6889 paired_expand_vector_move (rtx operands[])
6891 rtx op0 = operands[0], op1 = operands[1];
6893 emit_move_insn (op0, op1);
6896 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6897 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6898 operands for the relation operation COND. This is a recursive
6899 function. */
6901 static void
6902 paired_emit_vector_compare (enum rtx_code rcode,
6903 rtx dest, rtx op0, rtx op1,
6904 rtx cc_op0, rtx cc_op1)
6906 rtx tmp = gen_reg_rtx (V2SFmode);
6907 rtx tmp1, max, min;
6909 gcc_assert (TARGET_PAIRED_FLOAT);
6910 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6912 switch (rcode)
6914 case LT:
6915 case LTU:
6916 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6917 return;
6918 case GE:
6919 case GEU:
6920 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6921 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6922 return;
6923 case LE:
6924 case LEU:
6925 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6926 return;
6927 case GT:
6928 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6929 return;
6930 case EQ:
6931 tmp1 = gen_reg_rtx (V2SFmode);
6932 max = gen_reg_rtx (V2SFmode);
6933 min = gen_reg_rtx (V2SFmode);
6936 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6937 emit_insn (gen_selv2sf4
6938 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6939 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6940 emit_insn (gen_selv2sf4
6941 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6942 emit_insn (gen_subv2sf3 (tmp1, min, max));
6943 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6944 return;
6945 case NE:
6946 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6947 return;
6948 case UNLE:
6949 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6950 return;
6951 case UNLT:
6952 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6953 return;
6954 case UNGE:
6955 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6956 return;
6957 case UNGT:
6958 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6959 return;
6960 default:
6961 gcc_unreachable ();
6964 return;
6967 /* Emit vector conditional expression.
6968 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6969 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6971 int
6972 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6973 rtx cond, rtx cc_op0, rtx cc_op1)
6975 enum rtx_code rcode = GET_CODE (cond);
6977 if (!TARGET_PAIRED_FLOAT)
6978 return 0;
6980 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6982 return 1;
6985 /* Initialize vector TARGET to VALS. */
6987 void
6988 rs6000_expand_vector_init (rtx target, rtx vals)
6990 machine_mode mode = GET_MODE (target);
6991 machine_mode inner_mode = GET_MODE_INNER (mode);
6992 int n_elts = GET_MODE_NUNITS (mode);
6993 int n_var = 0, one_var = -1;
6994 bool all_same = true, all_const_zero = true;
6995 rtx x, mem;
6996 int i;
6998 for (i = 0; i < n_elts; ++i)
7000 x = XVECEXP (vals, 0, i);
7001 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
7002 ++n_var, one_var = i;
7003 else if (x != CONST0_RTX (inner_mode))
7004 all_const_zero = false;
7006 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
7007 all_same = false;
7010 if (n_var == 0)
7012 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
7013 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
7014 if ((int_vector_p || TARGET_VSX) && all_const_zero)
7016 /* Zero register. */
7017 emit_move_insn (target, CONST0_RTX (mode));
7018 return;
7020 else if (int_vector_p && easy_vector_constant (const_vec, mode))
7022 /* Splat immediate. */
7023 emit_insn (gen_rtx_SET (target, const_vec));
7024 return;
7026 else
7028 /* Load from constant pool. */
7029 emit_move_insn (target, const_vec);
7030 return;
7034 /* Double word values on VSX can use xxpermdi or lxvdsx. */
7035 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
7037 rtx op[2];
7038 size_t i;
7039 size_t num_elements = all_same ? 1 : 2;
7040 for (i = 0; i < num_elements; i++)
7042 op[i] = XVECEXP (vals, 0, i);
7043 /* Just in case there is a SUBREG with a smaller mode, do a
7044 conversion. */
7045 if (GET_MODE (op[i]) != inner_mode)
7047 rtx tmp = gen_reg_rtx (inner_mode);
7048 convert_move (tmp, op[i], 0);
7049 op[i] = tmp;
7051 /* Allow load with splat double word. */
7052 else if (MEM_P (op[i]))
7054 if (!all_same)
7055 op[i] = force_reg (inner_mode, op[i]);
7057 else if (!REG_P (op[i]))
7058 op[i] = force_reg (inner_mode, op[i]);
7061 if (all_same)
7063 if (mode == V2DFmode)
7064 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7065 else
7066 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7068 else
7070 if (mode == V2DFmode)
7071 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7072 else
7073 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7075 return;
7078 /* Special case initializing vector int if we are on 64-bit systems with
7079 direct move or we have the ISA 3.0 instructions. */
7080 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7081 && TARGET_DIRECT_MOVE_64BIT)
7083 if (all_same)
7085 rtx element0 = XVECEXP (vals, 0, 0);
7086 if (MEM_P (element0))
7087 element0 = rs6000_address_for_fpconvert (element0);
7088 else
7089 element0 = force_reg (SImode, element0);
7091 if (TARGET_P9_VECTOR)
7092 emit_insn (gen_vsx_splat_v4si (target, element0));
7093 else
7095 rtx tmp = gen_reg_rtx (DImode);
7096 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7097 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7099 return;
7101 else
7103 rtx elements[4];
7104 size_t i;
7106 for (i = 0; i < 4; i++)
7108 elements[i] = XVECEXP (vals, 0, i);
7109 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7110 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7113 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7114 elements[2], elements[3]));
7115 return;
7119 /* With single precision floating point on VSX, we know that internally single
7120 precision is actually represented as a double, and either make 2 V2DF
7121 vectors, and convert these vectors to single precision, or do one
7122 conversion, and splat the result to the other elements. */
7123 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7125 if (all_same)
7127 rtx element0 = XVECEXP (vals, 0, 0);
7129 if (TARGET_P9_VECTOR)
7131 if (MEM_P (element0))
7132 element0 = rs6000_address_for_fpconvert (element0);
7134 emit_insn (gen_vsx_splat_v4sf (target, element0));
7137 else
7139 rtx freg = gen_reg_rtx (V4SFmode);
7140 rtx sreg = force_reg (SFmode, element0);
7141 rtx cvt = (TARGET_XSCVDPSPN
7142 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7143 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7145 emit_insn (cvt);
7146 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7147 const0_rtx));
7150 else
7152 rtx dbl_even = gen_reg_rtx (V2DFmode);
7153 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7154 rtx flt_even = gen_reg_rtx (V4SFmode);
7155 rtx flt_odd = gen_reg_rtx (V4SFmode);
7156 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7157 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7158 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7159 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7161 /* Use VMRGEW if we can instead of doing a permute. */
7162 if (TARGET_P8_VECTOR)
7164 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7165 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7166 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7167 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7168 if (BYTES_BIG_ENDIAN)
7169 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7170 else
7171 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7173 else
7175 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7176 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7177 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7178 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7179 rs6000_expand_extract_even (target, flt_even, flt_odd);
7182 return;
7185 /* Special case initializing vector short/char splats if we are on 64-bit
7186 systems with direct move. */
7187 if (all_same && TARGET_DIRECT_MOVE_64BIT
7188 && (mode == V16QImode || mode == V8HImode))
7190 rtx op0 = XVECEXP (vals, 0, 0);
7191 rtx di_tmp = gen_reg_rtx (DImode);
7193 if (!REG_P (op0))
7194 op0 = force_reg (GET_MODE_INNER (mode), op0);
7196 if (mode == V16QImode)
7198 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7199 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7200 return;
7203 if (mode == V8HImode)
7205 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7206 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7207 return;
7211 /* Store value to stack temp. Load vector element. Splat. However, splat
7212 of 64-bit items is not supported on AltiVec. */
7213 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7215 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7216 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7217 XVECEXP (vals, 0, 0));
7218 x = gen_rtx_UNSPEC (VOIDmode,
7219 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7220 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7221 gen_rtvec (2,
7222 gen_rtx_SET (target, mem),
7223 x)));
7224 x = gen_rtx_VEC_SELECT (inner_mode, target,
7225 gen_rtx_PARALLEL (VOIDmode,
7226 gen_rtvec (1, const0_rtx)));
7227 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7228 return;
7231 /* One field is non-constant. Load constant then overwrite
7232 varying field. */
7233 if (n_var == 1)
7235 rtx copy = copy_rtx (vals);
7237 /* Load constant part of vector, substitute neighboring value for
7238 varying element. */
7239 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7240 rs6000_expand_vector_init (target, copy);
7242 /* Insert variable. */
7243 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7244 return;
7247 /* Construct the vector in memory one field at a time
7248 and load the whole vector. */
7249 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7250 for (i = 0; i < n_elts; i++)
7251 emit_move_insn (adjust_address_nv (mem, inner_mode,
7252 i * GET_MODE_SIZE (inner_mode)),
7253 XVECEXP (vals, 0, i));
7254 emit_move_insn (target, mem);
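/* Illustrative sketch (editor's addition, not from the original source):
   when none of the special cases above apply, a V4SI initializer
   { a, b, c, d } falls through to roughly

     mem = assign_stack_temp (V4SImode, 16);
     emit_move_insn (adjust_address_nv (mem, SImode, 0), a);
     emit_move_insn (adjust_address_nv (mem, SImode, 4), b);
     emit_move_insn (adjust_address_nv (mem, SImode, 8), c);
     emit_move_insn (adjust_address_nv (mem, SImode, 12), d);
     emit_move_insn (target, mem);

   i.e. four scalar stores into a stack slot followed by one vector load,
   assuming 4-byte SImode elements.  */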
7257 /* Set field ELT of TARGET to VAL. */
7259 void
7260 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7262 machine_mode mode = GET_MODE (target);
7263 machine_mode inner_mode = GET_MODE_INNER (mode);
7264 rtx reg = gen_reg_rtx (mode);
7265 rtx mask, mem, x;
7266 int width = GET_MODE_SIZE (inner_mode);
7267 int i;
7269 val = force_reg (GET_MODE (val), val);
7271 if (VECTOR_MEM_VSX_P (mode))
7273 rtx insn = NULL_RTX;
7274 rtx elt_rtx = GEN_INT (elt);
7276 if (mode == V2DFmode)
7277 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7279 else if (mode == V2DImode)
7280 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7282 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7284 if (mode == V4SImode)
7285 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7286 else if (mode == V8HImode)
7287 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7288 else if (mode == V16QImode)
7289 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7290 else if (mode == V4SFmode)
7291 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7294 if (insn)
7296 emit_insn (insn);
7297 return;
7301 /* Simplify setting single element vectors like V1TImode. */
7302 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7304 emit_move_insn (target, gen_lowpart (mode, val));
7305 return;
7308 /* Load single variable value. */
7309 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7310 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7311 x = gen_rtx_UNSPEC (VOIDmode,
7312 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7313 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7314 gen_rtvec (2,
7315 gen_rtx_SET (reg, mem),
7316 x)));
7318 /* Linear sequence. */
7319 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7320 for (i = 0; i < 16; ++i)
7321 XVECEXP (mask, 0, i) = GEN_INT (i);
7323 /* Set permute mask to insert element into target. */
7324 for (i = 0; i < width; ++i)
7325 XVECEXP (mask, 0, elt*width + i)
7326 = GEN_INT (i + 0x10);
7327 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7329 if (BYTES_BIG_ENDIAN)
7330 x = gen_rtx_UNSPEC (mode,
7331 gen_rtvec (3, target, reg,
7332 force_reg (V16QImode, x)),
7333 UNSPEC_VPERM);
7334 else
7336 if (TARGET_P9_VECTOR)
7337 x = gen_rtx_UNSPEC (mode,
7338 gen_rtvec (3, target, reg,
7339 force_reg (V16QImode, x)),
7340 UNSPEC_VPERMR);
7341 else
7343 /* Invert selector. We prefer to generate VNAND on P8 so
7344 that future fusion opportunities can kick in, but must
7345 generate VNOR elsewhere. */
7346 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7347 rtx iorx = (TARGET_P8_VECTOR
7348 ? gen_rtx_IOR (V16QImode, notx, notx)
7349 : gen_rtx_AND (V16QImode, notx, notx));
7350 rtx tmp = gen_reg_rtx (V16QImode);
7351 emit_insn (gen_rtx_SET (tmp, iorx));
7353 /* Permute with operands reversed and adjusted selector. */
7354 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7355 UNSPEC_VPERM);
7359 emit_insn (gen_rtx_SET (target, x));
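/* Illustrative sketch (editor's addition): for V4SImode with ELT == 1
   (so WIDTH == 4), the selector built above is, byte by byte,

     { 0, 1, 2, 3, 0x10, 0x11, 0x12, 0x13, 8, 9, 10, 11, 12, 13, 14, 15 }

   In the big-endian VPERM case this is an identity permute of TARGET,
   except that bytes 4..7 come from the start of REG, which was loaded
   from the stack temp holding VAL.  */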
7362 /* Extract field ELT from VEC into TARGET. */
7364 void
7365 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7367 machine_mode mode = GET_MODE (vec);
7368 machine_mode inner_mode = GET_MODE_INNER (mode);
7369 rtx mem;
7371 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7373 switch (mode)
7375 default:
7376 break;
7377 case E_V1TImode:
7378 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7379 emit_move_insn (target, gen_lowpart (TImode, vec));
7380 break;
7381 case E_V2DFmode:
7382 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7383 return;
7384 case E_V2DImode:
7385 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7386 return;
7387 case E_V4SFmode:
7388 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7389 return;
7390 case E_V16QImode:
7391 if (TARGET_DIRECT_MOVE_64BIT)
7393 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7394 return;
7396 else
7397 break;
7398 case E_V8HImode:
7399 if (TARGET_DIRECT_MOVE_64BIT)
7401 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7402 return;
7404 else
7405 break;
7406 case E_V4SImode:
7407 if (TARGET_DIRECT_MOVE_64BIT)
7409 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7410 return;
7412 break;
7415 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7416 && TARGET_DIRECT_MOVE_64BIT)
7418 if (GET_MODE (elt) != DImode)
7420 rtx tmp = gen_reg_rtx (DImode);
7421 convert_move (tmp, elt, 0);
7422 elt = tmp;
7424 else if (!REG_P (elt))
7425 elt = force_reg (DImode, elt);
7427 switch (mode)
7429 case E_V2DFmode:
7430 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7431 return;
7433 case E_V2DImode:
7434 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7435 return;
7437 case E_V4SFmode:
7438 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7439 return;
7441 case E_V4SImode:
7442 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7443 return;
7445 case E_V8HImode:
7446 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7447 return;
7449 case E_V16QImode:
7450 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7451 return;
7453 default:
7454 gcc_unreachable ();
7458 gcc_assert (CONST_INT_P (elt));
7460 /* Allocate mode-sized buffer. */
7461 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7463 emit_move_insn (mem, vec);
7465 /* Add offset to field within buffer matching vector element. */
7466 mem = adjust_address_nv (mem, inner_mode,
7467 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7469 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
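/* Illustrative sketch (editor's addition): the fallback above spills the
   whole vector and reads one element back; for V4SImode with a constant
   ELT of 2 it is roughly equivalent to

     int tmp[4];                  // mode-sized, vector-aligned stack temp
     memcpy (tmp, &vec, 16);      // emit_move_insn (mem, vec)
     target = tmp[2];             // load at byte offset 2 * 4

   assuming 4-byte SImode elements.  */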
7472 /* Helper function to return the register number of an RTX. */
7473 static inline int
7474 regno_or_subregno (rtx op)
7476 if (REG_P (op))
7477 return REGNO (op);
7478 else if (SUBREG_P (op))
7479 return subreg_regno (op);
7480 else
7481 gcc_unreachable ();
7484 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7485 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7486 temporary (BASE_TMP) to fixup the address. Return the new memory address
7487 that is valid for reads or writes to a given register (SCALAR_REG). */
7489 static rtx
7490 rs6000_adjust_vec_address (rtx scalar_reg,
7491 rtx mem,
7492 rtx element,
7493 rtx base_tmp,
7494 machine_mode scalar_mode)
7496 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7497 rtx addr = XEXP (mem, 0);
7498 rtx element_offset;
7499 rtx new_addr;
7500 bool valid_addr_p;
7502 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7503 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7505 /* Calculate what we need to add to the address to get the element
7506 address. */
7507 if (CONST_INT_P (element))
7508 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7509 else
7511 int byte_shift = exact_log2 (scalar_size);
7512 gcc_assert (byte_shift >= 0);
7514 if (byte_shift == 0)
7515 element_offset = element;
7517 else
7519 if (TARGET_POWERPC64)
7520 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7521 else
7522 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7524 element_offset = base_tmp;
7528 /* Create the new address pointing to the element within the vector. If we
7529 are adding 0, we don't have to change the address. */
7530 if (element_offset == const0_rtx)
7531 new_addr = addr;
7533 /* A simple indirect address can be converted into a reg + offset
7534 address. */
7535 else if (REG_P (addr) || SUBREG_P (addr))
7536 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7538 /* Optimize D-FORM addresses with constant offset with a constant element, to
7539 include the element offset in the address directly. */
7540 else if (GET_CODE (addr) == PLUS)
7542 rtx op0 = XEXP (addr, 0);
7543 rtx op1 = XEXP (addr, 1);
7544 rtx insn;
7546 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7547 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7549 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7550 rtx offset_rtx = GEN_INT (offset);
7552 if (IN_RANGE (offset, -32768, 32767)
7553 && (scalar_size < 8 || (offset & 0x3) == 0))
7554 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7555 else
7557 emit_move_insn (base_tmp, offset_rtx);
7558 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7561 else
7563 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7564 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7566 /* Note, ADDI requires the register being added to be a base
7567 register. If the register was R0, load it up into the temporary
7568 and do the add. */
7569 if (op1_reg_p
7570 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7572 insn = gen_add3_insn (base_tmp, op1, element_offset);
7573 gcc_assert (insn != NULL_RTX);
7574 emit_insn (insn);
7577 else if (ele_reg_p
7578 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7580 insn = gen_add3_insn (base_tmp, element_offset, op1);
7581 gcc_assert (insn != NULL_RTX);
7582 emit_insn (insn);
7585 else
7587 emit_move_insn (base_tmp, op1);
7588 emit_insn (gen_add2_insn (base_tmp, element_offset));
7591 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7595 else
7597 emit_move_insn (base_tmp, addr);
7598 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7601 /* If we have a PLUS, we need to see whether the particular register class
7602 allows for D-FORM or X-FORM addressing. */
7603 if (GET_CODE (new_addr) == PLUS)
7605 rtx op1 = XEXP (new_addr, 1);
7606 addr_mask_type addr_mask;
7607 int scalar_regno = regno_or_subregno (scalar_reg);
7609 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7610 if (INT_REGNO_P (scalar_regno))
7611 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7613 else if (FP_REGNO_P (scalar_regno))
7614 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7616 else if (ALTIVEC_REGNO_P (scalar_regno))
7617 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7619 else
7620 gcc_unreachable ();
7622 if (REG_P (op1) || SUBREG_P (op1))
7623 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7624 else
7625 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7628 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7629 valid_addr_p = true;
7631 else
7632 valid_addr_p = false;
7634 if (!valid_addr_p)
7636 emit_move_insn (base_tmp, new_addr);
7637 new_addr = base_tmp;
7640 return change_address (mem, scalar_mode, new_addr);
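/* Illustrative sketch (editor's addition): given a vector MEM with
   address (plus r3 (const_int 16)), a constant ELEMENT of 2 and SImode
   scalars, ELEMENT_OFFSET is 2 * 4 = 8 and the combined D-form address
   becomes (plus r3 (const_int 24)).  That offset is within the signed
   16-bit range, so no BASE_TMP fixup is emitted.  */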
7643 /* Split a variable vec_extract operation into the component instructions. */
7645 void
7646 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7647 rtx tmp_altivec)
7649 machine_mode mode = GET_MODE (src);
7650 machine_mode scalar_mode = GET_MODE (dest);
7651 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7652 int byte_shift = exact_log2 (scalar_size);
7654 gcc_assert (byte_shift >= 0);
7656 /* If we are given a memory address, optimize to load just the element. We
7657 don't have to adjust the vector element number on little endian
7658 systems. */
7659 if (MEM_P (src))
7661 gcc_assert (REG_P (tmp_gpr));
7662 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7663 tmp_gpr, scalar_mode));
7664 return;
7667 else if (REG_P (src) || SUBREG_P (src))
7669 int bit_shift = byte_shift + 3;
7670 rtx element2;
7671 int dest_regno = regno_or_subregno (dest);
7672 int src_regno = regno_or_subregno (src);
7673 int element_regno = regno_or_subregno (element);
7675 gcc_assert (REG_P (tmp_gpr));
7677 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7678 a general purpose register. */
7679 if (TARGET_P9_VECTOR
7680 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7681 && INT_REGNO_P (dest_regno)
7682 && ALTIVEC_REGNO_P (src_regno)
7683 && INT_REGNO_P (element_regno))
7685 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7686 rtx element_si = gen_rtx_REG (SImode, element_regno);
7688 if (mode == V16QImode)
7689 emit_insn (VECTOR_ELT_ORDER_BIG
7690 ? gen_vextublx (dest_si, element_si, src)
7691 : gen_vextubrx (dest_si, element_si, src));
7693 else if (mode == V8HImode)
7695 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7696 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7697 emit_insn (VECTOR_ELT_ORDER_BIG
7698 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7699 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7703 else
7705 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7706 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7707 emit_insn (VECTOR_ELT_ORDER_BIG
7708 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7709 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7712 return;
7716 gcc_assert (REG_P (tmp_altivec));
7718 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7719 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7720 will shift the element into the upper position (adding 3 to convert a
7721 byte shift into a bit shift). */
7722 if (scalar_size == 8)
7724 if (!VECTOR_ELT_ORDER_BIG)
7726 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7727 element2 = tmp_gpr;
7729 else
7730 element2 = element;
7732 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7733 bit. */
7734 emit_insn (gen_rtx_SET (tmp_gpr,
7735 gen_rtx_AND (DImode,
7736 gen_rtx_ASHIFT (DImode,
7737 element2,
7738 GEN_INT (6)),
7739 GEN_INT (64))));
7741 else
7743 if (!VECTOR_ELT_ORDER_BIG)
7745 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7747 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7748 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7749 element2 = tmp_gpr;
7751 else
7752 element2 = element;
7754 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7757 /* Get the value into the lower byte of the AltiVec register where VSLO
7758 expects it. */
7759 if (TARGET_P9_VECTOR)
7760 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7761 else if (can_create_pseudo_p ())
7762 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7763 else
7765 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7766 emit_move_insn (tmp_di, tmp_gpr);
7767 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7770 /* Do the VSLO to get the value into the final location. */
7771 switch (mode)
7773 case E_V2DFmode:
7774 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7775 return;
7777 case E_V2DImode:
7778 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7779 return;
7781 case E_V4SFmode:
7783 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7784 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7785 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7786 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7787 tmp_altivec));
7789 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7790 return;
7793 case E_V4SImode:
7794 case E_V8HImode:
7795 case E_V16QImode:
7797 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7798 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7799 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7800 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7801 tmp_altivec));
7802 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7803 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7804 GEN_INT (64 - (8 * scalar_size))));
7805 return;
7808 default:
7809 gcc_unreachable ();
7812 return;
7814 else
7815 gcc_unreachable ();
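/* Illustrative sketch (editor's addition; register names are
   placeholders): for a variable extract from a V4SImode vector register
   on a big-endian ISA 3.0 target, the path above emits roughly

     slwi     rT, rE, 2          # element number -> byte offset
     vextuwlx rD, rT, vS         # left-indexed unsigned word extract

   while older targets fall through to the VSLO-based sequence (build
   the bit shift amount, splat it, VSLO, then move and shift right).  */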
7818 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7819 two SImode values. */
7821 static void
7822 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7824 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7826 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7828 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7829 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7831 emit_move_insn (dest, GEN_INT (const1 | const2));
7832 return;
7835 /* Put si1 into upper 32-bits of dest. */
7836 if (CONST_INT_P (si1))
7837 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7838 else
7840 /* Generate RLDIC. */
7841 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7842 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7843 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7844 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7845 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7846 emit_insn (gen_rtx_SET (dest, and_rtx));
7849 /* Put si2 into the temporary. */
7850 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7851 if (CONST_INT_P (si2))
7852 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7853 else
7854 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7856 /* Combine the two parts. */
7857 emit_insn (gen_iordi3 (dest, dest, tmp));
7858 return;
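/* Illustrative sketch (editor's addition): with constant SI1 == 1 and
   SI2 == 2, the function above emits a single move of
   (1 << 32) | 2 == 0x100000002 into DEST.  With SI1 in a register it
   instead shifts SI1 left by 32 under the 0xffffffff00000000 mask,
   zero-extends SI2 into TMP, and ORs the two halves together.  */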
7861 /* Split a V4SI initialization. */
7863 void
7864 rs6000_split_v4si_init (rtx operands[])
7866 rtx dest = operands[0];
7868 /* Destination is a GPR, build up the two DImode parts in place. */
7869 if (REG_P (dest) || SUBREG_P (dest))
7871 int d_regno = regno_or_subregno (dest);
7872 rtx scalar1 = operands[1];
7873 rtx scalar2 = operands[2];
7874 rtx scalar3 = operands[3];
7875 rtx scalar4 = operands[4];
7876 rtx tmp1 = operands[5];
7877 rtx tmp2 = operands[6];
7879 /* Even though we only need one temporary (plus the destination, which
7880 has an early clobber constraint), try to use two temporaries, one for
7881 each double word created. That way the 2nd insn scheduling pass can
7882 rearrange things so the two parts are done in parallel. */
7883 if (BYTES_BIG_ENDIAN)
7885 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7886 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7887 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7888 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7890 else
7892 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7893 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7894 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7895 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7896 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7898 return;
7901 else
7902 gcc_unreachable ();
7905 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7906 selects whether the alignment is ABI mandated, optional, or
7907 both ABI and optional alignment. */
7909 unsigned int
7910 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7912 if (how != align_opt)
7914 if (TREE_CODE (type) == VECTOR_TYPE)
7916 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7918 if (align < 64)
7919 align = 64;
7921 else if (align < 128)
7922 align = 128;
7926 if (how != align_abi)
7928 if (TREE_CODE (type) == ARRAY_TYPE
7929 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7931 if (align < BITS_PER_WORD)
7932 align = BITS_PER_WORD;
7936 return align;
7939 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7941 bool
7942 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7944 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7946 if (computed != 128)
7948 static bool warned;
7949 if (!warned && warn_psabi)
7951 warned = true;
7952 inform (input_location,
7953 "the layout of aggregates containing vectors with"
7954 " %d-byte alignment has changed in GCC 5",
7955 computed / BITS_PER_UNIT);
7958 /* In current GCC there is no special case. */
7959 return false;
7962 return false;
7965 /* AIX increases natural record alignment to doubleword if the first
7966 field is an FP double while the FP fields remain word aligned. */
7968 unsigned int
7969 rs6000_special_round_type_align (tree type, unsigned int computed,
7970 unsigned int specified)
7972 unsigned int align = MAX (computed, specified);
7973 tree field = TYPE_FIELDS (type);
7975 /* Skip all non-field decls. */
7976 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7977 field = DECL_CHAIN (field);
7979 if (field != NULL && field != type)
7981 type = TREE_TYPE (field);
7982 while (TREE_CODE (type) == ARRAY_TYPE)
7983 type = TREE_TYPE (type);
7985 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7986 align = MAX (align, 64);
7989 return align;
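/* Illustrative sketch (editor's addition): under this AIX rule,

     struct { double d; int i; };    // first field has DFmode

   has its record alignment raised to at least 64 bits, while the double
   member itself keeps its word alignment within the record.  */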
7992 /* Darwin increases record alignment to the natural alignment of
7993 the first field. */
7995 unsigned int
7996 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7997 unsigned int specified)
7999 unsigned int align = MAX (computed, specified);
8001 if (TYPE_PACKED (type))
8002 return align;
8004 /* Find the first field, looking down into aggregates. */
8005 do {
8006 tree field = TYPE_FIELDS (type);
8008 /* Skip all non-field decls. */
8008 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
8009 field = DECL_CHAIN (field);
8010 if (! field)
8011 break;
8012 /* A packed field does not contribute any extra alignment. */
8013 if (DECL_PACKED (field))
8014 return align;
8015 type = TREE_TYPE (field);
8016 while (TREE_CODE (type) == ARRAY_TYPE)
8017 type = TREE_TYPE (type);
8018 } while (AGGREGATE_TYPE_P (type));
8020 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
8021 align = MAX (align, TYPE_ALIGN (type));
8023 return align;
8026 /* Return 1 for an operand in small memory on V.4/eabi. */
8028 int
8029 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8030 machine_mode mode ATTRIBUTE_UNUSED)
8032 #if TARGET_ELF
8033 rtx sym_ref;
8035 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8036 return 0;
8038 if (DEFAULT_ABI != ABI_V4)
8039 return 0;
8041 if (GET_CODE (op) == SYMBOL_REF)
8042 sym_ref = op;
8044 else if (GET_CODE (op) != CONST
8045 || GET_CODE (XEXP (op, 0)) != PLUS
8046 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8047 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8048 return 0;
8050 else
8052 rtx sum = XEXP (op, 0);
8053 HOST_WIDE_INT summand;
8055 /* We have to be careful here, because it is the referenced address
8056 that must be 32k from _SDA_BASE_, not just the symbol. */
8057 summand = INTVAL (XEXP (sum, 1));
8058 if (summand < 0 || summand > g_switch_value)
8059 return 0;
8061 sym_ref = XEXP (sum, 0);
8064 return SYMBOL_REF_SMALL_P (sym_ref);
8065 #else
8066 return 0;
8067 #endif
8070 /* Return true if either operand is a general purpose register. */
8072 bool
8073 gpr_or_gpr_p (rtx op0, rtx op1)
8075 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8076 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8079 /* Return true if this is a direct move operation between GPR registers and
8080 floating point/VSX registers. */
8082 bool
8083 direct_move_p (rtx op0, rtx op1)
8085 int regno0, regno1;
8087 if (!REG_P (op0) || !REG_P (op1))
8088 return false;
8090 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8091 return false;
8093 regno0 = REGNO (op0);
8094 regno1 = REGNO (op1);
8095 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8096 return false;
8098 if (INT_REGNO_P (regno0))
8099 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8101 else if (INT_REGNO_P (regno1))
8103 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8104 return true;
8106 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8107 return true;
8110 return false;
8113 /* Return true if the OFFSET is valid for the quad address instructions that
8114 use d-form (register + offset) addressing. */
8116 static inline bool
8117 quad_address_offset_p (HOST_WIDE_INT offset)
8119 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
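/* Illustrative sketch (editor's addition): the offset must be a 16-byte
   multiple within the signed 16-bit D-form range, e.g.

     quad_address_offset_p (32752)     // 0x7ff0 -> true
     quad_address_offset_p (32760)     // 0x7ff8, low nibble set -> false
     quad_address_offset_p (-32768)    // in range and 16-aligned -> true

   mirroring the DQ-form encoding used by LXV/STXV.  */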
8122 /* Return true if the ADDR is an acceptable address for a quad memory
8123 operation of mode MODE (either LQ/STQ for general purpose registers, or
8124 LXV/STXV for vector registers under ISA 3.0). STRICT requires strict
8125 checking of base registers, as in legitimate_indirect_address_p. */
8128 bool
8129 quad_address_p (rtx addr, machine_mode mode, bool strict)
8131 rtx op0, op1;
8133 if (GET_MODE_SIZE (mode) != 16)
8134 return false;
8136 if (legitimate_indirect_address_p (addr, strict))
8137 return true;
8139 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8140 return false;
8142 if (GET_CODE (addr) != PLUS)
8143 return false;
8145 op0 = XEXP (addr, 0);
8146 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8147 return false;
8149 op1 = XEXP (addr, 1);
8150 if (!CONST_INT_P (op1))
8151 return false;
8153 return quad_address_offset_p (INTVAL (op1));
8156 /* Return true if this is a load or store quad operation. This function does
8157 not handle the atomic quad memory instructions. */
8159 bool
8160 quad_load_store_p (rtx op0, rtx op1)
8162 bool ret;
8164 if (!TARGET_QUAD_MEMORY)
8165 ret = false;
8167 else if (REG_P (op0) && MEM_P (op1))
8168 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8169 && quad_memory_operand (op1, GET_MODE (op1))
8170 && !reg_overlap_mentioned_p (op0, op1));
8172 else if (MEM_P (op0) && REG_P (op1))
8173 ret = (quad_memory_operand (op0, GET_MODE (op0))
8174 && quad_int_reg_operand (op1, GET_MODE (op1)));
8176 else
8177 ret = false;
8179 if (TARGET_DEBUG_ADDR)
8181 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8182 ret ? "true" : "false");
8183 debug_rtx (gen_rtx_SET (op0, op1));
8186 return ret;
8189 /* Given an address, return a constant offset term if one exists. */
8191 static rtx
8192 address_offset (rtx op)
8194 if (GET_CODE (op) == PRE_INC
8195 || GET_CODE (op) == PRE_DEC)
8196 op = XEXP (op, 0);
8197 else if (GET_CODE (op) == PRE_MODIFY
8198 || GET_CODE (op) == LO_SUM)
8199 op = XEXP (op, 1);
8201 if (GET_CODE (op) == CONST)
8202 op = XEXP (op, 0);
8204 if (GET_CODE (op) == PLUS)
8205 op = XEXP (op, 1);
8207 if (CONST_INT_P (op))
8208 return op;
8210 return NULL_RTX;
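/* Illustrative sketch (editor's addition): address_offset peels
   PRE_INC/PRE_DEC/PRE_MODIFY/LO_SUM wrappers, then CONST and PLUS, to
   expose a constant term, e.g.

     (plus (reg r3) (const_int 8))                         -> (const_int 8)
     (lo_sum (reg r9)
             (const (plus (symbol_ref s) (const_int 12)))) -> (const_int 12)
     (reg r3)                                              -> NULL_RTX  */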
8213 /* Return true if the MEM operand is a memory operand suitable for use
8214 with a (full width, possibly multiple) gpr load/store. On
8215 powerpc64 this means the offset must be divisible by 4.
8216 Implements 'Y' constraint.
8218 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8219 a constraint function we know the operand has satisfied a suitable
8220 memory predicate. Also accept some odd rtl generated by reload
8221 (see rs6000_legitimize_reload_address for various forms). It is
8222 important that reload rtl be accepted by appropriate constraints
8223 but not by the operand predicate.
8225 Offsetting a lo_sum should not be allowed, except where we know by
8226 alignment that a 32k boundary is not crossed, but see the ???
8227 comment in rs6000_legitimize_reload_address. Note that by
8228 "offsetting" here we mean a further offset to access parts of the
8229 MEM. It's fine to have a lo_sum where the inner address is offset
8230 from a sym, since the same sym+offset will appear in the high part
8231 of the address calculation. */
8233 bool
8234 mem_operand_gpr (rtx op, machine_mode mode)
8236 unsigned HOST_WIDE_INT offset;
8237 int extra;
8238 rtx addr = XEXP (op, 0);
8240 op = address_offset (addr);
8241 if (op == NULL_RTX)
8242 return true;
8244 offset = INTVAL (op);
8245 if (TARGET_POWERPC64 && (offset & 3) != 0)
8246 return false;
8248 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8249 if (extra < 0)
8250 extra = 0;
8252 if (GET_CODE (addr) == LO_SUM)
8253 /* For lo_sum addresses, we must allow any offset except one that
8254 causes a wrap, so test only the low 16 bits. */
8255 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8257 return offset + 0x8000 < 0x10000u - extra;
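/* Illustrative sketch (editor's addition): for a TImode access on
   powerpc64, EXTRA is 16 - 8 = 8, so an offset of 32760 passes the
   multiple-of-4 check but fails 32760 + 0x8000 < 0x10000 - 8, since the
   second doubleword at offset +8 would be unreachable; 32752 is
   accepted.  */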
8260 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8261 enforce an offset divisible by 4 even for 32-bit. */
8263 bool
8264 mem_operand_ds_form (rtx op, machine_mode mode)
8266 unsigned HOST_WIDE_INT offset;
8267 int extra;
8268 rtx addr = XEXP (op, 0);
8270 if (!offsettable_address_p (false, mode, addr))
8271 return false;
8273 op = address_offset (addr);
8274 if (op == NULL_RTX)
8275 return true;
8277 offset = INTVAL (op);
8278 if ((offset & 3) != 0)
8279 return false;
8281 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8282 if (extra < 0)
8283 extra = 0;
8285 if (GET_CODE (addr) == LO_SUM)
8286 /* For lo_sum addresses, we must allow any offset except one that
8287 causes a wrap, so test only the low 16 bits. */
8288 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8290 return offset + 0x8000 < 0x10000u - extra;
8293 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8295 static bool
8296 reg_offset_addressing_ok_p (machine_mode mode)
8298 switch (mode)
8300 case E_V16QImode:
8301 case E_V8HImode:
8302 case E_V4SFmode:
8303 case E_V4SImode:
8304 case E_V2DFmode:
8305 case E_V2DImode:
8306 case E_V1TImode:
8307 case E_TImode:
8308 case E_TFmode:
8309 case E_KFmode:
8310 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8311 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8312 a vector mode, if we want to use the VSX registers to move it around,
8313 we need to restrict ourselves to reg+reg addressing. Similarly for
8314 IEEE 128-bit floating point that is passed in a single vector
8315 register. */
8316 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8317 return mode_supports_vsx_dform_quad (mode);
8318 break;
8320 case E_V2SImode:
8321 case E_V2SFmode:
8322 /* Paired vector modes. Only reg+reg addressing is valid. */
8323 if (TARGET_PAIRED_FLOAT)
8324 return false;
8325 break;
8327 case E_SDmode:
8328 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8329 addressing for the LFIWZX and STFIWX instructions. */
8330 if (TARGET_NO_SDMODE_STACK)
8331 return false;
8332 break;
8334 default:
8335 break;
8338 return true;
8341 static bool
8342 virtual_stack_registers_memory_p (rtx op)
8344 int regnum;
8346 if (GET_CODE (op) == REG)
8347 regnum = REGNO (op);
8349 else if (GET_CODE (op) == PLUS
8350 && GET_CODE (XEXP (op, 0)) == REG
8351 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8352 regnum = REGNO (XEXP (op, 0));
8354 else
8355 return false;
8357 return (regnum >= FIRST_VIRTUAL_REGISTER
8358 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8361 /* Return true if a MODE sized memory access to OP plus OFFSET
8362 is known to not straddle a 32k boundary. This function is used
8363 to determine whether -mcmodel=medium code can use TOC pointer
8364 relative addressing for OP. This means the alignment of the TOC
8365 pointer must also be taken into account, and unfortunately that is
8366 only 8 bytes. */
8368 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8369 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8370 #endif
8372 static bool
8373 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8374 machine_mode mode)
8376 tree decl;
8377 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8379 if (GET_CODE (op) != SYMBOL_REF)
8380 return false;
8382 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8383 SYMBOL_REF. */
8384 if (mode_supports_vsx_dform_quad (mode))
8385 return false;
8387 dsize = GET_MODE_SIZE (mode);
8388 decl = SYMBOL_REF_DECL (op);
8389 if (!decl)
8391 if (dsize == 0)
8392 return false;
8394 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8395 replacing memory addresses with an anchor plus offset. We
8396 could find the decl by rummaging around in the block->objects
8397 VEC for the given offset but that seems like too much work. */
8398 dalign = BITS_PER_UNIT;
8399 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8400 && SYMBOL_REF_ANCHOR_P (op)
8401 && SYMBOL_REF_BLOCK (op) != NULL)
8403 struct object_block *block = SYMBOL_REF_BLOCK (op);
8405 dalign = block->alignment;
8406 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8408 else if (CONSTANT_POOL_ADDRESS_P (op))
8410 /* It would be nice to have get_pool_align().. */
8411 machine_mode cmode = get_pool_mode (op);
8413 dalign = GET_MODE_ALIGNMENT (cmode);
8416 else if (DECL_P (decl))
8418 dalign = DECL_ALIGN (decl);
8420 if (dsize == 0)
8422 /* Allow BLKmode when the entire object is known to not
8423 cross a 32k boundary. */
8424 if (!DECL_SIZE_UNIT (decl))
8425 return false;
8427 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8428 return false;
8430 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8431 if (dsize > 32768)
8432 return false;
8434 dalign /= BITS_PER_UNIT;
8435 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8436 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8437 return dalign >= dsize;
8440 else
8441 gcc_unreachable ();
8443 /* Find how many bits of the alignment we know for this access. */
8444 dalign /= BITS_PER_UNIT;
8445 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8446 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8447 mask = dalign - 1;
8448 lsb = offset & -offset;
8449 mask &= lsb - 1;
8450 dalign = mask + 1;
8452 return dalign >= dsize;
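/* Illustrative sketch (editor's addition): for a decl aligned to 16
   bytes (DALIGN capped to POWERPC64_TOC_POINTER_ALIGNMENT == 8) and
   OFFSET == 40, lsb = 40 & -40 = 8 and mask = 7 & 7 = 7, giving a known
   alignment of 8 bytes, so any access with DSIZE <= 8 cannot straddle a
   32k boundary.  */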
8455 static bool
8456 constant_pool_expr_p (rtx op)
8458 rtx base, offset;
8460 split_const (op, &base, &offset);
8461 return (GET_CODE (base) == SYMBOL_REF
8462 && CONSTANT_POOL_ADDRESS_P (base)
8463 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8466 /* These are only used to pass through from print_operand/print_operand_address
8467 to rs6000_output_addr_const_extra over the intervening function
8468 output_addr_const which is not target code. */
8469 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8471 /* Return true if OP is a toc pointer relative address (the output
8472 of create_TOC_reference). If STRICT, do not match non-split
8473 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8474 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8475 TOCREL_OFFSET_RET respectively. */
8477 bool
8478 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8479 const_rtx *tocrel_offset_ret)
8481 if (!TARGET_TOC)
8482 return false;
8484 if (TARGET_CMODEL != CMODEL_SMALL)
8486 /* When strict ensure we have everything tidy. */
8487 if (strict
8488 && !(GET_CODE (op) == LO_SUM
8489 && REG_P (XEXP (op, 0))
8490 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8491 return false;
8493 /* When not strict, allow non-split TOC addresses and also allow
8494 (lo_sum (high ..)) TOC addresses created during reload. */
8495 if (GET_CODE (op) == LO_SUM)
8496 op = XEXP (op, 1);
8499 const_rtx tocrel_base = op;
8500 const_rtx tocrel_offset = const0_rtx;
8502 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8504 tocrel_base = XEXP (op, 0);
8505 tocrel_offset = XEXP (op, 1);
8508 if (tocrel_base_ret)
8509 *tocrel_base_ret = tocrel_base;
8510 if (tocrel_offset_ret)
8511 *tocrel_offset_ret = tocrel_offset;
8513 return (GET_CODE (tocrel_base) == UNSPEC
8514 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8517 /* Return true if X is a constant pool address, and also for cmodel=medium
8518 if X is a toc-relative address known to be offsettable within MODE. */
8520 bool
8521 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8522 bool strict)
8524 const_rtx tocrel_base, tocrel_offset;
8525 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8526 && (TARGET_CMODEL != CMODEL_MEDIUM
8527 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8528 || mode == QImode
8529 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8530 INTVAL (tocrel_offset), mode)));
8533 static bool
8534 legitimate_small_data_p (machine_mode mode, rtx x)
8536 return (DEFAULT_ABI == ABI_V4
8537 && !flag_pic && !TARGET_TOC
8538 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8539 && small_data_operand (x, mode));
8542 bool
8543 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8544 bool strict, bool worst_case)
8546 unsigned HOST_WIDE_INT offset;
8547 unsigned int extra;
8549 if (GET_CODE (x) != PLUS)
8550 return false;
8551 if (!REG_P (XEXP (x, 0)))
8552 return false;
8553 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8554 return false;
8555 if (mode_supports_vsx_dform_quad (mode))
8556 return quad_address_p (x, mode, strict);
8557 if (!reg_offset_addressing_ok_p (mode))
8558 return virtual_stack_registers_memory_p (x);
8559 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8560 return true;
8561 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8562 return false;
8564 offset = INTVAL (XEXP (x, 1));
8565 extra = 0;
8566 switch (mode)
8568 case E_V2SImode:
8569 case E_V2SFmode:
8570 /* Paired single modes: offset addressing isn't valid. */
8571 return false;
8573 case E_DFmode:
8574 case E_DDmode:
8575 case E_DImode:
8576 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8577 addressing. */
8578 if (VECTOR_MEM_VSX_P (mode))
8579 return false;
8581 if (!worst_case)
8582 break;
8583 if (!TARGET_POWERPC64)
8584 extra = 4;
8585 else if (offset & 3)
8586 return false;
8587 break;
8589 case E_TFmode:
8590 case E_IFmode:
8591 case E_KFmode:
8592 case E_TDmode:
8593 case E_TImode:
8594 case E_PTImode:
8595 extra = 8;
8596 if (!worst_case)
8597 break;
8598 if (!TARGET_POWERPC64)
8599 extra = 12;
8600 else if (offset & 3)
8601 return false;
8602 break;
8604 default:
8605 break;
8608 offset += 0x8000;
8609 return offset < 0x10000 - extra;
8612 bool
8613 legitimate_indexed_address_p (rtx x, int strict)
8615 rtx op0, op1;
8617 if (GET_CODE (x) != PLUS)
8618 return false;
8620 op0 = XEXP (x, 0);
8621 op1 = XEXP (x, 1);
8623 return (REG_P (op0) && REG_P (op1)
8624 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8625 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8626 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8627 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8630 bool
8631 avoiding_indexed_address_p (machine_mode mode)
8633 /* Avoid indexed addressing for modes that have non-indexed
8634 load/store instruction forms. */
8635 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8638 bool
8639 legitimate_indirect_address_p (rtx x, int strict)
8641 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8644 bool
8645 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8647 if (!TARGET_MACHO || !flag_pic
8648 || mode != SImode || GET_CODE (x) != MEM)
8649 return false;
8650 x = XEXP (x, 0);
8652 if (GET_CODE (x) != LO_SUM)
8653 return false;
8654 if (GET_CODE (XEXP (x, 0)) != REG)
8655 return false;
8656 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8657 return false;
8658 x = XEXP (x, 1);
8660 return CONSTANT_P (x);
8663 static bool
8664 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8666 if (GET_CODE (x) != LO_SUM)
8667 return false;
8668 if (GET_CODE (XEXP (x, 0)) != REG)
8669 return false;
8670 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8671 return false;
8672 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8673 if (mode_supports_vsx_dform_quad (mode))
8674 return false;
8675 x = XEXP (x, 1);
8677 if (TARGET_ELF || TARGET_MACHO)
8679 bool large_toc_ok;
8681 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8682 return false;
8683 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8684 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8685 recognizes some LO_SUM addresses as valid although this
8686 function says the opposite. In most cases, LRA through different
8687 transformations can generate correct code for address reloads.
8688 It just cannot manage some LO_SUM cases. So we need to add
8689 code analogous to the one in rs6000_legitimize_reload_address
8690 for LO_SUM here saying that some addresses are still valid. */
8691 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8692 && small_toc_ref (x, VOIDmode));
8693 if (TARGET_TOC && ! large_toc_ok)
8694 return false;
8695 if (GET_MODE_NUNITS (mode) != 1)
8696 return false;
8697 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8698 && !(/* ??? Assume floating point reg based on mode? */
8699 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8700 && (mode == DFmode || mode == DDmode)))
8701 return false;
8703 return CONSTANT_P (x) || large_toc_ok;
8706 return false;
8710 /* Try machine-dependent ways of modifying an illegitimate address
8711 to be legitimate. If we find one, return the new, valid address.
8712 This is used from only one place: `memory_address' in explow.c.
8714 OLDX is the address as it was before break_out_memory_refs was
8715 called. In some cases it is useful to look at this to decide what
8716 needs to be done.
8718 It is always safe for this function to do nothing. It exists to
8719 recognize opportunities to optimize the output.
8721 On RS/6000, first check for the sum of a register with a constant
8722 integer that is out of range. If so, generate code to add the
8723 constant with the low-order 16 bits masked to the register and force
8724 this result into another register (this can be done with `cau').
8725 Then generate an address of REG+(CONST&0xffff), allowing for the
8726 possibility of bit 16 being a one.
8728 Then check for the sum of a register and something not constant, try to
8729 load the other things into a register and return the sum. */
8731 static rtx
8732 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8733 machine_mode mode)
8735 unsigned int extra;
8737 if (!reg_offset_addressing_ok_p (mode)
8738 || mode_supports_vsx_dform_quad (mode))
8740 if (virtual_stack_registers_memory_p (x))
8741 return x;
8743 /* In theory we should not be seeing addresses of the form reg+0,
8744 but just in case it is generated, optimize it away. */
8745 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8746 return force_reg (Pmode, XEXP (x, 0));
8748 /* For TImode with load/store quad, restrict addresses to just a single
8749 pointer, so it works with both GPRs and VSX registers. */
8750 /* Make sure both operands are registers. */
8751 else if (GET_CODE (x) == PLUS
8752 && (mode != TImode || !TARGET_VSX))
8753 return gen_rtx_PLUS (Pmode,
8754 force_reg (Pmode, XEXP (x, 0)),
8755 force_reg (Pmode, XEXP (x, 1)));
8756 else
8757 return force_reg (Pmode, x);
8759 if (GET_CODE (x) == SYMBOL_REF)
8761 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8762 if (model != 0)
8763 return rs6000_legitimize_tls_address (x, model);
8766 extra = 0;
8767 switch (mode)
8769 case E_TFmode:
8770 case E_TDmode:
8771 case E_TImode:
8772 case E_PTImode:
8773 case E_IFmode:
8774 case E_KFmode:
8775 /* As in legitimate_offset_address_p we do not assume
8776 worst-case. The mode here is just a hint as to the registers
8777 used. A TImode is usually in gprs, but may actually be in
8778 fprs. Leave worst-case scenario for reload to handle via
8779 insn constraints. PTImode is only GPRs. */
8780 extra = 8;
8781 break;
8782 default:
8783 break;
8786 if (GET_CODE (x) == PLUS
8787 && GET_CODE (XEXP (x, 0)) == REG
8788 && GET_CODE (XEXP (x, 1)) == CONST_INT
8789 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8790 >= 0x10000 - extra)
8791 && !PAIRED_VECTOR_MODE (mode))
8793 HOST_WIDE_INT high_int, low_int;
8794 rtx sum;
8795 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8796 if (low_int >= 0x8000 - extra)
8797 low_int = 0;
8798 high_int = INTVAL (XEXP (x, 1)) - low_int;
8799 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8800 GEN_INT (high_int)), 0);
8801 return plus_constant (Pmode, sum, low_int);
8803 else if (GET_CODE (x) == PLUS
8804 && GET_CODE (XEXP (x, 0)) == REG
8805 && GET_CODE (XEXP (x, 1)) != CONST_INT
8806 && GET_MODE_NUNITS (mode) == 1
8807 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8808 || (/* ??? Assume floating point reg based on mode? */
8809 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8810 && (mode == DFmode || mode == DDmode)))
8811 && !avoiding_indexed_address_p (mode))
8813 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8814 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8816 else if (PAIRED_VECTOR_MODE (mode))
8818 if (mode == DImode)
8819 return x;
8820 /* We accept [reg + reg]. */
8822 if (GET_CODE (x) == PLUS)
8824 rtx op1 = XEXP (x, 0);
8825 rtx op2 = XEXP (x, 1);
8826 rtx y;
8828 op1 = force_reg (Pmode, op1);
8829 op2 = force_reg (Pmode, op2);
8831 /* We can't always do [reg + reg] for these, because [reg +
8832 reg + offset] is not a legitimate addressing mode. */
8833 y = gen_rtx_PLUS (Pmode, op1, op2);
8835 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8836 return force_reg (Pmode, y);
8837 else
8838 return y;
8841 return force_reg (Pmode, x);
8843 else if ((TARGET_ELF
8844 #if TARGET_MACHO
8845 || !MACHO_DYNAMIC_NO_PIC_P
8846 #endif
8848 && TARGET_32BIT
8849 && TARGET_NO_TOC
8850 && ! flag_pic
8851 && GET_CODE (x) != CONST_INT
8852 && GET_CODE (x) != CONST_WIDE_INT
8853 && GET_CODE (x) != CONST_DOUBLE
8854 && CONSTANT_P (x)
8855 && GET_MODE_NUNITS (mode) == 1
8856 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8857 || (/* ??? Assume floating point reg based on mode? */
8858 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8859 && (mode == DFmode || mode == DDmode))))
8861 rtx reg = gen_reg_rtx (Pmode);
8862 if (TARGET_ELF)
8863 emit_insn (gen_elf_high (reg, x));
8864 else
8865 emit_insn (gen_macho_high (reg, x));
8866 return gen_rtx_LO_SUM (Pmode, reg, x);
8868 else if (TARGET_TOC
8869 && GET_CODE (x) == SYMBOL_REF
8870 && constant_pool_expr_p (x)
8871 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8872 return create_TOC_reference (x, NULL_RTX);
8873 else
8874 return x;
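/* Illustrative sketch (editor's addition; register names are
   placeholders): for an out-of-range offset such as
   (plus r3 (const_int 0x12345)) in SImode, the split above yields
   low_int = 0x2345 and high_int = 0x10000, i.e. roughly

     addis rT, r3, 1             # add the high part, 0x10000
     lwz   rD, 0x2345(rT)        # residual offset fits the D field

   for a subsequent load, so both halves fit their encodings.  */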
8877 /* Debug version of rs6000_legitimize_address. */
8878 static rtx
8879 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8881 rtx ret;
8882 rtx_insn *insns;
8884 start_sequence ();
8885 ret = rs6000_legitimize_address (x, oldx, mode);
8886 insns = get_insns ();
8887 end_sequence ();
8889 if (ret != x)
8891 fprintf (stderr,
8892 "\nrs6000_legitimize_address: mode %s, old code %s, "
8893 "new code %s, modified\n",
8894 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8895 GET_RTX_NAME (GET_CODE (ret)));
8897 fprintf (stderr, "Original address:\n");
8898 debug_rtx (x);
8900 fprintf (stderr, "oldx:\n");
8901 debug_rtx (oldx);
8903 fprintf (stderr, "New address:\n");
8904 debug_rtx (ret);
8906 if (insns)
8908 fprintf (stderr, "Insns added:\n");
8909 debug_rtx_list (insns, 20);
8912 else
8914 fprintf (stderr,
8915 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8916 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8918 debug_rtx (x);
8921 if (insns)
8922 emit_insn (insns);
8924 return ret;
8927 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8928 We need to emit DTP-relative relocations. */
8930 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8931 static void
8932 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8934 switch (size)
8936 case 4:
8937 fputs ("\t.long\t", file);
8938 break;
8939 case 8:
8940 fputs (DOUBLE_INT_ASM_OP, file);
8941 break;
8942 default:
8943 gcc_unreachable ();
8945 output_addr_const (file, x);
8946 if (TARGET_ELF)
8947 fputs ("@dtprel+0x8000", file);
8948 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8950 switch (SYMBOL_REF_TLS_MODEL (x))
8952 case 0:
8953 break;
8954 case TLS_MODEL_LOCAL_EXEC:
8955 fputs ("@le", file);
8956 break;
8957 case TLS_MODEL_INITIAL_EXEC:
8958 fputs ("@ie", file);
8959 break;
8960 case TLS_MODEL_GLOBAL_DYNAMIC:
8961 case TLS_MODEL_LOCAL_DYNAMIC:
8962 fputs ("@m", file);
8963 break;
8964 default:
8965 gcc_unreachable ();
8970 /* Return true if X is a symbol that refers to real (rather than emulated)
8971 TLS. */
8973 static bool
8974 rs6000_real_tls_symbol_ref_p (rtx x)
8976 return (GET_CODE (x) == SYMBOL_REF
8977 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8980 /* In the name of slightly smaller debug output, and to cater to
8981 general assembler lossage, recognize various UNSPEC sequences
8982 and turn them back into a direct symbol reference. */
8984 static rtx
8985 rs6000_delegitimize_address (rtx orig_x)
8987 rtx x, y, offset;
8989 orig_x = delegitimize_mem_from_attrs (orig_x);
8990 x = orig_x;
8991 if (MEM_P (x))
8992 x = XEXP (x, 0);
8994 y = x;
8995 if (TARGET_CMODEL != CMODEL_SMALL
8996 && GET_CODE (y) == LO_SUM)
8997 y = XEXP (y, 1);
8999 offset = NULL_RTX;
9000 if (GET_CODE (y) == PLUS
9001 && GET_MODE (y) == Pmode
9002 && CONST_INT_P (XEXP (y, 1)))
9004 offset = XEXP (y, 1);
9005 y = XEXP (y, 0);
9008 if (GET_CODE (y) == UNSPEC
9009 && XINT (y, 1) == UNSPEC_TOCREL)
9011 y = XVECEXP (y, 0, 0);
9013 #ifdef HAVE_AS_TLS
9014 /* Do not associate thread-local symbols with the original
9015 constant pool symbol. */
9016 if (TARGET_XCOFF
9017 && GET_CODE (y) == SYMBOL_REF
9018 && CONSTANT_POOL_ADDRESS_P (y)
9019 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
9020 return orig_x;
9021 #endif
9023 if (offset != NULL_RTX)
9024 y = gen_rtx_PLUS (Pmode, y, offset);
9025 if (!MEM_P (orig_x))
9026 return y;
9027 else
9028 return replace_equiv_address_nv (orig_x, y);
9031 if (TARGET_MACHO
9032 && GET_CODE (orig_x) == LO_SUM
9033 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9035 y = XEXP (XEXP (orig_x, 1), 0);
9036 if (GET_CODE (y) == UNSPEC
9037 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9038 return XVECEXP (y, 0, 0);
9041 return orig_x;
9044 /* Return true if X shouldn't be emitted into the debug info.
9045 The linker doesn't like .toc section references from
9046 .debug_* sections, so reject .toc section symbols. */
9048 static bool
9049 rs6000_const_not_ok_for_debug_p (rtx x)
9051 if (GET_CODE (x) == SYMBOL_REF
9052 && CONSTANT_POOL_ADDRESS_P (x))
9054 rtx c = get_pool_constant (x);
9055 machine_mode cmode = get_pool_mode (x);
9056 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9057 return true;
9060 return false;
9064 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9066 static bool
9067 rs6000_legitimate_combined_insn (rtx_insn *insn)
9069 int icode = INSN_CODE (insn);
9071 /* Reject creating doloop insns. Combine should not be allowed
9072 to create these for a number of reasons:
9073 1) In a nested loop, if combine creates one of these in an
9074 outer loop and the register allocator happens to allocate ctr
9075 to the outer loop insn, then the inner loop can't use ctr.
9076 Inner loops ought to be more highly optimized.
9077 2) Combine often wants to create one of these from what was
9078 originally a three insn sequence, first combining the three
9079 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9080 allocated ctr, the splitter takes use back to the three insn
9081 sequence. It's better to stop combine at the two insn
9082 sequence.
9083 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9084 insns, the register allocator sometimes uses floating point
9085 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9086 jump insn and output reloads are not implemented for jumps,
9087 the ctrsi/ctrdi splitters need to handle all possible cases.
9088 That's a pain, and it gets to be seriously difficult when a
9089 splitter that runs after reload needs memory to transfer from
9090 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9091 for the difficult case. It's better to not create problems
9092 in the first place. */
9093 if (icode != CODE_FOR_nothing
9094 && (icode == CODE_FOR_ctrsi_internal1
9095 || icode == CODE_FOR_ctrdi_internal1
9096 || icode == CODE_FOR_ctrsi_internal2
9097 || icode == CODE_FOR_ctrdi_internal2
9098 || icode == CODE_FOR_ctrsi_internal3
9099 || icode == CODE_FOR_ctrdi_internal3
9100 || icode == CODE_FOR_ctrsi_internal4
9101 || icode == CODE_FOR_ctrdi_internal4))
9102 return false;
9104 return true;
9107 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9109 static GTY(()) rtx rs6000_tls_symbol;
9110 static rtx
9111 rs6000_tls_get_addr (void)
9113 if (!rs6000_tls_symbol)
9114 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9116 return rs6000_tls_symbol;
9119 /* Construct the SYMBOL_REF for TLS GOT references. */
9121 static GTY(()) rtx rs6000_got_symbol;
9122 static rtx
9123 rs6000_got_sym (void)
9125 if (!rs6000_got_symbol)
9127 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9128 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9129 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9132 return rs6000_got_symbol;
9135 /* AIX Thread-Local Address support. */
9137 static rtx
9138 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9140 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9141 const char *name;
9142 char *tlsname;
9144 name = XSTR (addr, 0);
9145 /* Append TLS CSECT qualifier, unless the symbol already is qualified
9146 or the symbol will be in the TLS private data section. */
9147 if (name[strlen (name) - 1] != ']'
9148 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9149 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9151 tlsname = XALLOCAVEC (char, strlen (name) + 4);
9152 strcpy (tlsname, name);
9153 strcat (tlsname,
9154 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9155 tlsaddr = copy_rtx (addr);
9156 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9158 else
9159 tlsaddr = addr;
9161 /* Place addr into TOC constant pool. */
9162 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9164 /* Output the TOC entry and create the MEM referencing the value. */
9165 if (constant_pool_expr_p (XEXP (sym, 0))
9166 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9168 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9169 mem = gen_const_mem (Pmode, tocref);
9170 set_mem_alias_set (mem, get_TOC_alias_set ());
9172 else
9173 return sym;
9175 /* Use global-dynamic for local-dynamic. */
9176 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9177 || model == TLS_MODEL_LOCAL_DYNAMIC)
9179 /* Create new TOC reference for @m symbol. */
9180 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9181 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9182 strcpy (tlsname, "*LCM");
9183 strcat (tlsname, name + 3);
9184 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9185 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9186 tocref = create_TOC_reference (modaddr, NULL_RTX);
9187 rtx modmem = gen_const_mem (Pmode, tocref);
9188 set_mem_alias_set (modmem, get_TOC_alias_set ());
9190 rtx modreg = gen_reg_rtx (Pmode);
9191 emit_insn (gen_rtx_SET (modreg, modmem));
9193 tmpreg = gen_reg_rtx (Pmode);
9194 emit_insn (gen_rtx_SET (tmpreg, mem));
9196 dest = gen_reg_rtx (Pmode);
9197 if (TARGET_32BIT)
9198 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9199 else
9200 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9201 return dest;
9203 /* Obtain TLS pointer: 32-bit call or 64-bit GPR 13. */
9204 else if (TARGET_32BIT)
9206 tlsreg = gen_reg_rtx (SImode);
9207 emit_insn (gen_tls_get_tpointer (tlsreg));
9209 else
9210 tlsreg = gen_rtx_REG (DImode, 13);
9212 /* Load the TOC value into temporary register. */
9213 tmpreg = gen_reg_rtx (Pmode);
9214 emit_insn (gen_rtx_SET (tmpreg, mem));
9215 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9216 gen_rtx_MINUS (Pmode, addr, tlsreg));
9218 /* Add TOC symbol value to TLS pointer. */
9219 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9221 return dest;
9224 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9225 this (thread-local) address. */
9227 static rtx
9228 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9230 rtx dest, insn;
9232 if (TARGET_XCOFF)
9233 return rs6000_legitimize_tls_address_aix (addr, model);
9235 dest = gen_reg_rtx (Pmode);
9236 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9238 rtx tlsreg;
9240 if (TARGET_64BIT)
9242 tlsreg = gen_rtx_REG (Pmode, 13);
9243 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9245 else
9247 tlsreg = gen_rtx_REG (Pmode, 2);
9248 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9250 emit_insn (insn);
9252 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9254 rtx tlsreg, tmp;
9256 tmp = gen_reg_rtx (Pmode);
9257 if (TARGET_64BIT)
9259 tlsreg = gen_rtx_REG (Pmode, 13);
9260 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9262 else
9264 tlsreg = gen_rtx_REG (Pmode, 2);
9265 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9267 emit_insn (insn);
9268 if (TARGET_64BIT)
9269 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9270 else
9271 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9272 emit_insn (insn);
9274 else
9276 rtx r3, got, tga, tmp1, tmp2, call_insn;
9278 /* We currently use relocations like @got@tlsgd for tls, which
9279 means the linker will handle allocation of tls entries, placing
9280 them in the .got section. So use a pointer to the .got section,
9281 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9282 or to secondary GOT sections used by 32-bit -fPIC. */
9283 if (TARGET_64BIT)
9284 got = gen_rtx_REG (Pmode, 2);
9285 else
9287 if (flag_pic == 1)
9288 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9289 else
9291 rtx gsym = rs6000_got_sym ();
9292 got = gen_reg_rtx (Pmode);
9293 if (flag_pic == 0)
9294 rs6000_emit_move (got, gsym, Pmode);
9295 else
9297 rtx mem, lab;
9299 tmp1 = gen_reg_rtx (Pmode);
9300 tmp2 = gen_reg_rtx (Pmode);
9301 mem = gen_const_mem (Pmode, tmp1);
9302 lab = gen_label_rtx ();
9303 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9304 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9305 if (TARGET_LINK_STACK)
9306 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9307 emit_move_insn (tmp2, mem);
9308 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9309 set_unique_reg_note (last, REG_EQUAL, gsym);
9314 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9316 tga = rs6000_tls_get_addr ();
9317 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9318 const0_rtx, Pmode);
9320 r3 = gen_rtx_REG (Pmode, 3);
9321 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9323 if (TARGET_64BIT)
9324 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9325 else
9326 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9328 else if (DEFAULT_ABI == ABI_V4)
9329 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9330 else
9331 gcc_unreachable ();
9332 call_insn = last_call_insn ();
9333 PATTERN (call_insn) = insn;
9334 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9335 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9336 pic_offset_table_rtx);
9338 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9340 tga = rs6000_tls_get_addr ();
9341 tmp1 = gen_reg_rtx (Pmode);
9342 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9343 const0_rtx, Pmode);
9345 r3 = gen_rtx_REG (Pmode, 3);
9346 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9348 if (TARGET_64BIT)
9349 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9350 else
9351 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9353 else if (DEFAULT_ABI == ABI_V4)
9354 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9355 else
9356 gcc_unreachable ();
9357 call_insn = last_call_insn ();
9358 PATTERN (call_insn) = insn;
9359 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9360 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9361 pic_offset_table_rtx);
9363 if (rs6000_tls_size == 16)
9365 if (TARGET_64BIT)
9366 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9367 else
9368 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9370 else if (rs6000_tls_size == 32)
9372 tmp2 = gen_reg_rtx (Pmode);
9373 if (TARGET_64BIT)
9374 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9375 else
9376 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9377 emit_insn (insn);
9378 if (TARGET_64BIT)
9379 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9380 else
9381 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9383 else
9385 tmp2 = gen_reg_rtx (Pmode);
9386 if (TARGET_64BIT)
9387 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9388 else
9389 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9390 emit_insn (insn);
9391 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9393 emit_insn (insn);
9395 else
9397 /* IE, or 64-bit offset LE. */
9398 tmp2 = gen_reg_rtx (Pmode);
9399 if (TARGET_64BIT)
9400 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9401 else
9402 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9403 emit_insn (insn);
9404 if (TARGET_64BIT)
9405 insn = gen_tls_tls_64 (dest, tmp2, addr);
9406 else
9407 insn = gen_tls_tls_32 (dest, tmp2, addr);
9408 emit_insn (insn);
9412 return dest;
9415 /* Only create the global variable for the stack protect guard if we are using
9416 the global flavor of that guard. */
9417 static tree
9418 rs6000_init_stack_protect_guard (void)
9420 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9421 return default_stack_protect_guard ();
9423 return NULL_TREE;
9426 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9428 static bool
9429 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9431 if (GET_CODE (x) == HIGH
9432 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9433 return true;
9435 /* A TLS symbol in the TOC cannot contain a sum. */
9436 if (GET_CODE (x) == CONST
9437 && GET_CODE (XEXP (x, 0)) == PLUS
9438 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9439 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9440 return true;
9442 /* Do not place an ELF TLS symbol in the constant pool. */
9443 return TARGET_ELF && tls_referenced_p (x);
9446 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9447 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9448 can be addressed relative to the toc pointer. */
9450 static bool
9451 use_toc_relative_ref (rtx sym, machine_mode mode)
9453 return ((constant_pool_expr_p (sym)
9454 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9455 get_pool_mode (sym)))
9456 || (TARGET_CMODEL == CMODEL_MEDIUM
9457 && SYMBOL_REF_LOCAL_P (sym)
9458 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9461 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9462 replace the input X, or the original X if no replacement is called for.
9463 The output parameter *WIN is 1 if the calling macro should goto WIN,
9464 0 if it should not.
9466 For RS/6000, we wish to handle large displacements off a base
9467 register by splitting the addend across an addi/addis and the mem insn.
9468 This cuts the number of extra insns needed from 3 to 1.
9470 On Darwin, we use this to generate code for floating point constants.
9471 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9472 The Darwin code is inside #if TARGET_MACHO because only then are the
9473 machopic_* functions defined. */
9474 static rtx
9475 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9476 int opnum, int type,
9477 int ind_levels ATTRIBUTE_UNUSED, int *win)
9479 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9480 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9482 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9483 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9484 if (reg_offset_p
9485 && opnum == 1
9486 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9487 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9488 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9489 && TARGET_P9_VECTOR)
9490 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9491 && TARGET_P9_VECTOR)))
9492 reg_offset_p = false;
9494 /* We must recognize output that we have already generated ourselves. */
9495 if (GET_CODE (x) == PLUS
9496 && GET_CODE (XEXP (x, 0)) == PLUS
9497 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9498 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9499 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9501 if (TARGET_DEBUG_ADDR)
9503 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9504 debug_rtx (x);
9506 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9507 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9508 opnum, (enum reload_type) type);
9509 *win = 1;
9510 return x;
9513 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9514 if (GET_CODE (x) == LO_SUM
9515 && GET_CODE (XEXP (x, 0)) == HIGH)
9517 if (TARGET_DEBUG_ADDR)
9519 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9520 debug_rtx (x);
9522 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9523 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9524 opnum, (enum reload_type) type);
9525 *win = 1;
9526 return x;
9529 #if TARGET_MACHO
9530 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9531 && GET_CODE (x) == LO_SUM
9532 && GET_CODE (XEXP (x, 0)) == PLUS
9533 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9534 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9535 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9536 && machopic_operand_p (XEXP (x, 1)))
9538 /* Result of previous invocation of this function on Darwin
9539 floating point constant. */
9540 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9541 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9542 opnum, (enum reload_type) type);
9543 *win = 1;
9544 return x;
9546 #endif
9548 if (TARGET_CMODEL != CMODEL_SMALL
9549 && reg_offset_p
9550 && !quad_offset_p
9551 && small_toc_ref (x, VOIDmode))
9553 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9554 x = gen_rtx_LO_SUM (Pmode, hi, x);
9555 if (TARGET_DEBUG_ADDR)
9557 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9558 debug_rtx (x);
9560 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9561 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9562 opnum, (enum reload_type) type);
9563 *win = 1;
9564 return x;
9567 if (GET_CODE (x) == PLUS
9568 && REG_P (XEXP (x, 0))
9569 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9570 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9571 && CONST_INT_P (XEXP (x, 1))
9572 && reg_offset_p
9573 && !PAIRED_VECTOR_MODE (mode)
9574 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9576 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9577 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9578 HOST_WIDE_INT high
9579 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
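 /* Worked example (illustrative): for val = 0x12348, low becomes
 ((0x2348 ^ 0x8000) - 0x8000) = 0x2348 and high becomes 0x10000,
 so the address is rebuilt below as (base + 0x10000) + 0x2348 and
 reload materializes the inner sum with a single addis. For
 val = 0x9000, low = -0x7000 and high = 0x10000, keeping the low
 part within the signed 16-bit displacement range. */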
9581 /* Check for 32-bit overflow or quad addresses with one of the
9582 four least significant bits set. */
9583 if (high + low != val
9584 || (quad_offset_p && (low & 0xf)))
9586 *win = 0;
9587 return x;
9590 /* Reload the high part into a base reg; leave the low part
9591 in the mem directly. */
9593 x = gen_rtx_PLUS (GET_MODE (x),
9594 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9595 GEN_INT (high)),
9596 GEN_INT (low));
9598 if (TARGET_DEBUG_ADDR)
9600 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9601 debug_rtx (x);
9603 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9604 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9605 opnum, (enum reload_type) type);
9606 *win = 1;
9607 return x;
9610 if (GET_CODE (x) == SYMBOL_REF
9611 && reg_offset_p
9612 && !quad_offset_p
9613 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9614 && !PAIRED_VECTOR_MODE (mode)
9615 #if TARGET_MACHO
9616 && DEFAULT_ABI == ABI_DARWIN
9617 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9618 && machopic_symbol_defined_p (x)
9619 #else
9620 && DEFAULT_ABI == ABI_V4
9621 && !flag_pic
9622 #endif
9623 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9624 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9625 without fprs.
9626 ??? Assume floating point reg based on mode? This assumption is
9627 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9628 where reload ends up doing a DFmode load of a constant from
9629 mem using two gprs. Unfortunately, at this point reload
9630 hasn't yet selected regs so poking around in reload data
9631 won't help and even if we could figure out the regs reliably,
9632 we'd still want to allow this transformation when the mem is
9633 naturally aligned. Since we say the address is good here, we
9634 can't disable offsets from LO_SUMs in mem_operand_gpr.
9635 FIXME: Allow offset from lo_sum for other modes too, when
9636 mem is sufficiently aligned.
9638 Also disallow this if the type can go in VMX/Altivec registers, since
9639 those registers do not have d-form (reg+offset) address modes. */
9640 && !reg_addr[mode].scalar_in_vmx_p
9641 && mode != TFmode
9642 && mode != TDmode
9643 && mode != IFmode
9644 && mode != KFmode
9645 && (mode != TImode || !TARGET_VSX)
9646 && mode != PTImode
9647 && (mode != DImode || TARGET_POWERPC64)
9648 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9649 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9651 #if TARGET_MACHO
9652 if (flag_pic)
9654 rtx offset = machopic_gen_offset (x);
9655 x = gen_rtx_LO_SUM (GET_MODE (x),
9656 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9657 gen_rtx_HIGH (Pmode, offset)), offset);
9659 else
9660 #endif
9661 x = gen_rtx_LO_SUM (GET_MODE (x),
9662 gen_rtx_HIGH (Pmode, x), x);
9664 if (TARGET_DEBUG_ADDR)
9666 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9667 debug_rtx (x);
9669 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9670 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9671 opnum, (enum reload_type) type);
9672 *win = 1;
9673 return x;
9676 /* Reload an offset address wrapped by an AND that represents the
9677 masking of the lower bits. Strip the outer AND and let reload
9678 convert the offset address into an indirect address. For VSX,
9679 force reload to create the address with an AND in a separate
9680 register, because we can't guarantee an altivec register will
9681 be used. */
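 /* Illustrative example: an address such as
 (and:SI (plus:SI (reg:SI 9) (const_int 32)) (const_int -16))
 is handled here by returning the inner (plus ...), which reload
 then converts into a register-indirect address. */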
9682 if (VECTOR_MEM_ALTIVEC_P (mode)
9683 && GET_CODE (x) == AND
9684 && GET_CODE (XEXP (x, 0)) == PLUS
9685 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9686 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9687 && GET_CODE (XEXP (x, 1)) == CONST_INT
9688 && INTVAL (XEXP (x, 1)) == -16)
9690 x = XEXP (x, 0);
9691 *win = 1;
9692 return x;
9695 if (TARGET_TOC
9696 && reg_offset_p
9697 && !quad_offset_p
9698 && GET_CODE (x) == SYMBOL_REF
9699 && use_toc_relative_ref (x, mode))
9701 x = create_TOC_reference (x, NULL_RTX);
9702 if (TARGET_CMODEL != CMODEL_SMALL)
9704 if (TARGET_DEBUG_ADDR)
9706 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9707 debug_rtx (x);
9709 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9710 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9711 opnum, (enum reload_type) type);
9713 *win = 1;
9714 return x;
9716 *win = 0;
9717 return x;
9720 /* Debug version of rs6000_legitimize_reload_address. */
9721 static rtx
9722 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9723 int opnum, int type,
9724 int ind_levels, int *win)
9726 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9727 ind_levels, win);
9728 fprintf (stderr,
9729 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9730 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9731 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9732 debug_rtx (x);
9734 if (x == ret)
9735 fprintf (stderr, "Same address returned\n");
9736 else if (!ret)
9737 fprintf (stderr, "NULL returned\n");
9738 else
9740 fprintf (stderr, "New address:\n");
9741 debug_rtx (ret);
9744 return ret;
9747 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9748 that is a valid memory address for an instruction.
9749 The MODE argument is the machine mode for the MEM expression
9750 that wants to use this address.
9752 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9753 refers to a constant pool entry of an address (or the sum of it
9754 plus a constant), a short (16-bit signed) constant plus a register,
9755 the sum of two registers, or a register indirect, possibly with an
9756 auto-increment. For DFmode, DDmode and DImode with a constant plus
9757 register, we must ensure that both words are addressable, or on
9758 PowerPC64 that the offset is word-aligned.
9760 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9761 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9762 because adjacent memory cells are accessed by adding word-sized offsets
9763 during assembly output. */
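 /* Illustrative examples of the accepted forms, assuming DImode:
      (mem:DI (reg:DI 9))                          register indirect
      (mem:DI (plus:DI (reg:DI 9) (const_int 8)))  register + 16-bit offset
      (mem:DI (plus:DI (reg:DI 9) (reg:DI 10)))    register + register (indexed)
      (mem:DI (pre_inc:DI (reg:DI 9)))             auto-increment  */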
9764 static bool
9765 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9767 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9768 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9770 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9771 if (VECTOR_MEM_ALTIVEC_P (mode)
9772 && GET_CODE (x) == AND
9773 && GET_CODE (XEXP (x, 1)) == CONST_INT
9774 && INTVAL (XEXP (x, 1)) == -16)
9775 x = XEXP (x, 0);
9777 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9778 return 0;
9779 if (legitimate_indirect_address_p (x, reg_ok_strict))
9780 return 1;
9781 if (TARGET_UPDATE
9782 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9783 && mode_supports_pre_incdec_p (mode)
9784 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9785 return 1;
9786 /* Handle restricted vector d-form offsets in ISA 3.0. */
9787 if (quad_offset_p)
9789 if (quad_address_p (x, mode, reg_ok_strict))
9790 return 1;
9792 else if (virtual_stack_registers_memory_p (x))
9793 return 1;
9795 else if (reg_offset_p)
9797 if (legitimate_small_data_p (mode, x))
9798 return 1;
9799 if (legitimate_constant_pool_address_p (x, mode,
9800 reg_ok_strict || lra_in_progress))
9801 return 1;
9802 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9803 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9804 return 1;
9807 /* For TImode, if we have TImode in VSX registers, only allow register
9808 indirect addresses. This will allow the values to go in either GPRs
9809 or VSX registers without reloading. The vector types would tend to
9810 go into VSX registers, so we allow REG+REG, while TImode seems
9811 somewhat split, in that some uses are GPR based, and some VSX based. */
9812 /* FIXME: We could loosen this by changing the following to
9813 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9814 but currently we cannot allow REG+REG addressing for TImode. See
9815 PR72827 for complete details on how this ends up hoodwinking DSE. */
9816 if (mode == TImode && TARGET_VSX)
9817 return 0;
9818 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9819 if (! reg_ok_strict
9820 && reg_offset_p
9821 && GET_CODE (x) == PLUS
9822 && GET_CODE (XEXP (x, 0)) == REG
9823 && (XEXP (x, 0) == virtual_stack_vars_rtx
9824 || XEXP (x, 0) == arg_pointer_rtx)
9825 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9826 return 1;
9827 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9828 return 1;
9829 if (!FLOAT128_2REG_P (mode)
9830 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9831 || TARGET_POWERPC64
9832 || (mode != DFmode && mode != DDmode))
9833 && (TARGET_POWERPC64 || mode != DImode)
9834 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9835 && mode != PTImode
9836 && !avoiding_indexed_address_p (mode)
9837 && legitimate_indexed_address_p (x, reg_ok_strict))
9838 return 1;
9839 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9840 && mode_supports_pre_modify_p (mode)
9841 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9842 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9843 reg_ok_strict, false)
9844 || (!avoiding_indexed_address_p (mode)
9845 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9846 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9847 return 1;
9848 if (reg_offset_p && !quad_offset_p
9849 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9850 return 1;
9851 return 0;
9854 /* Debug version of rs6000_legitimate_address_p. */
9855 static bool
9856 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9857 bool reg_ok_strict)
9859 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9860 fprintf (stderr,
9861 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9862 "strict = %d, reload = %s, code = %s\n",
9863 ret ? "true" : "false",
9864 GET_MODE_NAME (mode),
9865 reg_ok_strict,
9866 (reload_completed ? "after" : "before"),
9867 GET_RTX_NAME (GET_CODE (x)));
9868 debug_rtx (x);
9870 return ret;
9873 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9875 static bool
9876 rs6000_mode_dependent_address_p (const_rtx addr,
9877 addr_space_t as ATTRIBUTE_UNUSED)
9879 return rs6000_mode_dependent_address_ptr (addr);
9882 /* Go to LABEL if ADDR (a legitimate address expression)
9883 has an effect that depends on the machine mode it is used for.
9885 On the RS/6000 this is true of any integral offset (since AltiVec
9886 and VSX modes don't allow them) and of any pre-increment or decrement.
9888 ??? Except that due to conceptual problems in offsettable_address_p
9889 we can't really report the problems of integral offsets. So leave
9890 this assuming that the adjustable offset must be valid for the
9891 sub-words of a TFmode operand, which is what we had before. */
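 /* Worked example (illustrative): in 32-bit mode a 16-byte value is
 accessed as four words at offsets 0, 4, 8 and 12, so an offset is
 only mode-independent if offset + 12 still fits in a signed 16-bit
 displacement, i.e. offset <= 0x7ff3. That is what the
 0x10000 - (TARGET_POWERPC64 ? 8 : 12) test below encodes: 12 leaves
 room for the last of four words, 8 for the second of two
 doublewords. */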
9893 static bool
9894 rs6000_mode_dependent_address (const_rtx addr)
9896 switch (GET_CODE (addr))
9898 case PLUS:
9899 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9900 is considered a legitimate address before reload, so there
9901 are no offset restrictions in that case. Note that this
9902 condition is safe in strict mode because any address involving
9903 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9904 been rejected as illegitimate. */
9905 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9906 && XEXP (addr, 0) != arg_pointer_rtx
9907 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9909 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9910 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9912 break;
9914 case LO_SUM:
9915 /* Anything in the constant pool is sufficiently aligned that
9916 all bytes have the same high part address. */
9917 return !legitimate_constant_pool_address_p (addr, QImode, false);
9919 /* Auto-increment cases are now treated generically in recog.c. */
9920 case PRE_MODIFY:
9921 return TARGET_UPDATE;
9923 /* AND is only allowed in Altivec loads. */
9924 case AND:
9925 return true;
9927 default:
9928 break;
9931 return false;
9934 /* Debug version of rs6000_mode_dependent_address. */
9935 static bool
9936 rs6000_debug_mode_dependent_address (const_rtx addr)
9938 bool ret = rs6000_mode_dependent_address (addr);
9940 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9941 ret ? "true" : "false");
9942 debug_rtx (addr);
9944 return ret;
9947 /* Implement FIND_BASE_TERM. */
9950 rs6000_find_base_term (rtx op)
9952 rtx base;
9954 base = op;
9955 if (GET_CODE (base) == CONST)
9956 base = XEXP (base, 0);
9957 if (GET_CODE (base) == PLUS)
9958 base = XEXP (base, 0);
9959 if (GET_CODE (base) == UNSPEC)
9960 switch (XINT (base, 1))
9962 case UNSPEC_TOCREL:
9963 case UNSPEC_MACHOPIC_OFFSET:
9964 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9965 for aliasing purposes. */
9966 return XVECEXP (base, 0, 0);
9969 return op;
9972 /* More elaborate version of recog's offsettable_memref_p predicate
9973 that works around the ??? note of rs6000_mode_dependent_address.
9974 In particular it accepts
9976 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9978 in 32-bit mode, that the recog predicate rejects. */
9980 static bool
9981 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9983 bool worst_case;
9985 if (!MEM_P (op))
9986 return false;
9988 /* First mimic offsettable_memref_p. */
9989 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9990 return true;
9992 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9993 the latter predicate knows nothing about the mode of the memory
9994 reference and, therefore, assumes that it is the largest supported
9995 mode (TFmode). As a consequence, legitimate offsettable memory
9996 references are rejected. rs6000_legitimate_offset_address_p contains
9997 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9998 at least with a little bit of help here given that we know the
9999 actual registers used. */
10000 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
10001 || GET_MODE_SIZE (reg_mode) == 4);
10002 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
10003 true, worst_case);
10006 /* Determine the reassociation width to be used in reassociate_bb.
10007 This takes into account how many parallel operations we
10008 can actually do of a given type, and also the latency.
10010 int add/sub 6/cycle
10011 mul 2/cycle
10012 vect add/sub/mul 2/cycle
10013 fp add/sub/mul 2/cycle
10014 dfp 1/cycle
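 For illustration: with a width of 4, the reassociation pass can
 rebalance a serial chain such as ((((((a+b)+c)+d)+e)+f)+g)+h into
 ((a+b)+(c+d)) + ((e+f)+(g+h)), exposing up to four independent
 additions per step instead of one.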
10017 static int
10018 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
10019 machine_mode mode)
10021 switch (rs6000_cpu)
10023 case PROCESSOR_POWER8:
10024 case PROCESSOR_POWER9:
10025 if (DECIMAL_FLOAT_MODE_P (mode))
10026 return 1;
10027 if (VECTOR_MODE_P (mode))
10028 return 4;
10029 if (INTEGRAL_MODE_P (mode))
10030 return opc == MULT_EXPR ? 4 : 6;
10031 if (FLOAT_MODE_P (mode))
10032 return 4;
10033 break;
10034 default:
10035 break;
10037 return 1;
10040 /* Change register usage conditional on target flags. */
10041 static void
10042 rs6000_conditional_register_usage (void)
10044 int i;
10046 if (TARGET_DEBUG_TARGET)
10047 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10049 /* Set MQ register fixed (already call_used) so that it will not be
10050 allocated. */
10051 fixed_regs[64] = 1;
10053 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10054 if (TARGET_64BIT)
10055 fixed_regs[13] = call_used_regs[13]
10056 = call_really_used_regs[13] = 1;
10058 /* Conditionally disable FPRs. */
10059 if (TARGET_SOFT_FLOAT)
10060 for (i = 32; i < 64; i++)
10061 fixed_regs[i] = call_used_regs[i]
10062 = call_really_used_regs[i] = 1;
10064 /* The TOC register is not killed across calls in a way that is
10065 visible to the compiler. */
10066 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10067 call_really_used_regs[2] = 0;
10069 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10070 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10072 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10073 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10074 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10075 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10077 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10078 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10079 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10080 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10082 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10083 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10084 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10086 if (!TARGET_ALTIVEC && !TARGET_VSX)
10088 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10089 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10090 call_really_used_regs[VRSAVE_REGNO] = 1;
10093 if (TARGET_ALTIVEC || TARGET_VSX)
10094 global_regs[VSCR_REGNO] = 1;
10096 if (TARGET_ALTIVEC_ABI)
10098 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10099 call_used_regs[i] = call_really_used_regs[i] = 1;
10101 /* AIX reserves VR20:31 in non-extended ABI mode. */
10102 if (TARGET_XCOFF)
10103 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10104 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10109 /* Output insns to set DEST equal to the constant SOURCE as a series of
10110 lis, ori and shl instructions and return TRUE. */
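 /* Illustrative SImode example: for c = 0x12345678 this emits
      lis rT,0x1234    (temp = 0x12340000, from c & ~0xffff)
      ori rD,rT,0x5678 (dest = temp | 0x5678)
 with a REG_EQUAL note recording the full constant. */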
10112 bool
10113 rs6000_emit_set_const (rtx dest, rtx source)
10115 machine_mode mode = GET_MODE (dest);
10116 rtx temp, set;
10117 rtx_insn *insn;
10118 HOST_WIDE_INT c;
10120 gcc_checking_assert (CONST_INT_P (source));
10121 c = INTVAL (source);
10122 switch (mode)
10124 case E_QImode:
10125 case E_HImode:
10126 emit_insn (gen_rtx_SET (dest, source));
10127 return true;
10129 case E_SImode:
10130 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10132 emit_insn (gen_rtx_SET (copy_rtx (temp),
10133 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10134 emit_insn (gen_rtx_SET (dest,
10135 gen_rtx_IOR (SImode, copy_rtx (temp),
10136 GEN_INT (c & 0xffff))));
10137 break;
10139 case E_DImode:
10140 if (!TARGET_POWERPC64)
10142 rtx hi, lo;
10144 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10145 DImode);
10146 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10147 DImode);
10148 emit_move_insn (hi, GEN_INT (c >> 32));
10149 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10150 emit_move_insn (lo, GEN_INT (c));
10152 else
10153 rs6000_emit_set_long_const (dest, c);
10154 break;
10156 default:
10157 gcc_unreachable ();
10160 insn = get_last_insn ();
10161 set = single_set (insn);
10162 if (! CONSTANT_P (SET_SRC (set)))
10163 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10165 return true;
10168 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10169 Output insns to set DEST equal to the constant C as a series of
10170 lis, ori and shl instructions. */
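 /* Illustrative DImode example: for c = 0x123456789abcdef0 the
 general case emits
      lis rT,0x1234 ; ori rT,rT,0x5678 ; sldi rT,rT,32 ;
      oris rT,rT,0x9abc ; ori rD,rT,0xdef0
 i.e. build the high 32 bits, shift them up, then OR in ud2 and
 ud1. Simpler constants take the shorter early paths. */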
10172 static void
10173 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10175 rtx temp;
10176 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10178 ud1 = c & 0xffff;
10179 c = c >> 16;
10180 ud2 = c & 0xffff;
10181 c = c >> 16;
10182 ud3 = c & 0xffff;
10183 c = c >> 16;
10184 ud4 = c & 0xffff;
10186 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10187 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10188 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10190 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10191 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10193 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10195 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10196 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10197 if (ud1 != 0)
10198 emit_move_insn (dest,
10199 gen_rtx_IOR (DImode, copy_rtx (temp),
10200 GEN_INT (ud1)));
10202 else if (ud3 == 0 && ud4 == 0)
10204 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10206 gcc_assert (ud2 & 0x8000);
10207 emit_move_insn (copy_rtx (temp),
10208 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10209 if (ud1 != 0)
10210 emit_move_insn (copy_rtx (temp),
10211 gen_rtx_IOR (DImode, copy_rtx (temp),
10212 GEN_INT (ud1)));
10213 emit_move_insn (dest,
10214 gen_rtx_ZERO_EXTEND (DImode,
10215 gen_lowpart (SImode,
10216 copy_rtx (temp))));
10218 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10219 || (ud4 == 0 && ! (ud3 & 0x8000)))
10221 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10223 emit_move_insn (copy_rtx (temp),
10224 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10225 if (ud2 != 0)
10226 emit_move_insn (copy_rtx (temp),
10227 gen_rtx_IOR (DImode, copy_rtx (temp),
10228 GEN_INT (ud2)));
10229 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10230 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10231 GEN_INT (16)));
10232 if (ud1 != 0)
10233 emit_move_insn (dest,
10234 gen_rtx_IOR (DImode, copy_rtx (temp),
10235 GEN_INT (ud1)));
10237 else
10239 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10241 emit_move_insn (copy_rtx (temp),
10242 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10243 if (ud3 != 0)
10244 emit_move_insn (copy_rtx (temp),
10245 gen_rtx_IOR (DImode, copy_rtx (temp),
10246 GEN_INT (ud3)));
10248 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10249 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10250 GEN_INT (32)));
10251 if (ud2 != 0)
10252 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10253 gen_rtx_IOR (DImode, copy_rtx (temp),
10254 GEN_INT (ud2 << 16)));
10255 if (ud1 != 0)
10256 emit_move_insn (dest,
10257 gen_rtx_IOR (DImode, copy_rtx (temp),
10258 GEN_INT (ud1)));
10262 /* Helper for the following. Get rid of [r+r] memory refs
10263 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10265 static void
10266 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10268 if (GET_CODE (operands[0]) == MEM
10269 && GET_CODE (XEXP (operands[0], 0)) != REG
10270 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10271 GET_MODE (operands[0]), false))
10272 operands[0]
10273 = replace_equiv_address (operands[0],
10274 copy_addr_to_reg (XEXP (operands[0], 0)));
10276 if (GET_CODE (operands[1]) == MEM
10277 && GET_CODE (XEXP (operands[1], 0)) != REG
10278 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10279 GET_MODE (operands[1]), false))
10280 operands[1]
10281 = replace_equiv_address (operands[1],
10282 copy_addr_to_reg (XEXP (operands[1], 0)));
10285 /* Generate a vector of constants to permute MODE for a little-endian
10286 storage operation by swapping the two halves of a vector. */
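 /* For example, for V4SImode this returns the permutation
 { 2, 3, 0, 1 }: element 2 of the source becomes element 0 of the
 result and so on, i.e. the two 64-bit halves are swapped. */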
10287 static rtvec
10288 rs6000_const_vec (machine_mode mode)
10290 int i, subparts;
10291 rtvec v;
10293 switch (mode)
10295 case E_V1TImode:
10296 subparts = 1;
10297 break;
10298 case E_V2DFmode:
10299 case E_V2DImode:
10300 subparts = 2;
10301 break;
10302 case E_V4SFmode:
10303 case E_V4SImode:
10304 subparts = 4;
10305 break;
10306 case E_V8HImode:
10307 subparts = 8;
10308 break;
10309 case E_V16QImode:
10310 subparts = 16;
10311 break;
10312 default:
10313 gcc_unreachable();
10316 v = rtvec_alloc (subparts);
10318 for (i = 0; i < subparts / 2; ++i)
10319 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10320 for (i = subparts / 2; i < subparts; ++i)
10321 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10323 return v;
10326 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10327 store operation. */
10328 void
10329 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10331 /* Scalar permutations are easier to express in integer modes than in
10332 floating-point modes, so cast them here. We use V1TImode instead
10333 of TImode to ensure that the values don't go through GPRs. */
10334 if (FLOAT128_VECTOR_P (mode))
10336 dest = gen_lowpart (V1TImode, dest);
10337 source = gen_lowpart (V1TImode, source);
10338 mode = V1TImode;
10341 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10342 scalar. */
10343 if (mode == TImode || mode == V1TImode)
10344 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10345 GEN_INT (64))));
10346 else
10348 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10349 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10353 /* Emit a little-endian load from vector memory location SOURCE to VSX
10354 register DEST in mode MODE. The load is done with two permuting
10355 insns that represent an lxvd2x and an xxpermdi. */
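 /* Roughly: on little-endian, the lxvd2x loads the vector with its
 two doublewords swapped relative to the expected little-endian
 layout, and the xxpermdi swaps them back, so DEST ends up with the
 correct element order. */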
10356 void
10357 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10359 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10360 V1TImode). */
10361 if (mode == TImode || mode == V1TImode)
10363 mode = V2DImode;
10364 dest = gen_lowpart (V2DImode, dest);
10365 source = adjust_address (source, V2DImode, 0);
10368 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10369 rs6000_emit_le_vsx_permute (tmp, source, mode);
10370 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10373 /* Emit a little-endian store to vector memory location DEST from VSX
10374 register SOURCE in mode MODE. The store is done with two permuting
10375 insns that represent an xxpermdi and an stxvd2x. */
10376 void
10377 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10379 /* This should never be called during or after LRA, because it does
10380 not re-permute the source register. It is intended only for use
10381 during expand. */
10382 gcc_assert (!lra_in_progress && !reload_completed);
10384 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10385 V1TImode). */
10386 if (mode == TImode || mode == V1TImode)
10388 mode = V2DImode;
10389 dest = adjust_address (dest, V2DImode, 0);
10390 source = gen_lowpart (V2DImode, source);
10393 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10394 rs6000_emit_le_vsx_permute (tmp, source, mode);
10395 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10398 /* Emit a sequence representing a little-endian VSX load or store,
10399 moving data from SOURCE to DEST in mode MODE. This is done
10400 separately from rs6000_emit_move to ensure it is called only
10401 during expand. LE VSX loads and stores introduced later are
10402 handled with a split. The expand-time RTL generation allows
10403 us to optimize away redundant pairs of register-permutes. */
10404 void
10405 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10407 gcc_assert (!BYTES_BIG_ENDIAN
10408 && VECTOR_MEM_VSX_P (mode)
10409 && !TARGET_P9_VECTOR
10410 && !gpr_or_gpr_p (dest, source)
10411 && (MEM_P (source) ^ MEM_P (dest)));
10413 if (MEM_P (source))
10415 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10416 rs6000_emit_le_vsx_load (dest, source, mode);
10418 else
10420 if (!REG_P (source))
10421 source = force_reg (mode, source);
10422 rs6000_emit_le_vsx_store (dest, source, mode);
10426 /* Return whether an SFmode or SImode move can be done without converting
10427 one mode to another. This arises when we have:
10429 (SUBREG:SF (REG:SI ...))
10430 (SUBREG:SI (REG:SF ...))
10432 and one of the values is in a floating point/vector register, where SFmode
10433 scalars are stored in DFmode format. */
10435 bool
10436 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10438 if (TARGET_ALLOW_SF_SUBREG)
10439 return true;
10441 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10442 return true;
10444 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10445 return true;
10447 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10448 if (SUBREG_P (dest))
10450 rtx dest_subreg = SUBREG_REG (dest);
10451 rtx src_subreg = SUBREG_REG (src);
10452 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10455 return false;
10459 /* Helper function to change moves with:
10461 (SUBREG:SF (REG:SI)) and
10462 (SUBREG:SI (REG:SF))
10464 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10465 values are stored as DFmode values in the VSX registers. We need to convert
10466 the bits before we can use a direct move or operate on the bits in the
10467 vector register as an integer type.
10469 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
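 /* Illustrative example: (set (reg:SI) (subreg:SI (reg:SF))) is
 rewritten below to use movsi_from_sf, which performs the bit
 conversion out of the DFmode register format before the direct
 move. */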
10471 static bool
10472 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10474 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10475 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10476 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10478 rtx inner_source = SUBREG_REG (source);
10479 machine_mode inner_mode = GET_MODE (inner_source);
10481 if (mode == SImode && inner_mode == SFmode)
10483 emit_insn (gen_movsi_from_sf (dest, inner_source));
10484 return true;
10487 if (mode == SFmode && inner_mode == SImode)
10489 emit_insn (gen_movsf_from_si (dest, inner_source));
10490 return true;
10494 return false;
10497 /* Emit a move from SOURCE to DEST in mode MODE. */
10498 void
10499 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10501 rtx operands[2];
10502 operands[0] = dest;
10503 operands[1] = source;
10505 if (TARGET_DEBUG_ADDR)
10507 fprintf (stderr,
10508 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10509 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10510 GET_MODE_NAME (mode),
10511 lra_in_progress,
10512 reload_completed,
10513 can_create_pseudo_p ());
10514 debug_rtx (dest);
10515 fprintf (stderr, "source:\n");
10516 debug_rtx (source);
10519 /* Sanity checks. Check that we get CONST_WIDE_INT only when we should. */
10520 if (CONST_WIDE_INT_P (operands[1])
10521 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10523 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10524 gcc_unreachable ();
10527 /* See if we need to special case SImode/SFmode SUBREG moves. */
10528 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10529 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10530 return;
10532 /* Check if GCC is setting up a block move that will end up using FP
10533 registers as temporaries. We must make sure this is acceptable. */
10534 if (GET_CODE (operands[0]) == MEM
10535 && GET_CODE (operands[1]) == MEM
10536 && mode == DImode
10537 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
10538 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
10539 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
10540 ? 32 : MEM_ALIGN (operands[0])))
10541 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
10542 ? 32
10543 : MEM_ALIGN (operands[1]))))
10544 && ! MEM_VOLATILE_P (operands [0])
10545 && ! MEM_VOLATILE_P (operands [1]))
10547 emit_move_insn (adjust_address (operands[0], SImode, 0),
10548 adjust_address (operands[1], SImode, 0));
10549 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10550 adjust_address (copy_rtx (operands[1]), SImode, 4));
10551 return;
10554 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10555 && !gpc_reg_operand (operands[1], mode))
10556 operands[1] = force_reg (mode, operands[1]);
10558 /* Recognize the case where operand[1] is a reference to thread-local
10559 data and load its address to a register. */
10560 if (tls_referenced_p (operands[1]))
10562 enum tls_model model;
10563 rtx tmp = operands[1];
10564 rtx addend = NULL;
10566 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10568 addend = XEXP (XEXP (tmp, 0), 1);
10569 tmp = XEXP (XEXP (tmp, 0), 0);
10572 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10573 model = SYMBOL_REF_TLS_MODEL (tmp);
10574 gcc_assert (model != 0);
10576 tmp = rs6000_legitimize_tls_address (tmp, model);
10577 if (addend)
10579 tmp = gen_rtx_PLUS (mode, tmp, addend);
10580 tmp = force_operand (tmp, operands[0]);
10582 operands[1] = tmp;
10585 /* 128-bit constant floating-point values on Darwin should really be loaded
10586 as two parts. However, this premature splitting is a problem when DFmode
10587 values can go into Altivec registers. */
10588 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10589 && GET_CODE (operands[1]) == CONST_DOUBLE)
10591 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10592 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10593 DFmode);
10594 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10595 GET_MODE_SIZE (DFmode)),
10596 simplify_gen_subreg (DFmode, operands[1], mode,
10597 GET_MODE_SIZE (DFmode)),
10598 DFmode);
10599 return;
10602 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10603 p1:SD) if p1 is not of floating point class and p0 is spilled as
10604 we can have no analogous movsd_store for this. */
10605 if (lra_in_progress && mode == DDmode
10606 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10607 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10608 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10609 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10611 enum reg_class cl;
10612 int regno = REGNO (SUBREG_REG (operands[1]));
10614 if (regno >= FIRST_PSEUDO_REGISTER)
10616 cl = reg_preferred_class (regno);
10617 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10619 if (regno >= 0 && ! FP_REGNO_P (regno))
10621 mode = SDmode;
10622 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10623 operands[1] = SUBREG_REG (operands[1]);
10626 if (lra_in_progress
10627 && mode == SDmode
10628 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10629 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10630 && (REG_P (operands[1])
10631 || (GET_CODE (operands[1]) == SUBREG
10632 && REG_P (SUBREG_REG (operands[1])))))
10634 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10635 ? SUBREG_REG (operands[1]) : operands[1]);
10636 enum reg_class cl;
10638 if (regno >= FIRST_PSEUDO_REGISTER)
10640 cl = reg_preferred_class (regno);
10641 gcc_assert (cl != NO_REGS);
10642 regno = ira_class_hard_regs[cl][0];
10644 if (FP_REGNO_P (regno))
10646 if (GET_MODE (operands[0]) != DDmode)
10647 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10648 emit_insn (gen_movsd_store (operands[0], operands[1]));
10650 else if (INT_REGNO_P (regno))
10651 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10652 else
10653 gcc_unreachable();
10654 return;
10656 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10657 p:DD)) if p0 is not of floating point class and p1 is spilled as
10658 we can have no analogous movsd_load for this. */
10659 if (lra_in_progress && mode == DDmode
10660 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10661 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10662 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10663 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10665 enum reg_class cl;
10666 int regno = REGNO (SUBREG_REG (operands[0]));
10668 if (regno >= FIRST_PSEUDO_REGISTER)
10670 cl = reg_preferred_class (regno);
10671 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10673 if (regno >= 0 && ! FP_REGNO_P (regno))
10675 mode = SDmode;
10676 operands[0] = SUBREG_REG (operands[0]);
10677 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10680 if (lra_in_progress
10681 && mode == SDmode
10682 && (REG_P (operands[0])
10683 || (GET_CODE (operands[0]) == SUBREG
10684 && REG_P (SUBREG_REG (operands[0]))))
10685 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10686 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10688 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10689 ? SUBREG_REG (operands[0]) : operands[0]);
10690 enum reg_class cl;
10692 if (regno >= FIRST_PSEUDO_REGISTER)
10694 cl = reg_preferred_class (regno);
10695 gcc_assert (cl != NO_REGS);
10696 regno = ira_class_hard_regs[cl][0];
10698 if (FP_REGNO_P (regno))
10700 if (GET_MODE (operands[1]) != DDmode)
10701 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10702 emit_insn (gen_movsd_load (operands[0], operands[1]));
10704 else if (INT_REGNO_P (regno))
10705 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10706 else
10707 gcc_unreachable();
10708 return;
10711 /* FIXME: In the long term, this switch statement should go away
10712 and be replaced by a sequence of tests based on things like
10713 mode == Pmode. */
10714 switch (mode)
10716 case E_HImode:
10717 case E_QImode:
10718 if (CONSTANT_P (operands[1])
10719 && GET_CODE (operands[1]) != CONST_INT)
10720 operands[1] = force_const_mem (mode, operands[1]);
10721 break;
10723 case E_TFmode:
10724 case E_TDmode:
10725 case E_IFmode:
10726 case E_KFmode:
10727 if (FLOAT128_2REG_P (mode))
10728 rs6000_eliminate_indexed_memrefs (operands);
10729 /* fall through */
10731 case E_DFmode:
10732 case E_DDmode:
10733 case E_SFmode:
10734 case E_SDmode:
10735 if (CONSTANT_P (operands[1])
10736 && ! easy_fp_constant (operands[1], mode))
10737 operands[1] = force_const_mem (mode, operands[1]);
10738 break;
10740 case E_V16QImode:
10741 case E_V8HImode:
10742 case E_V4SFmode:
10743 case E_V4SImode:
10744 case E_V2SFmode:
10745 case E_V2SImode:
10746 case E_V2DFmode:
10747 case E_V2DImode:
10748 case E_V1TImode:
10749 if (CONSTANT_P (operands[1])
10750 && !easy_vector_constant (operands[1], mode))
10751 operands[1] = force_const_mem (mode, operands[1]);
10752 break;
10754 case E_SImode:
10755 case E_DImode:
10756 /* Use the default pattern for the address of ELF small data. */
10757 if (TARGET_ELF
10758 && mode == Pmode
10759 && DEFAULT_ABI == ABI_V4
10760 && (GET_CODE (operands[1]) == SYMBOL_REF
10761 || GET_CODE (operands[1]) == CONST)
10762 && small_data_operand (operands[1], mode))
10764 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10765 return;
10768 if (DEFAULT_ABI == ABI_V4
10769 && mode == Pmode && mode == SImode
10770 && flag_pic == 1 && got_operand (operands[1], mode))
10772 emit_insn (gen_movsi_got (operands[0], operands[1]));
10773 return;
10776 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10777 && TARGET_NO_TOC
10778 && ! flag_pic
10779 && mode == Pmode
10780 && CONSTANT_P (operands[1])
10781 && GET_CODE (operands[1]) != HIGH
10782 && GET_CODE (operands[1]) != CONST_INT)
10784 rtx target = (!can_create_pseudo_p ()
10785 ? operands[0]
10786 : gen_reg_rtx (mode));
10788 /* If this is a function address on -mcall-aixdesc,
10789 convert it to the address of the descriptor. */
10790 if (DEFAULT_ABI == ABI_AIX
10791 && GET_CODE (operands[1]) == SYMBOL_REF
10792 && XSTR (operands[1], 0)[0] == '.')
10794 const char *name = XSTR (operands[1], 0);
10795 rtx new_ref;
10796 while (*name == '.')
10797 name++;
10798 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10799 CONSTANT_POOL_ADDRESS_P (new_ref)
10800 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10801 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10802 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10803 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10804 operands[1] = new_ref;
10807 if (DEFAULT_ABI == ABI_DARWIN)
10809 #if TARGET_MACHO
10810 if (MACHO_DYNAMIC_NO_PIC_P)
10812 /* Take care of any required data indirection. */
10813 operands[1] = rs6000_machopic_legitimize_pic_address (
10814 operands[1], mode, operands[0]);
10815 if (operands[0] != operands[1])
10816 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10817 return;
10819 #endif
10820 emit_insn (gen_macho_high (target, operands[1]));
10821 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10822 return;
10825 emit_insn (gen_elf_high (target, operands[1]));
10826 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10827 return;
10830 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10831 and we have put it in the TOC, we just need to make a TOC-relative
10832 reference to it. */
10833 if (TARGET_TOC
10834 && GET_CODE (operands[1]) == SYMBOL_REF
10835 && use_toc_relative_ref (operands[1], mode))
10836 operands[1] = create_TOC_reference (operands[1], operands[0]);
10837 else if (mode == Pmode
10838 && CONSTANT_P (operands[1])
10839 && GET_CODE (operands[1]) != HIGH
10840 && ((GET_CODE (operands[1]) != CONST_INT
10841 && ! easy_fp_constant (operands[1], mode))
10842 || (GET_CODE (operands[1]) == CONST_INT
10843 && (num_insns_constant (operands[1], mode)
10844 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10845 || (GET_CODE (operands[0]) == REG
10846 && FP_REGNO_P (REGNO (operands[0]))))
10847 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10848 && (TARGET_CMODEL == CMODEL_SMALL
10849 || can_create_pseudo_p ()
10850 || (REG_P (operands[0])
10851 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10854 #if TARGET_MACHO
10855 /* Darwin uses a special PIC legitimizer. */
10856 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10858 operands[1] =
10859 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10860 operands[0]);
10861 if (operands[0] != operands[1])
10862 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10863 return;
10865 #endif
10867 /* If we are to limit the number of things we put in the TOC and
10868 this is a symbol plus a constant we can add in one insn,
10869 just put the symbol in the TOC and add the constant. */
10870 if (GET_CODE (operands[1]) == CONST
10871 && TARGET_NO_SUM_IN_TOC
10872 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10873 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10874 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10875 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10876 && ! side_effects_p (operands[0]))
10878 rtx sym =
10879 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10880 rtx other = XEXP (XEXP (operands[1], 0), 1);
10882 sym = force_reg (mode, sym);
10883 emit_insn (gen_add3_insn (operands[0], sym, other));
10884 return;
10887 operands[1] = force_const_mem (mode, operands[1]);
10889 if (TARGET_TOC
10890 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10891 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10893 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10894 operands[0]);
10895 operands[1] = gen_const_mem (mode, tocref);
10896 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10899 break;
10901 case E_TImode:
10902 if (!VECTOR_MEM_VSX_P (TImode))
10903 rs6000_eliminate_indexed_memrefs (operands);
10904 break;
10906 case E_PTImode:
10907 rs6000_eliminate_indexed_memrefs (operands);
10908 break;
10910 default:
10911 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10914 /* Above, we may have called force_const_mem which may have returned
10915 an invalid address. If we can, fix this up; otherwise, reload will
10916 have to deal with it. */
10917 if (GET_CODE (operands[1]) == MEM)
10918 operands[1] = validize_mem (operands[1]);
10920 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10923 /* Nonzero if we can use a floating-point register to pass this arg. */
10924 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10925 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10926 && (CUM)->fregno <= FP_ARG_MAX_REG \
10927 && TARGET_HARD_FLOAT)
10929 /* Nonzero if we can use an AltiVec register to pass this arg. */
10930 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10931 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10932 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10933 && TARGET_ALTIVEC_ABI \
10934 && (NAMED))
10936 /* Walk down the type tree of TYPE counting consecutive base elements.
10937 If *MODEP is VOIDmode, then set it to the first valid floating point
10938 or vector type. If a non-floating point or vector type is found, or
10939 if a floating point or vector type that doesn't match a non-VOIDmode
10940 *MODEP is found, then return -1, otherwise return the count in the
10941 sub-tree. */
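 /* For example, for struct { double x; double y[3]; } this sets
 *MODEP to DFmode and returns 4 (one element for X plus three for
 Y); a struct mixing double and float fields returns -1. */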
10943 static int
10944 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10946 machine_mode mode;
10947 HOST_WIDE_INT size;
10949 switch (TREE_CODE (type))
10951 case REAL_TYPE:
10952 mode = TYPE_MODE (type);
10953 if (!SCALAR_FLOAT_MODE_P (mode))
10954 return -1;
10956 if (*modep == VOIDmode)
10957 *modep = mode;
10959 if (*modep == mode)
10960 return 1;
10962 break;
10964 case COMPLEX_TYPE:
10965 mode = TYPE_MODE (TREE_TYPE (type));
10966 if (!SCALAR_FLOAT_MODE_P (mode))
10967 return -1;
10969 if (*modep == VOIDmode)
10970 *modep = mode;
10972 if (*modep == mode)
10973 return 2;
10975 break;
10977 case VECTOR_TYPE:
10978 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10979 return -1;
10981 /* Use V4SImode as representative of all 128-bit vector types. */
10982 size = int_size_in_bytes (type);
10983 switch (size)
10985 case 16:
10986 mode = V4SImode;
10987 break;
10988 default:
10989 return -1;
10992 if (*modep == VOIDmode)
10993 *modep = mode;
10995 /* Vector modes are considered to be opaque: two vectors are
10996 equivalent for the purposes of being homogeneous aggregates
10997 if they are the same size. */
10998 if (*modep == mode)
10999 return 1;
11001 break;
11003 case ARRAY_TYPE:
11005 int count;
11006 tree index = TYPE_DOMAIN (type);
11008 /* Can't handle incomplete types nor sizes that are not
11009 fixed. */
11010 if (!COMPLETE_TYPE_P (type)
11011 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11012 return -1;
11014 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
11015 if (count == -1
11016 || !index
11017 || !TYPE_MAX_VALUE (index)
11018 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
11019 || !TYPE_MIN_VALUE (index)
11020 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
11021 || count < 0)
11022 return -1;
11024 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
11025 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
11027 /* There must be no padding. */
11028 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11029 return -1;
11031 return count;
11034 case RECORD_TYPE:
11036 int count = 0;
11037 int sub_count;
11038 tree field;
11040 /* Can't handle incomplete types nor sizes that are not
11041 fixed. */
11042 if (!COMPLETE_TYPE_P (type)
11043 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11044 return -1;
11046 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11048 if (TREE_CODE (field) != FIELD_DECL)
11049 continue;
11051 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11052 if (sub_count < 0)
11053 return -1;
11054 count += sub_count;
11057 /* There must be no padding. */
11058 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11059 return -1;
11061 return count;
11064 case UNION_TYPE:
11065 case QUAL_UNION_TYPE:
11067 /* These aren't very interesting except in a degenerate case. */
11068 int count = 0;
11069 int sub_count;
11070 tree field;
11072 /* Can't handle incomplete types nor sizes that are not
11073 fixed. */
11074 if (!COMPLETE_TYPE_P (type)
11075 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11076 return -1;
11078 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11080 if (TREE_CODE (field) != FIELD_DECL)
11081 continue;
11083 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11084 if (sub_count < 0)
11085 return -1;
11086 count = count > sub_count ? count : sub_count;
11089 /* There must be no padding. */
11090 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11091 return -1;
11093 return count;
11096 default:
11097 break;
11100 return -1;
11103 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11104 float or vector aggregate that shall be passed in FP/vector registers
11105 according to the ELFv2 ABI, return the homogeneous element mode in
11106 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11108 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
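/* Rough sketch of the outcome under ELFv2, assuming AGGR_ARG_NUM_REG
   is 8 per the ELFv2 homogeneous-aggregate limit:

     struct { double x, y; }      -> true, *ELT_MODE = DFmode, *N_ELTS = 2
     struct { vector int a, b; }  -> true, *ELT_MODE = V4SImode, *N_ELTS = 2
     struct { double d[9]; }      -> false, 9 registers exceed the limit  */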
11110 static bool
11111 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11112 machine_mode *elt_mode,
11113 int *n_elts)
11115 /* Note that we do not accept complex types at the top level as
11116 homogeneous aggregates; these types are handled via the
11117 targetm.calls.split_complex_arg mechanism. Complex types
11118 can be elements of homogeneous aggregates, however. */
11119 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
11121 machine_mode field_mode = VOIDmode;
11122 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11124 if (field_count > 0)
11126 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11127 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11129 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11130 up to AGGR_ARG_NUM_REG registers. */
11131 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11133 if (elt_mode)
11134 *elt_mode = field_mode;
11135 if (n_elts)
11136 *n_elts = field_count;
11137 return true;
11142 if (elt_mode)
11143 *elt_mode = mode;
11144 if (n_elts)
11145 *n_elts = 1;
11146 return false;
11149 /* Return a nonzero value to say to return the function value in
11150 memory, just as large structures are always returned. TYPE will be
11151 the data type of the value, and FNTYPE will be the type of the
11152 function doing the returning, or @code{NULL} for libcalls.
11154 The AIX ABI for the RS/6000 specifies that all structures are
11155 returned in memory. The Darwin ABI does the same.
11157 For the Darwin 64 Bit ABI, a function result can be returned in
11158 registers or in memory, depending on the size of the return data
11159 type. If it is returned in registers, the value occupies the same
11160 registers as it would if it were the first and only function
11161 argument. Otherwise, the function places its result in memory at
11162 the location pointed to by GPR3.
11164 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11165 but a draft put them in memory, and GCC used to implement the draft
11166 instead of the final standard. Therefore, aix_struct_return
11167 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11168 compatibility can change DRAFT_V4_STRUCT_RET to override the
11169 default, and -m switches get the final word. See
11170 rs6000_option_override_internal for more details.
11172 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11173 long double support is enabled. These values are returned in memory.
11175 int_size_in_bytes returns -1 for variable size objects, which go in
11176 memory always. The cast to unsigned makes -1 > 8. */
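/* Some concrete cases, as a rough guide:

     ELFv2:  struct { double x, y; }   -> registers (homogeneous aggregate)
     ELFv2:  struct { char c[16]; }    -> registers (aggregate <= 16 bytes)
     AIX:    struct { int i; }         -> memory (aix_struct_return)
     any:    variable-size aggregate   -> memory (-1 casts to > 8)  */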
11178 static bool
11179 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11181 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11182 if (TARGET_MACHO
11183 && rs6000_darwin64_abi
11184 && TREE_CODE (type) == RECORD_TYPE
11185 && int_size_in_bytes (type) > 0)
11187 CUMULATIVE_ARGS valcum;
11188 rtx valret;
11190 valcum.words = 0;
11191 valcum.fregno = FP_ARG_MIN_REG;
11192 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11193 /* Do a trial code generation as if this were going to be passed
11194 as an argument; if any part goes in memory, we return NULL. */
11195 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11196 if (valret)
11197 return false;
11198 /* Otherwise fall through to more conventional ABI rules. */
11201 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers.  */
11202 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11203 NULL, NULL))
11204 return false;
11206 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
11207 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11208 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11209 return false;
11211 if (AGGREGATE_TYPE_P (type)
11212 && (aix_struct_return
11213 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11214 return true;
11216 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11217 modes only exist for GCC vector types if -maltivec. */
11218 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11219 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11220 return false;
11222 /* Return synthetic vectors in memory. */
11223 if (TREE_CODE (type) == VECTOR_TYPE
11224 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11226 static bool warned_for_return_big_vectors = false;
11227 if (!warned_for_return_big_vectors)
11229 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11230 "non-standard ABI extension with no compatibility "
11231 "guarantee");
11232 warned_for_return_big_vectors = true;
11234 return true;
11237 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11238 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11239 return true;
11241 return false;
11244 /* Specify whether values returned in registers should be at the most
11245 significant end of a register. We want aggregates returned by
11246 value to match the way aggregates are passed to functions. */
11248 static bool
11249 rs6000_return_in_msb (const_tree valtype)
11251 return (DEFAULT_ABI == ABI_ELFv2
11252 && BYTES_BIG_ENDIAN
11253 && AGGREGATE_TYPE_P (valtype)
11254 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
11255 == PAD_UPWARD));
11258 #ifdef HAVE_AS_GNU_ATTRIBUTE
11259 /* Return TRUE if a call to function FNDECL may be one that
11260 potentially affects the function calling ABI of the object file. */
11262 static bool
11263 call_ABI_of_interest (tree fndecl)
11265 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11267 struct cgraph_node *c_node;
11269 /* Libcalls are always interesting. */
11270 if (fndecl == NULL_TREE)
11271 return true;
11273 /* Any call to an external function is interesting. */
11274 if (DECL_EXTERNAL (fndecl))
11275 return true;
11277 /* Interesting functions that we are emitting in this object file. */
11278 c_node = cgraph_node::get (fndecl);
11279 c_node = c_node->ultimate_alias_target ();
11280 return !c_node->only_called_directly_p ();
11282 return false;
11284 #endif
11286 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11287 for a call to a function whose data type is FNTYPE.
11288 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11290 For incoming args we set the number of prototyped arguments high
11291 enough that we never return a PARALLEL. */
11293 void
11294 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11295 rtx libname ATTRIBUTE_UNUSED, int incoming,
11296 int libcall, int n_named_args,
11297 tree fndecl ATTRIBUTE_UNUSED,
11298 machine_mode return_mode ATTRIBUTE_UNUSED)
11300 static CUMULATIVE_ARGS zero_cumulative;
11302 *cum = zero_cumulative;
11303 cum->words = 0;
11304 cum->fregno = FP_ARG_MIN_REG;
11305 cum->vregno = ALTIVEC_ARG_MIN_REG;
11306 cum->prototype = (fntype && prototype_p (fntype));
11307 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11308 ? CALL_LIBCALL : CALL_NORMAL);
11309 cum->sysv_gregno = GP_ARG_MIN_REG;
11310 cum->stdarg = stdarg_p (fntype);
11311 cum->libcall = libcall;
11313 cum->nargs_prototype = 0;
11314 if (incoming || cum->prototype)
11315 cum->nargs_prototype = n_named_args;
11317 /* Check for a longcall attribute. */
11318 if ((!fntype && rs6000_default_long_calls)
11319 || (fntype
11320 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11321 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11322 cum->call_cookie |= CALL_LONG;
11324 if (TARGET_DEBUG_ARG)
11326 fprintf (stderr, "\ninit_cumulative_args:");
11327 if (fntype)
11329 tree ret_type = TREE_TYPE (fntype);
11330 fprintf (stderr, " ret code = %s,",
11331 get_tree_code_name (TREE_CODE (ret_type)));
11334 if (cum->call_cookie & CALL_LONG)
11335 fprintf (stderr, " longcall,");
11337 fprintf (stderr, " proto = %d, nargs = %d\n",
11338 cum->prototype, cum->nargs_prototype);
11341 #ifdef HAVE_AS_GNU_ATTRIBUTE
11342 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11344 cum->escapes = call_ABI_of_interest (fndecl);
11345 if (cum->escapes)
11347 tree return_type;
11349 if (fntype)
11351 return_type = TREE_TYPE (fntype);
11352 return_mode = TYPE_MODE (return_type);
11354 else
11355 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11357 if (return_type != NULL)
11359 if (TREE_CODE (return_type) == RECORD_TYPE
11360 && TYPE_TRANSPARENT_AGGR (return_type))
11362 return_type = TREE_TYPE (first_field (return_type));
11363 return_mode = TYPE_MODE (return_type);
11365 if (AGGREGATE_TYPE_P (return_type)
11366 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11367 <= 8))
11368 rs6000_returns_struct = true;
11370 if (SCALAR_FLOAT_MODE_P (return_mode))
11372 rs6000_passes_float = true;
11373 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11374 && (FLOAT128_IBM_P (return_mode)
11375 || FLOAT128_IEEE_P (return_mode)
11376 || (return_type != NULL
11377 && (TYPE_MAIN_VARIANT (return_type)
11378 == long_double_type_node))))
11379 rs6000_passes_long_double = true;
11381 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11382 || PAIRED_VECTOR_MODE (return_mode))
11383 rs6000_passes_vector = true;
11386 #endif
11388 if (fntype
11389 && !TARGET_ALTIVEC
11390 && TARGET_ALTIVEC_ABI
11391 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11393 error ("cannot return value in vector register because"
11394 " altivec instructions are disabled, use %qs"
11395 " to enable them", "-maltivec");
11399 /* The mode the ABI uses for a word. This is not the same as word_mode
11400 for -m32 -mpowerpc64. This is used to implement various target hooks. */
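/* E.g. with -m32 -mpowerpc64 the registers are 64 bits wide, so
   word_mode is DImode, yet the 32-bit ABI still lays out arguments
   in 4-byte words; hence SImode is returned in that case.  */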
11402 static scalar_int_mode
11403 rs6000_abi_word_mode (void)
11405 return TARGET_32BIT ? SImode : DImode;
11408 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11409 static char *
11410 rs6000_offload_options (void)
11412 if (TARGET_64BIT)
11413 return xstrdup ("-foffload-abi=lp64");
11414 else
11415 return xstrdup ("-foffload-abi=ilp32");
11418 /* On rs6000, function arguments are promoted, as are function return
11419 values. */
11421 static machine_mode
11422 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11423 machine_mode mode,
11424 int *punsignedp ATTRIBUTE_UNUSED,
11425 const_tree, int)
11427 PROMOTE_MODE (mode, *punsignedp, type);
11429 return mode;
11432 /* Return true if TYPE must be passed on the stack and not in registers. */
11434 static bool
11435 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11437 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11438 return must_pass_in_stack_var_size (mode, type);
11439 else
11440 return must_pass_in_stack_var_size_or_pad (mode, type);
11443 static inline bool
11444 is_complex_IBM_long_double (machine_mode mode)
11446 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
11449 /* Whether ABI_V4 passes MODE args to a function in floating point
11450 registers. */
11452 static bool
11453 abi_v4_pass_in_fpr (machine_mode mode)
11455 if (!TARGET_HARD_FLOAT)
11456 return false;
11457 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11458 return true;
11459 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11460 return true;
11461 /* ABI_V4 passes complex IBM long double in 8 gprs.
11462 Stupid, but we can't change the ABI now. */
11463 if (is_complex_IBM_long_double (mode))
11464 return false;
11465 if (FLOAT128_2REG_P (mode))
11466 return true;
11467 if (DECIMAL_FLOAT_MODE_P (mode))
11468 return true;
11469 return false;
11472 /* Implement TARGET_FUNCTION_ARG_PADDING.
11474 For the AIX ABI structs are always stored left shifted in their
11475 argument slot. */
11477 static pad_direction
11478 rs6000_function_arg_padding (machine_mode mode, const_tree type)
11480 #ifndef AGGREGATE_PADDING_FIXED
11481 #define AGGREGATE_PADDING_FIXED 0
11482 #endif
11483 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11484 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11485 #endif
11487 if (!AGGREGATE_PADDING_FIXED)
11489 /* GCC used to pass structures of the same size as integer types as
11490 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING,
11491 i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11492 passed padded downward, except that -mstrict-align further
11493 muddied the water in that multi-component structures of 2 and 4
11494 bytes in size were passed padded upward.
11496 The following arranges for best compatibility with previous
11497 versions of gcc, but removes the -mstrict-align dependency. */
11498 if (BYTES_BIG_ENDIAN)
11500 HOST_WIDE_INT size = 0;
11502 if (mode == BLKmode)
11504 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11505 size = int_size_in_bytes (type);
11507 else
11508 size = GET_MODE_SIZE (mode);
11510 if (size == 1 || size == 2 || size == 4)
11511 return PAD_DOWNWARD;
11513 return PAD_UPWARD;
11516 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11518 if (type != 0 && AGGREGATE_TYPE_P (type))
11519 return PAD_UPWARD;
11522 /* Fall back to the default. */
11523 return default_function_arg_padding (mode, type);
11526 /* If defined, a C expression that gives the alignment boundary, in bits,
11527 of an argument with the specified mode and type. If it is not defined,
11528 PARM_BOUNDARY is used for all arguments.
11530 V.4 wants long longs and doubles to be double word aligned. Just
11531 testing the mode size is a boneheaded way to do this as it means
11532 that other types such as complex int are also double word aligned.
11533 However, we're stuck with this because changing the ABI might break
11534 existing library interfaces.
11536 Quadword align Altivec/VSX vectors.
11537 Quadword align large synthetic vector types. */
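/* The boundaries computed below work out roughly as:

     8-byte scalars (and IBM long double) under V.4   -> 64
     IEEE 128-bit floating-point modes                -> 128
     AltiVec/VSX vectors, GCC vectors >= 16 bytes     -> 128
     GCC vectors of 8..15 bytes                       -> 64
     big-alignment aggregates per the rules below     -> 128
     everything else                                  -> PARM_BOUNDARY  */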
11539 static unsigned int
11540 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11542 machine_mode elt_mode;
11543 int n_elts;
11545 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11547 if (DEFAULT_ABI == ABI_V4
11548 && (GET_MODE_SIZE (mode) == 8
11549 || (TARGET_HARD_FLOAT
11550 && !is_complex_IBM_long_double (mode)
11551 && FLOAT128_2REG_P (mode))))
11552 return 64;
11553 else if (FLOAT128_VECTOR_P (mode))
11554 return 128;
11555 else if (PAIRED_VECTOR_MODE (mode)
11556 || (type && TREE_CODE (type) == VECTOR_TYPE
11557 && int_size_in_bytes (type) >= 8
11558 && int_size_in_bytes (type) < 16))
11559 return 64;
11560 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11561 || (type && TREE_CODE (type) == VECTOR_TYPE
11562 && int_size_in_bytes (type) >= 16))
11563 return 128;
11565 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11566 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11567 -mcompat-align-parm is used. */
11568 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11569 || DEFAULT_ABI == ABI_ELFv2)
11570 && type && TYPE_ALIGN (type) > 64)
11572 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11573 or homogeneous float/vector aggregates here. We already handled
11574 vector aggregates above, but still need to check for float here. */
11575 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11576 && !SCALAR_FLOAT_MODE_P (elt_mode));
11578 /* We used to check for BLKmode instead of the above aggregate type
11579 check. Warn when this results in any difference to the ABI. */
11580 if (aggregate_p != (mode == BLKmode))
11582 static bool warned;
11583 if (!warned && warn_psabi)
11585 warned = true;
11586 inform (input_location,
11587 "the ABI of passing aggregates with %d-byte alignment"
11588 " has changed in GCC 5",
11589 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11593 if (aggregate_p)
11594 return 128;
11597 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11598 implement the "aggregate type" check as a BLKmode check here; this
11599 means certain aggregate types are in fact not aligned. */
11600 if (TARGET_MACHO && rs6000_darwin64_abi
11601 && mode == BLKmode
11602 && type && TYPE_ALIGN (type) > 64)
11603 return 128;
11605 return PARM_BOUNDARY;
11608 /* The offset in words to the start of the parameter save area. */
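/* These counts correspond to the fixed frame header in front of the
   parameter save area: 2 words (back chain, LR) for V.4, 4 doublewords
   (back chain, CR, LR, TOC) for ELFv2, and 6 words for AIX; the exact
   field breakdown here is descriptive rather than load-bearing.  */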
11610 static unsigned int
11611 rs6000_parm_offset (void)
11613 return (DEFAULT_ABI == ABI_V4 ? 2
11614 : DEFAULT_ABI == ABI_ELFv2 ? 4
11615 : 6);
11618 /* For a function parm of MODE and TYPE, return the starting word in
11619 the parameter area. NWORDS of the parameter area are already used. */
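/* Worked example, 64-bit ELFv2 (rs6000_parm_offset () == 4): for a
   16-byte-aligned argument, ALIGN == 128 / PARM_BOUNDARY - 1 == 1;
   with NWORDS == 3 the result is 3 + (-(4 + 3) & 1) == 4, and word
   4 + offset 4 == 8 doublewords == 64 bytes from the frame base,
   which is indeed a 16-byte boundary.  */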
11621 static unsigned int
11622 rs6000_parm_start (machine_mode mode, const_tree type,
11623 unsigned int nwords)
11625 unsigned int align;
11627 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11628 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11631 /* Compute the size (in words) of a function argument. */
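/* E.g. a 10-byte BLKmode struct takes (10 + 3) >> 2 == 3 words when
   TARGET_32BIT and (10 + 7) >> 3 == 2 doublewords otherwise; partial
   words always round up to a whole register.  */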
11633 static unsigned long
11634 rs6000_arg_size (machine_mode mode, const_tree type)
11636 unsigned long size;
11638 if (mode != BLKmode)
11639 size = GET_MODE_SIZE (mode);
11640 else
11641 size = int_size_in_bytes (type);
11643 if (TARGET_32BIT)
11644 return (size + 3) >> 2;
11645 else
11646 return (size + 7) >> 3;
11649 /* Use this to flush pending int fields. */
11651 static void
11652 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11653 HOST_WIDE_INT bitpos, int final)
11655 unsigned int startbit, endbit;
11656 int intregs, intoffset;
11658 /* Handle the situations where a float is taking up the first half
11659 of the GPR, and the other half is empty (typically due to
11660 alignment restrictions). We can detect this by an 8-byte-aligned
11661 int field, or by seeing that this is the final flush for this
11662 argument. Count the word and continue on. */
11663 if (cum->floats_in_gpr == 1
11664 && (cum->intoffset % 64 == 0
11665 || (cum->intoffset == -1 && final)))
11667 cum->words++;
11668 cum->floats_in_gpr = 0;
11671 if (cum->intoffset == -1)
11672 return;
11674 intoffset = cum->intoffset;
11675 cum->intoffset = -1;
11676 cum->floats_in_gpr = 0;
11678 if (intoffset % BITS_PER_WORD != 0)
11680 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11681 if (!int_mode_for_size (bits, 0).exists ())
11683 /* We couldn't find an appropriate mode, which happens,
11684 e.g., in packed structs when there are 3 bytes to load.
11685 Back intoffset back to the beginning of the word in this
11686 case. */
11687 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11691 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11692 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11693 intregs = (endbit - startbit) / BITS_PER_WORD;
11694 cum->words += intregs;
11695 /* words should be unsigned. */
11696 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11698 int pad = (endbit/BITS_PER_WORD) - cum->words;
11699 cum->words += pad;
11703 /* The darwin64 ABI calls for us to recurse down through structs,
11704 looking for elements passed in registers. Unfortunately, we have
11705 to track int register count here also because of misalignments
11706 in powerpc alignment mode. */
11708 static void
11709 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11710 const_tree type,
11711 HOST_WIDE_INT startbitpos)
11713 tree f;
11715 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11716 if (TREE_CODE (f) == FIELD_DECL)
11718 HOST_WIDE_INT bitpos = startbitpos;
11719 tree ftype = TREE_TYPE (f);
11720 machine_mode mode;
11721 if (ftype == error_mark_node)
11722 continue;
11723 mode = TYPE_MODE (ftype);
11725 if (DECL_SIZE (f) != 0
11726 && tree_fits_uhwi_p (bit_position (f)))
11727 bitpos += int_bit_position (f);
11729 /* ??? FIXME: else assume zero offset. */
11731 if (TREE_CODE (ftype) == RECORD_TYPE)
11732 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11733 else if (USE_FP_FOR_ARG_P (cum, mode))
11735 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11736 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11737 cum->fregno += n_fpregs;
11738 /* Single-precision floats present a special problem for
11739 us, because they are smaller than an 8-byte GPR, and so
11740 the structure-packing rules combined with the standard
11741 varargs behavior mean that we want to pack float/float
11742 and float/int combinations into a single register's
11743 space. This is complicated by the arg advance flushing,
11744 which works on arbitrarily large groups of int-type
11745 fields. */
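/* E.g. for struct { float f; float g; } both fields share one GPR's
   space: the first float only bumps floats_in_gpr, and the second
   takes the "two floats in a word" branch below.  */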
11746 if (mode == SFmode)
11748 if (cum->floats_in_gpr == 1)
11750 /* Two floats in a word; count the word and reset
11751 the float count. */
11752 cum->words++;
11753 cum->floats_in_gpr = 0;
11755 else if (bitpos % 64 == 0)
11757 /* A float at the beginning of an 8-byte word;
11758 count it and put off adjusting cum->words until
11759 we see if an arg advance flush is going to do it
11760 for us. */
11761 cum->floats_in_gpr++;
11763 else
11765 /* The float is at the end of a word, preceded
11766 by integer fields, so the arg advance flush
11767 just above has already set cum->words and
11768 everything is taken care of. */
11771 else
11772 cum->words += n_fpregs;
11774 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11776 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11777 cum->vregno++;
11778 cum->words += 2;
11780 else if (cum->intoffset == -1)
11781 cum->intoffset = bitpos;
11785 /* Check for an item that needs special handling under the Darwin
11786 64-bit ABI: a record type whose mode is BLKmode, or a record type
11787 exactly 8 bytes in size. */
11788 static int
11789 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11791 return rs6000_darwin64_abi
11792 && ((mode == BLKmode
11793 && TREE_CODE (type) == RECORD_TYPE
11794 && int_size_in_bytes (type) > 0)
11795 || (type && TREE_CODE (type) == RECORD_TYPE
11796 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11799 /* Update the data in CUM to advance over an argument
11800 of mode MODE and data type TYPE.
11801 (TYPE is null for libcalls where that information may not be available.)
11803 Note that for args passed by reference, function_arg will be called
11804 with MODE and TYPE set to that of the pointer to the arg, not the arg
11805 itself. */
11807 static void
11808 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11809 const_tree type, bool named, int depth)
11811 machine_mode elt_mode;
11812 int n_elts;
11814 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11816 /* Only tick off an argument if we're not recursing. */
11817 if (depth == 0)
11818 cum->nargs_prototype--;
11820 #ifdef HAVE_AS_GNU_ATTRIBUTE
11821 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11822 && cum->escapes)
11824 if (SCALAR_FLOAT_MODE_P (mode))
11826 rs6000_passes_float = true;
11827 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11828 && (FLOAT128_IBM_P (mode)
11829 || FLOAT128_IEEE_P (mode)
11830 || (type != NULL
11831 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11832 rs6000_passes_long_double = true;
11834 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11835 || (PAIRED_VECTOR_MODE (mode)
11836 && !cum->stdarg
11837 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11838 rs6000_passes_vector = true;
11840 #endif
11842 if (TARGET_ALTIVEC_ABI
11843 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11844 || (type && TREE_CODE (type) == VECTOR_TYPE
11845 && int_size_in_bytes (type) == 16)))
11847 bool stack = false;
11849 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11851 cum->vregno += n_elts;
11853 if (!TARGET_ALTIVEC)
11854 error ("cannot pass argument in vector register because"
11855 " altivec instructions are disabled, use %qs"
11856 " to enable them", "-maltivec");
11858 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11859 even if it is going to be passed in a vector register.
11860 Darwin does the same for variable-argument functions. */
11861 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11862 && TARGET_64BIT)
11863 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11864 stack = true;
11866 else
11867 stack = true;
11869 if (stack)
11871 int align;
11873 /* Vector parameters must be 16-byte aligned. In 32-bit
11874 mode this means we need to take into account the offset
11875 to the parameter save area. In 64-bit mode, they just
11876 have to start on an even word, since the parameter save
11877 area is 16-byte aligned. */
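/* E.g. in 32-bit mode with rs6000_parm_offset () == 2 and
   cum->words == 3: align == -(2 + 3) & 3 == 3, so the vector
   starts at word 6, and 2 + 6 == 8 words == 32 bytes from the
   frame base, a 16-byte boundary.  */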
11878 if (TARGET_32BIT)
11879 align = -(rs6000_parm_offset () + cum->words) & 3;
11880 else
11881 align = cum->words & 1;
11882 cum->words += align + rs6000_arg_size (mode, type);
11884 if (TARGET_DEBUG_ARG)
11886 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11887 cum->words, align);
11888 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11889 cum->nargs_prototype, cum->prototype,
11890 GET_MODE_NAME (mode));
11894 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11896 int size = int_size_in_bytes (type);
11897 /* Variable sized types have size == -1 and are
11898 treated as if consisting entirely of ints.
11899 Pad to 16 byte boundary if needed. */
11900 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11901 && (cum->words % 2) != 0)
11902 cum->words++;
11903 /* For varargs, we can just go up by the size of the struct. */
11904 if (!named)
11905 cum->words += (size + 7) / 8;
11906 else
11908 /* It is tempting to say int register count just goes up by
11909 sizeof(type)/8, but this is wrong in a case such as
11910 { int; double; int; } [powerpc alignment]. We have to
11911 grovel through the fields for these too. */
11912 cum->intoffset = 0;
11913 cum->floats_in_gpr = 0;
11914 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11915 rs6000_darwin64_record_arg_advance_flush (cum,
11916 size * BITS_PER_UNIT, 1);
11918 if (TARGET_DEBUG_ARG)
11920 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11921 cum->words, TYPE_ALIGN (type), size);
11922 fprintf (stderr,
11923 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11924 cum->nargs_prototype, cum->prototype,
11925 GET_MODE_NAME (mode));
11928 else if (DEFAULT_ABI == ABI_V4)
11930 if (abi_v4_pass_in_fpr (mode))
11932 /* _Decimal128 must use an even/odd register pair. This assumes
11933 that the register number is odd when fregno is odd. */
11934 if (mode == TDmode && (cum->fregno % 2) == 1)
11935 cum->fregno++;
11937 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11938 <= FP_ARG_V4_MAX_REG)
11939 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11940 else
11942 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11943 if (mode == DFmode || FLOAT128_IBM_P (mode)
11944 || mode == DDmode || mode == TDmode)
11945 cum->words += cum->words & 1;
11946 cum->words += rs6000_arg_size (mode, type);
11949 else
11951 int n_words = rs6000_arg_size (mode, type);
11952 int gregno = cum->sysv_gregno;
11954 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11955 So is any other 2-word item, such as complex int, due to a
11956 historical mistake. */
11957 if (n_words == 2)
11958 gregno += (1 - gregno) & 1;
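/* The expression above keeps odd register numbers and bumps even
   ones: gregno == 3 (r3) gives (1 - 3) & 1 == 0, while gregno == 4
   (r4) gives (1 - 4) & 1 == 1, pushing the pair up to (r5,r6).  */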
11960 /* Multi-reg args are not split between registers and stack. */
11961 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11963 /* Long long is aligned on the stack. So are other 2 word
11964 items such as complex int due to a historical mistake. */
11965 if (n_words == 2)
11966 cum->words += cum->words & 1;
11967 cum->words += n_words;
11970 /* Note: we continue to accumulate gregno even once we've started
11971 spilling to the stack; the out-of-range value is how
11972 expand_builtin_saveregs learns that spilling has begun. */
11973 cum->sysv_gregno = gregno + n_words;
11976 if (TARGET_DEBUG_ARG)
11978 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11979 cum->words, cum->fregno);
11980 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11981 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11982 fprintf (stderr, "mode = %4s, named = %d\n",
11983 GET_MODE_NAME (mode), named);
11986 else
11988 int n_words = rs6000_arg_size (mode, type);
11989 int start_words = cum->words;
11990 int align_words = rs6000_parm_start (mode, type, start_words);
11992 cum->words = align_words + n_words;
11994 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11996 /* _Decimal128 must be passed in an even/odd float register pair.
11997 This assumes that the register number is odd when fregno is
11998 odd. */
11999 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12000 cum->fregno++;
12001 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
12004 if (TARGET_DEBUG_ARG)
12006 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
12007 cum->words, cum->fregno);
12008 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
12009 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
12010 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
12011 named, align_words - start_words, depth);
12016 static void
12017 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
12018 const_tree type, bool named)
12020 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
12024 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
12025 structure between cum->intoffset and bitpos to integer registers. */
12027 static void
12028 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12029 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12031 machine_mode mode;
12032 unsigned int regno;
12033 unsigned int startbit, endbit;
12034 int this_regno, intregs, intoffset;
12035 rtx reg;
12037 if (cum->intoffset == -1)
12038 return;
12040 intoffset = cum->intoffset;
12041 cum->intoffset = -1;
12043 /* If this is the trailing part of a word, try to only load that
12044 much into the register. Otherwise load the whole register. Note
12045 that in the latter case we may pick up unwanted bits. It's not a
12046 problem at the moment, but we may wish to revisit this. */
12048 if (intoffset % BITS_PER_WORD != 0)
12050 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
12051 if (!int_mode_for_size (bits, 0).exists (&mode))
12053 /* We couldn't find an appropriate mode, which happens,
12054 e.g., in packed structs when there are 3 bytes to load.
12055 Back intoffset back to the beginning of the word in this
12056 case. */
12057 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12058 mode = word_mode;
12061 else
12062 mode = word_mode;
12064 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12065 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12066 intregs = (endbit - startbit) / BITS_PER_WORD;
12067 this_regno = cum->words + intoffset / BITS_PER_WORD;
12069 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12070 cum->use_stack = 1;
12072 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12073 if (intregs <= 0)
12074 return;
12076 intoffset /= BITS_PER_UNIT;
12079 regno = GP_ARG_MIN_REG + this_regno;
12080 reg = gen_rtx_REG (mode, regno);
12081 rvec[(*k)++] =
12082 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12084 this_regno += 1;
12085 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12086 mode = word_mode;
12087 intregs -= 1;
12089 while (intregs > 0);
12092 /* Recursive workhorse for the following. */
12094 static void
12095 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12096 HOST_WIDE_INT startbitpos, rtx rvec[],
12097 int *k)
12099 tree f;
12101 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12102 if (TREE_CODE (f) == FIELD_DECL)
12104 HOST_WIDE_INT bitpos = startbitpos;
12105 tree ftype = TREE_TYPE (f);
12106 machine_mode mode;
12107 if (ftype == error_mark_node)
12108 continue;
12109 mode = TYPE_MODE (ftype);
12111 if (DECL_SIZE (f) != 0
12112 && tree_fits_uhwi_p (bit_position (f)))
12113 bitpos += int_bit_position (f);
12115 /* ??? FIXME: else assume zero offset. */
12117 if (TREE_CODE (ftype) == RECORD_TYPE)
12118 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12119 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12121 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12122 #if 0
12123 switch (mode)
12125 case E_SCmode: mode = SFmode; break;
12126 case E_DCmode: mode = DFmode; break;
12127 case E_TCmode: mode = TFmode; break;
12128 default: break;
12130 #endif
12131 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12132 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12134 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12135 && (mode == TFmode || mode == TDmode));
12136 /* Long double or _Decimal128 split over regs and memory. */
12137 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12138 cum->use_stack = 1;
12140 rvec[(*k)++]
12141 = gen_rtx_EXPR_LIST (VOIDmode,
12142 gen_rtx_REG (mode, cum->fregno++),
12143 GEN_INT (bitpos / BITS_PER_UNIT));
12144 if (FLOAT128_2REG_P (mode))
12145 cum->fregno++;
12147 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12149 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12150 rvec[(*k)++]
12151 = gen_rtx_EXPR_LIST (VOIDmode,
12152 gen_rtx_REG (mode, cum->vregno++),
12153 GEN_INT (bitpos / BITS_PER_UNIT));
12155 else if (cum->intoffset == -1)
12156 cum->intoffset = bitpos;
12160 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12161 the register(s) to be used for each field and subfield of a struct
12162 being passed by value, along with the offset of where the
12163 register's value may be found in the block. FP fields go in FP
12164 register, vector fields go in vector registers, and everything
12165 else goes in int registers, packed as in memory.
12167 This code is also used for function return values. RETVAL indicates
12168 whether this is the case.
12170 Much of this is taken from the SPARC V9 port, which has a similar
12171 calling convention. */
12173 static rtx
12174 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12175 bool named, bool retval)
12177 rtx rvec[FIRST_PSEUDO_REGISTER];
12178 int k = 1, kbase = 1;
12179 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12180 /* This is a copy; modifications are not visible to our caller. */
12181 CUMULATIVE_ARGS copy_cum = *orig_cum;
12182 CUMULATIVE_ARGS *cum = &copy_cum;
12184 /* Pad to 16 byte boundary if needed. */
12185 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12186 && (cum->words % 2) != 0)
12187 cum->words++;
12189 cum->intoffset = 0;
12190 cum->use_stack = 0;
12191 cum->named = named;
12193 /* Put entries into rvec[] for individual FP and vector fields, and
12194 for the chunks of memory that go in int regs. Note we start at
12195 element 1; 0 is reserved for an indication of using memory, and
12196 may or may not be filled in below. */
12197 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12198 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12200 /* If any part of the struct went on the stack put all of it there.
12201 This hack is because the generic code for
12202 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12203 parts of the struct are not at the beginning. */
12204 if (cum->use_stack)
12206 if (retval)
12207 return NULL_RTX; /* doesn't go in registers at all */
12208 kbase = 0;
12209 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12211 if (k > 1 || cum->use_stack)
12212 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12213 else
12214 return NULL_RTX;
12217 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12219 static rtx
12220 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12221 int align_words)
12223 int n_units;
12224 int i, k;
12225 rtx rvec[GP_ARG_NUM_REG + 1];
12227 if (align_words >= GP_ARG_NUM_REG)
12228 return NULL_RTX;
12230 n_units = rs6000_arg_size (mode, type);
12232 /* Optimize the simple case where the arg fits in one gpr, except in
12233 the case of BLKmode due to assign_parms assuming that registers are
12234 BITS_PER_WORD wide. */
12235 if (n_units == 0
12236 || (n_units == 1 && mode != BLKmode))
12237 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12239 k = 0;
12240 if (align_words + n_units > GP_ARG_NUM_REG)
12241 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12242 using a magic NULL_RTX component.
12243 This is not strictly correct. Only some of the arg belongs in
12244 memory, not all of it. However, the normal scheme using
12245 function_arg_partial_nregs can result in unusual subregs, eg.
12246 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12247 store the whole arg to memory is often more efficient than code
12248 to store pieces, and we know that space is available in the right
12249 place for the whole arg. */
12250 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12252 i = 0;
12255 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12256 rtx off = GEN_INT (i++ * 4);
12257 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12259 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12261 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12264 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12265 but must also be copied into the parameter save area starting at
12266 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12267 to the GPRs and/or memory. Return the number of elements used. */
12269 static int
12270 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12271 int align_words, rtx *rvec)
12273 int k = 0;
12275 if (align_words < GP_ARG_NUM_REG)
12277 int n_words = rs6000_arg_size (mode, type);
12279 if (align_words + n_words > GP_ARG_NUM_REG
12280 || mode == BLKmode
12281 || (TARGET_32BIT && TARGET_POWERPC64))
12283 /* If this is partially on the stack, then we only
12284 include the portion actually in registers here. */
12285 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12286 int i = 0;
12288 if (align_words + n_words > GP_ARG_NUM_REG)
12290 /* Not all of the arg fits in gprs. Say that it goes in memory
12291 too, using a magic NULL_RTX component. Also see comment in
12292 rs6000_mixed_function_arg for why the normal
12293 function_arg_partial_nregs scheme doesn't work in this case. */
12294 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12299 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12300 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12301 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12303 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12305 else
12307 /* The whole arg fits in gprs. */
12308 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12309 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12312 else
12314 /* It's entirely in memory. */
12315 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12318 return k;
12321 /* RVEC is a vector of K components of an argument of mode MODE.
12322 Construct the final function_arg return value from it. */
12324 static rtx
12325 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12327 gcc_assert (k >= 1);
12329 /* Avoid returning a PARALLEL in the trivial cases. */
12330 if (k == 1)
12332 if (XEXP (rvec[0], 0) == NULL_RTX)
12333 return NULL_RTX;
12335 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12336 return XEXP (rvec[0], 0);
12339 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12342 /* Determine where to put an argument to a function.
12343 Value is zero to push the argument on the stack,
12344 or a hard register in which to store the argument.
12346 MODE is the argument's machine mode.
12347 TYPE is the data type of the argument (as a tree).
12348 This is null for libcalls where that information may
12349 not be available.
12350 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12351 the preceding args and about the function being called. It is
12352 not modified in this routine.
12353 NAMED is nonzero if this argument is a named parameter
12354 (otherwise it is an extra parameter matching an ellipsis).
12356 On RS/6000 the first eight words of non-FP are normally in registers
12357 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12358 Under V.4, the first 8 FP args are in registers.
12360 If this is floating-point and no prototype is specified, we use
12361 both an FP and integer register (or possibly FP reg and stack). Library
12362 functions (when CALL_LIBCALL is set) always have the proper types for args,
12363 so we can pass the FP value just in one register. emit_library_function
12364 doesn't support PARALLEL anyway.
12366 Note that for args passed by reference, function_arg will be called
12367 with MODE and TYPE set to that of the pointer to the arg, not the arg
12368 itself. */
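/* A sketch of typical results under 64-bit ELFv2 (FPR numbering
   assumes f0 is hard register 32):

     prototyped double, first arg   -> (reg:DF 33), i.e. f1
     unprototyped double            -> a PARALLEL naming both f1 and the
                                       corresponding GPR/memory slot
     integer arg past 8 words       -> NULL_RTX, pushed on the stack  */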
12370 static rtx
12371 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12372 const_tree type, bool named)
12374 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12375 enum rs6000_abi abi = DEFAULT_ABI;
12376 machine_mode elt_mode;
12377 int n_elts;
12379 /* Return a marker indicating whether the bit in CR1 that V.4 uses
12380 to say FP args were passed in registers needs to be set or cleared.
12381 Assume that we don't need the marker for software floating point,
12382 or compiler generated library calls. */
12383 if (mode == VOIDmode)
12385 if (abi == ABI_V4
12386 && (cum->call_cookie & CALL_LIBCALL) == 0
12387 && (cum->stdarg
12388 || (cum->nargs_prototype < 0
12389 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12390 && TARGET_HARD_FLOAT)
12391 return GEN_INT (cum->call_cookie
12392 | ((cum->fregno == FP_ARG_MIN_REG)
12393 ? CALL_V4_SET_FP_ARGS
12394 : CALL_V4_CLEAR_FP_ARGS));
12396 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12399 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12401 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12403 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12404 if (rslt != NULL_RTX)
12405 return rslt;
12406 /* Else fall through to usual handling. */
12409 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12411 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12412 rtx r, off;
12413 int i, k = 0;
12415 /* Do we also need to pass this argument in the parameter save area?
12416 Library support functions for IEEE 128-bit are assumed to not need the
12417 value passed both in GPRs and in vector registers. */
12418 if (TARGET_64BIT && !cum->prototype
12419 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12421 int align_words = ROUND_UP (cum->words, 2);
12422 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12425 /* Describe where this argument goes in the vector registers. */
12426 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12428 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12429 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12430 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12433 return rs6000_finish_function_arg (mode, rvec, k);
12435 else if (TARGET_ALTIVEC_ABI
12436 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12437 || (type && TREE_CODE (type) == VECTOR_TYPE
12438 && int_size_in_bytes (type) == 16)))
12440 if (named || abi == ABI_V4)
12441 return NULL_RTX;
12442 else
12444 /* Vector parameters to varargs functions under AIX or Darwin
12445 get passed in memory and possibly also in GPRs. */
12446 int align, align_words, n_words;
12447 machine_mode part_mode;
12449 /* Vector parameters must be 16-byte aligned. In 32-bit
12450 mode this means we need to take into account the offset
12451 to the parameter save area. In 64-bit mode, they just
12452 have to start on an even word, since the parameter save
12453 area is 16-byte aligned. */
12454 if (TARGET_32BIT)
12455 align = -(rs6000_parm_offset () + cum->words) & 3;
12456 else
12457 align = cum->words & 1;
12458 align_words = cum->words + align;
12460 /* Out of registers? Memory, then. */
12461 if (align_words >= GP_ARG_NUM_REG)
12462 return NULL_RTX;
12464 if (TARGET_32BIT && TARGET_POWERPC64)
12465 return rs6000_mixed_function_arg (mode, type, align_words);
12467 /* The vector value goes in GPRs. Only the part of the
12468 value in GPRs is reported here. */
12469 part_mode = mode;
12470 n_words = rs6000_arg_size (mode, type);
12471 if (align_words + n_words > GP_ARG_NUM_REG)
12472 /* Fortunately, there are only two possibilities: the value
12473 is either wholly in GPRs or half in GPRs and half not. */
12474 part_mode = DImode;
12476 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12480 else if (abi == ABI_V4)
12482 if (abi_v4_pass_in_fpr (mode))
12484 /* _Decimal128 must use an even/odd register pair. This assumes
12485 that the register number is odd when fregno is odd. */
12486 if (mode == TDmode && (cum->fregno % 2) == 1)
12487 cum->fregno++;
12489 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12490 <= FP_ARG_V4_MAX_REG)
12491 return gen_rtx_REG (mode, cum->fregno);
12492 else
12493 return NULL_RTX;
12495 else
12497 int n_words = rs6000_arg_size (mode, type);
12498 int gregno = cum->sysv_gregno;
12500 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12501 So is any other 2-word item, such as complex int, due to a
12502 historical mistake. */
12503 if (n_words == 2)
12504 gregno += (1 - gregno) & 1;
12506 /* Multi-reg args are not split between registers and stack. */
12507 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12508 return NULL_RTX;
12510 if (TARGET_32BIT && TARGET_POWERPC64)
12511 return rs6000_mixed_function_arg (mode, type,
12512 gregno - GP_ARG_MIN_REG);
12513 return gen_rtx_REG (mode, gregno);
12516 else
12518 int align_words = rs6000_parm_start (mode, type, cum->words);
12520 /* _Decimal128 must be passed in an even/odd float register pair.
12521 This assumes that the register number is odd when fregno is odd. */
12522 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12523 cum->fregno++;
12525 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12527 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12528 rtx r, off;
12529 int i, k = 0;
12530 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12531 int fpr_words;
12533 /* Do we also need to pass this argument in the parameter
12534 save area? */
12535 if (type && (cum->nargs_prototype <= 0
12536 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12537 && TARGET_XL_COMPAT
12538 && align_words >= GP_ARG_NUM_REG)))
12539 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12541 /* Describe where this argument goes in the fprs. */
12542 for (i = 0; i < n_elts
12543 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12545 /* Check if the argument is split over registers and memory.
12546 This can only ever happen for long double or _Decimal128;
12547 complex types are handled via split_complex_arg. */
12548 machine_mode fmode = elt_mode;
12549 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12551 gcc_assert (FLOAT128_2REG_P (fmode));
12552 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12555 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12556 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12557 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12560 /* If there were not enough FPRs to hold the argument, the rest
12561 usually goes into memory. However, if the current position
12562 is still within the register parameter area, a portion may
12563 actually have to go into GPRs.
12565 Note that it may happen that the portion of the argument
12566 passed in the first "half" of the first GPR was already
12567 passed in the last FPR as well.
12569 For unnamed arguments, we already set up GPRs to cover the
12570 whole argument in rs6000_psave_function_arg, so there is
12571 nothing further to do at this point. */
12572 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12573 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12574 && cum->nargs_prototype > 0)
12576 static bool warned;
12578 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12579 int n_words = rs6000_arg_size (mode, type);
12581 align_words += fpr_words;
12582 n_words -= fpr_words;
12586 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12587 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12588 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12590 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12592 if (!warned && warn_psabi)
12594 warned = true;
12595 inform (input_location,
12596 "the ABI of passing homogeneous float aggregates"
12597 " has changed in GCC 5");
12601 return rs6000_finish_function_arg (mode, rvec, k);
12603 else if (align_words < GP_ARG_NUM_REG)
12605 if (TARGET_32BIT && TARGET_POWERPC64)
12606 return rs6000_mixed_function_arg (mode, type, align_words);
12608 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12610 else
12611 return NULL_RTX;
12615 /* For an arg passed partly in registers and partly in memory, this is
12616 the number of bytes passed in registers. For args passed entirely in
12617 registers or entirely in memory, zero. When an arg is described by a
12618 PARALLEL, perhaps using more than one register type, this function
12619 returns the number of bytes used by the first element of the PARALLEL. */
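/* E.g. under 64-bit ELFv2, an 8-doubleword aggregate whose first word
   lands at align_words == 6 gets 2 doublewords in r9/r10 and the rest
   in memory, so this returns (8 - 6) * 8 == 16 bytes.  */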
12621 static int
12622 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12623 tree type, bool named)
12625 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12626 bool passed_in_gprs = true;
12627 int ret = 0;
12628 int align_words;
12629 machine_mode elt_mode;
12630 int n_elts;
12632 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12634 if (DEFAULT_ABI == ABI_V4)
12635 return 0;
12637 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12639 /* If we are passing this arg in the fixed parameter save area (gprs or
12640 memory) as well as VRs, we do not use the partial bytes mechanism;
12641 instead, rs6000_function_arg will return a PARALLEL including a memory
12642 element as necessary. Library support functions for IEEE 128-bit are
12643 assumed to not need the value passed both in GPRs and in vector
12644 registers. */
12645 if (TARGET_64BIT && !cum->prototype
12646 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12647 return 0;
12649 /* Otherwise, we pass in VRs only. Check for partial copies. */
12650 passed_in_gprs = false;
12651 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12652 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12655 /* In this complicated case we just disable the partial_nregs code. */
12656 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12657 return 0;
12659 align_words = rs6000_parm_start (mode, type, cum->words);
12661 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12663 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12665 /* If we are passing this arg in the fixed parameter save area
12666 (gprs or memory) as well as FPRs, we do not use the partial
12667 bytes mechanism; instead, rs6000_function_arg will return a
12668 PARALLEL including a memory element as necessary. */
12669 if (type
12670 && (cum->nargs_prototype <= 0
12671 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12672 && TARGET_XL_COMPAT
12673 && align_words >= GP_ARG_NUM_REG)))
12674 return 0;
12676 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12677 passed_in_gprs = false;
12678 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12680 /* Compute number of bytes / words passed in FPRs. If there
12681 is still space available in the register parameter area
12682 *after* that amount, a part of the argument will be passed
12683 in GPRs. In that case, the total amount passed in any
12684 registers is equal to the amount that would have been passed
12685 in GPRs if everything were passed there, so we fall back to
12686 the GPR code below to compute the appropriate value. */
12687 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12688 * MIN (8, GET_MODE_SIZE (elt_mode)));
12689 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12691 if (align_words + fpr_words < GP_ARG_NUM_REG)
12692 passed_in_gprs = true;
12693 else
12694 ret = fpr;
12698 if (passed_in_gprs
12699 && align_words < GP_ARG_NUM_REG
12700 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12701 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12703 if (ret != 0 && TARGET_DEBUG_ARG)
12704 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12706 return ret;
12709 /* A C expression that indicates when an argument must be passed by
12710 reference. If nonzero for an argument, a copy of that argument is
12711 made in memory and a pointer to the argument is passed instead of
12712 the argument itself. The pointer is passed in whatever way is
12713 appropriate for passing a pointer to that type.
12715 Under V.4, aggregates and long double are passed by reference.
12717 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12718 reference unless the AltiVec vector extension ABI is in force.
12720 As an extension to all ABIs, variable sized types are passed by
12721 reference. */
12723 static bool
12724 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12725 machine_mode mode, const_tree type,
12726 bool named ATTRIBUTE_UNUSED)
12728 if (!type)
12729 return 0;
12731 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12732 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12734 if (TARGET_DEBUG_ARG)
12735 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12736 return 1;
12739 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12741 if (TARGET_DEBUG_ARG)
12742 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12743 return 1;
12746 if (int_size_in_bytes (type) < 0)
12748 if (TARGET_DEBUG_ARG)
12749 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12750 return 1;
12753 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12754 modes only exist for GCC vector types if -maltivec. */
12755 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12757 if (TARGET_DEBUG_ARG)
12758 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12759 return 1;
12762 /* Pass synthetic vectors in memory. */
12763 if (TREE_CODE (type) == VECTOR_TYPE
12764 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12766 static bool warned_for_pass_big_vectors = false;
12767 if (TARGET_DEBUG_ARG)
12768 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12769 if (!warned_for_pass_big_vectors)
12771 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12772 "non-standard ABI extension with no compatibility "
12773 "guarantee");
12774 warned_for_pass_big_vectors = true;
12776 return 1;
12779 return 0;
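/* Examples (editorial sketch, not compiled here): under the V.4 ABI

     struct pair { int a, b; };
     void f (struct pair p);    // aggregate: passed by reference

   takes its argument by reference, and a synthetic vector wider than
   the hardware supports, e.g.

     typedef int v8si __attribute__ ((vector_size (32)));

   is likewise passed by reference, with the -Wpsabi warning above
   issued once per compilation.  */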
12782 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12783 already processed. Return true if the parameter must be passed
12784 (fully or partially) on the stack. */
12786 static bool
12787 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12789 machine_mode mode;
12790 int unsignedp;
12791 rtx entry_parm;
12793 /* Catch errors. */
12794 if (type == NULL || type == error_mark_node)
12795 return true;
12797 /* Handle types with no storage requirement. */
12798 if (TYPE_MODE (type) == VOIDmode)
12799 return false;
12802 /* Handle complex types: check (and advance past) each of the two halves. */
12802 if (TREE_CODE (type) == COMPLEX_TYPE)
12803 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12804 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12806 /* Handle transparent aggregates. */
12807 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12808 && TYPE_TRANSPARENT_AGGR (type))
12809 type = TREE_TYPE (first_field (type));
12811 /* See if this arg was passed by invisible reference. */
12812 if (pass_by_reference (get_cumulative_args (args_so_far),
12813 TYPE_MODE (type), type, true))
12814 type = build_pointer_type (type);
12816 /* Find mode as it is passed by the ABI. */
12817 unsignedp = TYPE_UNSIGNED (type);
12818 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12820 /* If we must pass in stack, we need a stack. */
12821 if (rs6000_must_pass_in_stack (mode, type))
12822 return true;
12824 /* If there is no incoming register, we need a stack. */
12825 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12826 if (entry_parm == NULL)
12827 return true;
12829 /* Likewise if we need to pass both in registers and on the stack. */
12830 if (GET_CODE (entry_parm) == PARALLEL
12831 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12832 return true;
12834 /* Also true if we're partially in registers and partially not. */
12835 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12836 return true;
12838 /* Update info on where next arg arrives in registers. */
12839 rs6000_function_arg_advance (args_so_far, mode, type, true);
12840 return false;
12843 /* Return true if FUN has no prototype, has a variable argument
12844 list, or passes any parameter in memory. */
12846 static bool
12847 rs6000_function_parms_need_stack (tree fun, bool incoming)
12849 tree fntype, result;
12850 CUMULATIVE_ARGS args_so_far_v;
12851 cumulative_args_t args_so_far;
12853 if (!fun)
12854 /* Must be a libcall, all of which only use reg parms. */
12855 return false;
12857 fntype = fun;
12858 if (!TYPE_P (fun))
12859 fntype = TREE_TYPE (fun);
12861 /* Varargs functions need the parameter save area. */
12862 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12863 return true;
12865 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12866 args_so_far = pack_cumulative_args (&args_so_far_v);
12868 /* When incoming, we will have been passed the function decl.
12869 It is necessary to use the decl to handle K&R style functions,
12870 where TYPE_ARG_TYPES may not be available. */
12871 if (incoming)
12873 gcc_assert (DECL_P (fun));
12874 result = DECL_RESULT (fun);
12876 else
12877 result = TREE_TYPE (fntype);
12879 if (result && aggregate_value_p (result, fntype))
12881 if (!TYPE_P (result))
12882 result = TREE_TYPE (result);
12883 result = build_pointer_type (result);
12884 rs6000_parm_needs_stack (args_so_far, result);
12887 if (incoming)
12889 tree parm;
12891 for (parm = DECL_ARGUMENTS (fun);
12892 parm && parm != void_list_node;
12893 parm = TREE_CHAIN (parm))
12894 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12895 return true;
12897 else
12899 function_args_iterator args_iter;
12900 tree arg_type;
12902 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12903 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12904 return true;
12907 return false;
12910 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12911 usually a constant depending on the ABI. However, in the ELFv2 ABI
12912 the register parameter area is optional when calling a function that
12913 has a prototype in scope, has no variable argument list, and passes
12914 all parameters in registers. */
12916 int
12917 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12919 int reg_parm_stack_space;
12921 switch (DEFAULT_ABI)
12923 default:
12924 reg_parm_stack_space = 0;
12925 break;
12927 case ABI_AIX:
12928 case ABI_DARWIN:
12929 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12930 break;
12932 case ABI_ELFv2:
12933 /* ??? Recomputing this every time is a bit expensive. Is there
12934 a place to cache this information? */
12935 if (rs6000_function_parms_need_stack (fun, incoming))
12936 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12937 else
12938 reg_parm_stack_space = 0;
12939 break;
12942 return reg_parm_stack_space;
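/* Example (editorial sketch): on 64-bit ELFv2, a call to a prototyped
   int f (int, int) passes everything in registers, so this returns 0
   and no parameter save area is allocated; a call to a varargs
   function such as printf makes rs6000_function_parms_need_stack
   return true and the full 64-byte area is reserved.  */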
12945 static void
12946 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12948 int i;
12949 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12951 if (nregs == 0)
12952 return;
12954 for (i = 0; i < nregs; i++)
12956 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12957 if (reload_completed)
12959 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12960 tem = NULL_RTX;
12961 else
12962 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12963 i * GET_MODE_SIZE (reg_mode));
12965 else
12966 tem = replace_equiv_address (tem, XEXP (tem, 0));
12968 gcc_assert (tem);
12970 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12974 /* Perform any actions needed for a function that is receiving a
12975 variable number of arguments.
12977 CUM is as above.
12979 MODE and TYPE are the mode and type of the current parameter.
12981 PRETEND_SIZE is a variable that should be set to the amount of stack
12982 that must be pushed by the prolog to pretend that our caller pushed
12983 it.
12985 Normally, this macro will push all remaining incoming registers on the
12986 stack and set PRETEND_SIZE to the length of the registers pushed. */
12988 static void
12989 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12990 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12991 int no_rtl)
12993 CUMULATIVE_ARGS next_cum;
12994 int reg_size = TARGET_32BIT ? 4 : 8;
12995 rtx save_area = NULL_RTX, mem;
12996 int first_reg_offset;
12997 alias_set_type set;
12999 /* Skip the last named argument. */
13000 next_cum = *get_cumulative_args (cum);
13001 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
13003 if (DEFAULT_ABI == ABI_V4)
13005 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
13007 if (! no_rtl)
13009 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
13010 HOST_WIDE_INT offset = 0;
13012 /* Try to optimize the size of the varargs save area.
13013 The ABI requires that ap.reg_save_area be doubleword
13014 aligned, but we don't need to allocate space for all
13015 the bytes, only those in which we will actually save
13016 anything. */
13017 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
13018 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
13019 if (TARGET_HARD_FLOAT
13020 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13021 && cfun->va_list_fpr_size)
13023 if (gpr_reg_num)
13024 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
13025 * UNITS_PER_FP_WORD;
13026 if (cfun->va_list_fpr_size
13027 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13028 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13029 else
13030 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13031 * UNITS_PER_FP_WORD;
13033 if (gpr_reg_num)
13035 offset = -((first_reg_offset * reg_size) & ~7);
13036 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13038 gpr_reg_num = cfun->va_list_gpr_size;
13039 if (reg_size == 4 && (first_reg_offset & 1))
13040 gpr_reg_num++;
13042 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13044 else if (fpr_size)
13045 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13046 * UNITS_PER_FP_WORD
13047 - (int) (GP_ARG_NUM_REG * reg_size);
13049 if (gpr_size + fpr_size)
13051 rtx reg_save_area
13052 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13053 gcc_assert (GET_CODE (reg_save_area) == MEM);
13054 reg_save_area = XEXP (reg_save_area, 0);
13055 if (GET_CODE (reg_save_area) == PLUS)
13057 gcc_assert (XEXP (reg_save_area, 0)
13058 == virtual_stack_vars_rtx);
13059 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13060 offset += INTVAL (XEXP (reg_save_area, 1));
13062 else
13063 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13066 cfun->machine->varargs_save_offset = offset;
13067 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13070 else
13072 first_reg_offset = next_cum.words;
13073 save_area = crtl->args.internal_arg_pointer;
13075 if (targetm.calls.must_pass_in_stack (mode, type))
13076 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13079 set = get_varargs_alias_set ();
13080 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13081 && cfun->va_list_gpr_size)
13083 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13085 if (va_list_gpr_counter_field)
13086 /* V4 va_list_gpr_size counts number of registers needed. */
13087 n_gpr = cfun->va_list_gpr_size;
13088 else
13089 /* char * va_list instead counts number of bytes needed. */
13090 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13092 if (nregs > n_gpr)
13093 nregs = n_gpr;
13095 mem = gen_rtx_MEM (BLKmode,
13096 plus_constant (Pmode, save_area,
13097 first_reg_offset * reg_size));
13098 MEM_NOTRAP_P (mem) = 1;
13099 set_mem_alias_set (mem, set);
13100 set_mem_align (mem, BITS_PER_WORD);
13102 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13103 nregs);
13106 /* Save FP registers if needed. */
13107 if (DEFAULT_ABI == ABI_V4
13108 && TARGET_HARD_FLOAT
13109 && ! no_rtl
13110 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13111 && cfun->va_list_fpr_size)
13113 int fregno = next_cum.fregno, nregs;
13114 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13115 rtx lab = gen_label_rtx ();
13116 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13117 * UNITS_PER_FP_WORD);
13119 emit_jump_insn
13120 (gen_rtx_SET (pc_rtx,
13121 gen_rtx_IF_THEN_ELSE (VOIDmode,
13122 gen_rtx_NE (VOIDmode, cr1,
13123 const0_rtx),
13124 gen_rtx_LABEL_REF (VOIDmode, lab),
13125 pc_rtx)));
13127 for (nregs = 0;
13128 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13129 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13131 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13132 ? DFmode : SFmode,
13133 plus_constant (Pmode, save_area, off));
13134 MEM_NOTRAP_P (mem) = 1;
13135 set_mem_alias_set (mem, set);
13136 set_mem_align (mem, GET_MODE_ALIGNMENT (
13137 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13138 ? DFmode : SFmode));
13139 emit_move_insn (mem, gen_rtx_REG (
13140 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13141 ? DFmode : SFmode, fregno));
13144 emit_label (lab);
13148 /* Create the va_list data type. */
13150 static tree
13151 rs6000_build_builtin_va_list (void)
13153 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13155 /* For AIX, prefer 'char *' because that's what the system
13156 header files like. */
13157 if (DEFAULT_ABI != ABI_V4)
13158 return build_pointer_type (char_type_node);
13160 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13161 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13162 get_identifier ("__va_list_tag"), record);
13164 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13165 unsigned_char_type_node);
13166 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13167 unsigned_char_type_node);
13168 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13169 every user file. */
13170 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13171 get_identifier ("reserved"), short_unsigned_type_node);
13172 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13173 get_identifier ("overflow_arg_area"),
13174 ptr_type_node);
13175 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13176 get_identifier ("reg_save_area"),
13177 ptr_type_node);
13179 va_list_gpr_counter_field = f_gpr;
13180 va_list_fpr_counter_field = f_fpr;
13182 DECL_FIELD_CONTEXT (f_gpr) = record;
13183 DECL_FIELD_CONTEXT (f_fpr) = record;
13184 DECL_FIELD_CONTEXT (f_res) = record;
13185 DECL_FIELD_CONTEXT (f_ovf) = record;
13186 DECL_FIELD_CONTEXT (f_sav) = record;
13188 TYPE_STUB_DECL (record) = type_decl;
13189 TYPE_NAME (record) = type_decl;
13190 TYPE_FIELDS (record) = f_gpr;
13191 DECL_CHAIN (f_gpr) = f_fpr;
13192 DECL_CHAIN (f_fpr) = f_res;
13193 DECL_CHAIN (f_res) = f_ovf;
13194 DECL_CHAIN (f_ovf) = f_sav;
13196 layout_type (record);
13198 /* The correct type is an array type of one element. */
13199 return build_array_type (record, build_index_type (size_zero_node));
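/* For reference (editorial sketch): the record built above corresponds
   to the familiar V.4 declaration

     typedef struct __va_list_tag {
       unsigned char gpr;          // count of GPR argument registers used
       unsigned char fpr;          // count of FPR argument registers used
       unsigned short reserved;    // padding, named to placate -Wpadded
       void *overflow_arg_area;    // next argument passed on the stack
       void *reg_save_area;        // base of the spilled-register block
     } __va_list_tag;

   wrapped in the one-element array type returned above.  */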
13202 /* Implement va_start. */
13204 static void
13205 rs6000_va_start (tree valist, rtx nextarg)
13207 HOST_WIDE_INT words, n_gpr, n_fpr;
13208 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13209 tree gpr, fpr, ovf, sav, t;
13211 /* Only SVR4 needs something special. */
13212 if (DEFAULT_ABI != ABI_V4)
13214 std_expand_builtin_va_start (valist, nextarg);
13215 return;
13218 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13219 f_fpr = DECL_CHAIN (f_gpr);
13220 f_res = DECL_CHAIN (f_fpr);
13221 f_ovf = DECL_CHAIN (f_res);
13222 f_sav = DECL_CHAIN (f_ovf);
13224 valist = build_simple_mem_ref (valist);
13225 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13226 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13227 f_fpr, NULL_TREE);
13228 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13229 f_ovf, NULL_TREE);
13230 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13231 f_sav, NULL_TREE);
13233 /* Count number of gp and fp argument registers used. */
13234 words = crtl->args.info.words;
13235 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13236 GP_ARG_NUM_REG);
13237 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13238 FP_ARG_NUM_REG);
13240 if (TARGET_DEBUG_ARG)
13241 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13242 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13243 words, n_gpr, n_fpr);
13245 if (cfun->va_list_gpr_size)
13247 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13248 build_int_cst (NULL_TREE, n_gpr));
13249 TREE_SIDE_EFFECTS (t) = 1;
13250 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13253 if (cfun->va_list_fpr_size)
13255 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13256 build_int_cst (NULL_TREE, n_fpr));
13257 TREE_SIDE_EFFECTS (t) = 1;
13258 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13260 #ifdef HAVE_AS_GNU_ATTRIBUTE
13261 if (call_ABI_of_interest (cfun->decl))
13262 rs6000_passes_float = true;
13263 #endif
13266 /* Find the overflow area. */
13267 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13268 if (words != 0)
13269 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13270 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13271 TREE_SIDE_EFFECTS (t) = 1;
13272 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13274 /* If there were no va_arg invocations, don't set up the register
13275 save area. */
13276 if (!cfun->va_list_gpr_size
13277 && !cfun->va_list_fpr_size
13278 && n_gpr < GP_ARG_NUM_REG
13279 && n_fpr < FP_ARG_V4_MAX_REG)
13280 return;
13282 /* Find the register save area. */
13283 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13284 if (cfun->machine->varargs_save_offset)
13285 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13286 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13287 TREE_SIDE_EFFECTS (t) = 1;
13288 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
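/* Example (editorial sketch): for the V.4 prototype

     int f (int a, double b, ...)

   va_start inside f records gpr = 1 (r3 carried A) and fpr = 1 (f1
   carried B), points overflow_arg_area at the first stack-passed
   argument, and points reg_save_area at the block spilled by
   setup_incoming_varargs above.  */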
13291 /* Implement va_arg. */
13293 static tree
13294 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13295 gimple_seq *post_p)
13297 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13298 tree gpr, fpr, ovf, sav, reg, t, u;
13299 int size, rsize, n_reg, sav_ofs, sav_scale;
13300 tree lab_false, lab_over, addr;
13301 int align;
13302 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13303 int regalign = 0;
13304 gimple *stmt;
13306 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13308 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13309 return build_va_arg_indirect_ref (t);
13312 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13313 earlier version of gcc, with the property that it always applied alignment
13314 adjustments to the va-args (even for zero-sized types). The cheapest way
13315 to deal with this is to replicate the effect of the part of
13316 std_gimplify_va_arg_expr that carries out the align adjust, for the case
13317 of relevance.
13318 We don't need to check for pass-by-reference because of the test above.
13319 We can return a simplified answer, since we know there's no offset to add. */
13321 if (((TARGET_MACHO
13322 && rs6000_darwin64_abi)
13323 || DEFAULT_ABI == ABI_ELFv2
13324 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13325 && integer_zerop (TYPE_SIZE (type)))
13327 unsigned HOST_WIDE_INT align, boundary;
13328 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13329 align = PARM_BOUNDARY / BITS_PER_UNIT;
13330 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13331 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13332 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13333 boundary /= BITS_PER_UNIT;
13334 if (boundary > align)
13336 tree t;
13337 /* This updates arg ptr by the amount that would be necessary
13338 to align the zero-sized (but not zero-alignment) item. */
13339 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13340 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13341 gimplify_and_add (t, pre_p);
13343 t = fold_convert (sizetype, valist_tmp);
13344 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13345 fold_convert (TREE_TYPE (valist),
13346 fold_build2 (BIT_AND_EXPR, sizetype, t,
13347 size_int (-boundary))));
13348 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13349 gimplify_and_add (t, pre_p);
13351 /* Since it is zero-sized there's no increment for the item itself. */
13352 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13353 return build_va_arg_indirect_ref (valist_tmp);
13356 if (DEFAULT_ABI != ABI_V4)
13358 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13360 tree elem_type = TREE_TYPE (type);
13361 machine_mode elem_mode = TYPE_MODE (elem_type);
13362 int elem_size = GET_MODE_SIZE (elem_mode);
13364 if (elem_size < UNITS_PER_WORD)
13366 tree real_part, imag_part;
13367 gimple_seq post = NULL;
13369 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13370 &post);
13371 /* Copy the value into a temporary, lest the formal temporary
13372 be reused out from under us. */
13373 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13374 gimple_seq_add_seq (pre_p, post);
13376 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13377 post_p);
13379 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13383 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13386 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13387 f_fpr = DECL_CHAIN (f_gpr);
13388 f_res = DECL_CHAIN (f_fpr);
13389 f_ovf = DECL_CHAIN (f_res);
13390 f_sav = DECL_CHAIN (f_ovf);
13392 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13393 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13394 f_fpr, NULL_TREE);
13395 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13396 f_ovf, NULL_TREE);
13397 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13398 f_sav, NULL_TREE);
13400 size = int_size_in_bytes (type);
13401 rsize = (size + 3) / 4;
13402 int pad = 4 * rsize - size;
13403 align = 1;
13405 machine_mode mode = TYPE_MODE (type);
13406 if (abi_v4_pass_in_fpr (mode))
13408 /* FP args go in FP registers, if present. */
13409 reg = fpr;
13410 n_reg = (size + 7) / 8;
13411 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13412 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13413 if (mode != SFmode && mode != SDmode)
13414 align = 8;
13416 else
13418 /* Otherwise into GP registers. */
13419 reg = gpr;
13420 n_reg = rsize;
13421 sav_ofs = 0;
13422 sav_scale = 4;
13423 if (n_reg == 2)
13424 align = 8;
13427 /* Pull the value out of the saved registers.... */
13429 lab_over = NULL;
13430 addr = create_tmp_var (ptr_type_node, "addr");
13432 /* AltiVec vectors never go in registers when -mabi=altivec. */
13433 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13434 align = 16;
13435 else
13437 lab_false = create_artificial_label (input_location);
13438 lab_over = create_artificial_label (input_location);
13440 /* Long long is aligned in the registers, as are any other two-GPR
13441 items such as complex int, due to a historical mistake. */
13442 u = reg;
13443 if (n_reg == 2 && reg == gpr)
13445 regalign = 1;
13446 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13447 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13448 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13449 unshare_expr (reg), u);
13451 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13452 reg number is 0 for f1, so we want to make it odd. */
13453 else if (reg == fpr && mode == TDmode)
13455 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13456 build_int_cst (TREE_TYPE (reg), 1));
13457 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13460 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13461 t = build2 (GE_EXPR, boolean_type_node, u, t);
13462 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13463 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13464 gimplify_and_add (t, pre_p);
13466 t = sav;
13467 if (sav_ofs)
13468 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13470 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13471 build_int_cst (TREE_TYPE (reg), n_reg));
13472 u = fold_convert (sizetype, u);
13473 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13474 t = fold_build_pointer_plus (t, u);
13476 /* _Decimal32 varargs are located in the second word of the 64-bit
13477 FP register for 32-bit binaries. */
13478 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13479 t = fold_build_pointer_plus_hwi (t, size);
13481 /* Args are passed right-aligned. */
13482 if (BYTES_BIG_ENDIAN)
13483 t = fold_build_pointer_plus_hwi (t, pad);
13485 gimplify_assign (addr, t, pre_p);
13487 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13489 stmt = gimple_build_label (lab_false);
13490 gimple_seq_add_stmt (pre_p, stmt);
13492 if ((n_reg == 2 && !regalign) || n_reg > 2)
13494 /* Ensure that we don't find any more args in regs.
13495 Alignment has been taken care of for the special cases. */
13496 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13500 /* ... otherwise out of the overflow area. */
13502 /* Care for on-stack alignment if needed. */
13503 t = ovf;
13504 if (align != 1)
13506 t = fold_build_pointer_plus_hwi (t, align - 1);
13507 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13508 build_int_cst (TREE_TYPE (t), -align));
13511 /* Args are passed right-aligned. */
13512 if (BYTES_BIG_ENDIAN)
13513 t = fold_build_pointer_plus_hwi (t, pad);
13515 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13517 gimplify_assign (unshare_expr (addr), t, pre_p);
13519 t = fold_build_pointer_plus_hwi (t, size);
13520 gimplify_assign (unshare_expr (ovf), t, pre_p);
13522 if (lab_over)
13524 stmt = gimple_build_label (lab_over);
13525 gimple_seq_add_stmt (pre_p, stmt);
13528 if (STRICT_ALIGNMENT
13529 && (TYPE_ALIGN (type)
13530 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13532 /* The value (of type complex double, for example) may not be
13533 aligned in memory in the saved registers, so copy via a
13534 temporary. (This is the same code as used for SPARC.) */
13535 tree tmp = create_tmp_var (type, "va_arg_tmp");
13536 tree dest_addr = build_fold_addr_expr (tmp);
13538 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13539 3, dest_addr, addr, size_int (rsize * 4));
13541 gimplify_and_add (copy, pre_p);
13542 addr = dest_addr;
13545 addr = fold_convert (ptrtype, addr);
13546 return build_va_arg_indirect_ref (addr);
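/* Example (editorial sketch): va_arg (ap, long long) under V.4 yields
   size = 8, rsize = n_reg = 2 and align = 8, so the register path
   above first rounds the gpr counter up to an even register pair,
   while the overflow path rounds the stack address up to an 8-byte
   boundary before reading.  */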
13549 /* Builtins. */
13551 static void
13552 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13554 tree t;
13555 unsigned classify = rs6000_builtin_info[(int)code].attr;
13556 const char *attr_string = "";
13558 gcc_assert (name != NULL);
13559 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13561 if (rs6000_builtin_decls[(int)code])
13562 fatal_error (input_location,
13563 "internal error: builtin function %qs already processed",
13564 name);
13566 rs6000_builtin_decls[(int)code] = t =
13567 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13569 /* Set any special attributes. */
13570 if ((classify & RS6000_BTC_CONST) != 0)
13572 /* const function, function only depends on the inputs. */
13573 TREE_READONLY (t) = 1;
13574 TREE_NOTHROW (t) = 1;
13575 attr_string = ", const";
13577 else if ((classify & RS6000_BTC_PURE) != 0)
13579 /* pure function, function can read global memory, but does not set any
13580 external state. */
13581 DECL_PURE_P (t) = 1;
13582 TREE_NOTHROW (t) = 1;
13583 attr_string = ", pure";
13585 else if ((classify & RS6000_BTC_FP) != 0)
13587 /* Function is a math function. If rounding mode is on, then treat the
13588 function as not reading global memory, but it can have arbitrary side
13589 effects. If it is off, then assume the function is a const function.
13590 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13591 builtin-attribute.def that is used for the math functions. */
13592 TREE_NOTHROW (t) = 1;
13593 if (flag_rounding_math)
13595 DECL_PURE_P (t) = 1;
13596 DECL_IS_NOVOPS (t) = 1;
13597 attr_string = ", fp, pure";
13599 else
13601 TREE_READONLY (t) = 1;
13602 attr_string = ", fp, const";
13605 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13606 gcc_unreachable ();
13608 if (TARGET_DEBUG_BUILTIN)
13609 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13610 (int)code, name, attr_string);
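/* Typical use (editorial sketch; the function-type node is whatever
   the caller built for the signature):

     def_builtin ("__builtin_altivec_vaddubm",
                  v16qi_ftype_v16qi_v16qi, ALTIVEC_BUILTIN_VADDUBM);

   which registers the builtin, records its decl in
   rs6000_builtin_decls, and applies the const/pure/fp attributes
   recorded in rs6000_builtin_info.  */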
13613 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13615 #undef RS6000_BUILTIN_0
13616 #undef RS6000_BUILTIN_1
13617 #undef RS6000_BUILTIN_2
13618 #undef RS6000_BUILTIN_3
13619 #undef RS6000_BUILTIN_A
13620 #undef RS6000_BUILTIN_D
13621 #undef RS6000_BUILTIN_H
13622 #undef RS6000_BUILTIN_P
13623 #undef RS6000_BUILTIN_Q
13624 #undef RS6000_BUILTIN_X
13626 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13627 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13628 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13629 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13630 { MASK, ICODE, NAME, ENUM },
13632 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13633 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13634 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13635 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13636 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13637 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13639 static const struct builtin_description bdesc_3arg[] =
13641 #include "rs6000-builtin.def"
13644 /* DST operations: void foo (void *, const int, const char). */
13646 #undef RS6000_BUILTIN_0
13647 #undef RS6000_BUILTIN_1
13648 #undef RS6000_BUILTIN_2
13649 #undef RS6000_BUILTIN_3
13650 #undef RS6000_BUILTIN_A
13651 #undef RS6000_BUILTIN_D
13652 #undef RS6000_BUILTIN_H
13653 #undef RS6000_BUILTIN_P
13654 #undef RS6000_BUILTIN_Q
13655 #undef RS6000_BUILTIN_X
13657 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13658 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13659 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13660 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13661 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13662 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13663 { MASK, ICODE, NAME, ENUM },
13665 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13666 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13667 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13668 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13670 static const struct builtin_description bdesc_dst[] =
13672 #include "rs6000-builtin.def"
13675 /* Simple binary operations: VECc = foo (VECa, VECb). */
13677 #undef RS6000_BUILTIN_0
13678 #undef RS6000_BUILTIN_1
13679 #undef RS6000_BUILTIN_2
13680 #undef RS6000_BUILTIN_3
13681 #undef RS6000_BUILTIN_A
13682 #undef RS6000_BUILTIN_D
13683 #undef RS6000_BUILTIN_H
13684 #undef RS6000_BUILTIN_P
13685 #undef RS6000_BUILTIN_Q
13686 #undef RS6000_BUILTIN_X
13688 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13689 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13690 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13691 { MASK, ICODE, NAME, ENUM },
13693 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13694 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13695 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13696 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13697 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13698 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13699 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13701 static const struct builtin_description bdesc_2arg[] =
13703 #include "rs6000-builtin.def"
13706 #undef RS6000_BUILTIN_0
13707 #undef RS6000_BUILTIN_1
13708 #undef RS6000_BUILTIN_2
13709 #undef RS6000_BUILTIN_3
13710 #undef RS6000_BUILTIN_A
13711 #undef RS6000_BUILTIN_D
13712 #undef RS6000_BUILTIN_H
13713 #undef RS6000_BUILTIN_P
13714 #undef RS6000_BUILTIN_Q
13715 #undef RS6000_BUILTIN_X
13717 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13718 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13719 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13720 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13721 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13722 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13723 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13724 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13725 { MASK, ICODE, NAME, ENUM },
13727 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13728 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13730 /* AltiVec predicates. */
13732 static const struct builtin_description bdesc_altivec_preds[] =
13734 #include "rs6000-builtin.def"
13737 /* PAIRED predicates. */
13738 #undef RS6000_BUILTIN_0
13739 #undef RS6000_BUILTIN_1
13740 #undef RS6000_BUILTIN_2
13741 #undef RS6000_BUILTIN_3
13742 #undef RS6000_BUILTIN_A
13743 #undef RS6000_BUILTIN_D
13744 #undef RS6000_BUILTIN_H
13745 #undef RS6000_BUILTIN_P
13746 #undef RS6000_BUILTIN_Q
13747 #undef RS6000_BUILTIN_X
13749 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13750 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13751 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13752 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13753 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13754 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13755 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13756 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13757 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13758 { MASK, ICODE, NAME, ENUM },
13760 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13762 static const struct builtin_description bdesc_paired_preds[] =
13764 #include "rs6000-builtin.def"
13767 /* ABS* operations. */
13769 #undef RS6000_BUILTIN_0
13770 #undef RS6000_BUILTIN_1
13771 #undef RS6000_BUILTIN_2
13772 #undef RS6000_BUILTIN_3
13773 #undef RS6000_BUILTIN_A
13774 #undef RS6000_BUILTIN_D
13775 #undef RS6000_BUILTIN_H
13776 #undef RS6000_BUILTIN_P
13777 #undef RS6000_BUILTIN_Q
13778 #undef RS6000_BUILTIN_X
13780 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13781 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13782 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13783 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13784 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13785 { MASK, ICODE, NAME, ENUM },
13787 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13788 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13789 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13790 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13791 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13793 static const struct builtin_description bdesc_abs[] =
13795 #include "rs6000-builtin.def"
13798 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13799 foo (VECa). */
13801 #undef RS6000_BUILTIN_0
13802 #undef RS6000_BUILTIN_1
13803 #undef RS6000_BUILTIN_2
13804 #undef RS6000_BUILTIN_3
13805 #undef RS6000_BUILTIN_A
13806 #undef RS6000_BUILTIN_D
13807 #undef RS6000_BUILTIN_H
13808 #undef RS6000_BUILTIN_P
13809 #undef RS6000_BUILTIN_Q
13810 #undef RS6000_BUILTIN_X
13812 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13813 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13814 { MASK, ICODE, NAME, ENUM },
13816 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13817 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13818 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13819 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13820 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13821 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13822 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13823 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13825 static const struct builtin_description bdesc_1arg[] =
13827 #include "rs6000-builtin.def"
13830 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13832 #undef RS6000_BUILTIN_0
13833 #undef RS6000_BUILTIN_1
13834 #undef RS6000_BUILTIN_2
13835 #undef RS6000_BUILTIN_3
13836 #undef RS6000_BUILTIN_A
13837 #undef RS6000_BUILTIN_D
13838 #undef RS6000_BUILTIN_H
13839 #undef RS6000_BUILTIN_P
13840 #undef RS6000_BUILTIN_Q
13841 #undef RS6000_BUILTIN_X
13843 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13844 { MASK, ICODE, NAME, ENUM },
13846 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13847 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13848 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13849 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13850 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13851 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13852 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13853 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13854 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13856 static const struct builtin_description bdesc_0arg[] =
13858 #include "rs6000-builtin.def"
13861 /* HTM builtins. */
13862 #undef RS6000_BUILTIN_0
13863 #undef RS6000_BUILTIN_1
13864 #undef RS6000_BUILTIN_2
13865 #undef RS6000_BUILTIN_3
13866 #undef RS6000_BUILTIN_A
13867 #undef RS6000_BUILTIN_D
13868 #undef RS6000_BUILTIN_H
13869 #undef RS6000_BUILTIN_P
13870 #undef RS6000_BUILTIN_Q
13871 #undef RS6000_BUILTIN_X
13873 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13874 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13875 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13876 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13877 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13878 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13879 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13880 { MASK, ICODE, NAME, ENUM },
13882 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13883 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13884 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13886 static const struct builtin_description bdesc_htm[] =
13888 #include "rs6000-builtin.def"
13891 #undef RS6000_BUILTIN_0
13892 #undef RS6000_BUILTIN_1
13893 #undef RS6000_BUILTIN_2
13894 #undef RS6000_BUILTIN_3
13895 #undef RS6000_BUILTIN_A
13896 #undef RS6000_BUILTIN_D
13897 #undef RS6000_BUILTIN_H
13898 #undef RS6000_BUILTIN_P
13899 #undef RS6000_BUILTIN_Q
13901 /* Return true if a builtin function is overloaded. */
13902 bool
13903 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13905 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13908 const char *
13909 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13911 return rs6000_builtin_info[(int)fncode].name;
13914 /* Expand an expression EXP that calls a builtin without arguments. */
13915 static rtx
13916 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13918 rtx pat;
13919 machine_mode tmode = insn_data[icode].operand[0].mode;
13921 if (icode == CODE_FOR_nothing)
13922 /* Builtin not supported on this processor. */
13923 return 0;
13925 if (target == 0
13926 || GET_MODE (target) != tmode
13927 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13928 target = gen_reg_rtx (tmode);
13930 pat = GEN_FCN (icode) (target);
13931 if (! pat)
13932 return 0;
13933 emit_insn (pat);
13935 return target;
13939 static rtx
13940 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13942 rtx pat;
13943 tree arg0 = CALL_EXPR_ARG (exp, 0);
13944 tree arg1 = CALL_EXPR_ARG (exp, 1);
13945 rtx op0 = expand_normal (arg0);
13946 rtx op1 = expand_normal (arg1);
13947 machine_mode mode0 = insn_data[icode].operand[0].mode;
13948 machine_mode mode1 = insn_data[icode].operand[1].mode;
13950 if (icode == CODE_FOR_nothing)
13951 /* Builtin not supported on this processor. */
13952 return 0;
13954 /* If we got invalid arguments bail out before generating bad rtl. */
13955 if (arg0 == error_mark_node || arg1 == error_mark_node)
13956 return const0_rtx;
13958 if (GET_CODE (op0) != CONST_INT
13959 || INTVAL (op0) > 255
13960 || INTVAL (op0) < 0)
13962 error ("argument 1 must be an 8-bit field value");
13963 return const0_rtx;
13966 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13967 op0 = copy_to_mode_reg (mode0, op0);
13969 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13970 op1 = copy_to_mode_reg (mode1, op1);
13972 pat = GEN_FCN (icode) (op0, op1);
13973 if (! pat)
13974 return const0_rtx;
13975 emit_insn (pat);
13977 return NULL_RTX;
13980 static rtx
13981 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13983 rtx pat;
13984 tree arg0 = CALL_EXPR_ARG (exp, 0);
13985 rtx op0 = expand_normal (arg0);
13986 machine_mode tmode = insn_data[icode].operand[0].mode;
13987 machine_mode mode0 = insn_data[icode].operand[1].mode;
13989 if (icode == CODE_FOR_nothing)
13990 /* Builtin not supported on this processor. */
13991 return 0;
13993 /* If we got invalid arguments bail out before generating bad rtl. */
13994 if (arg0 == error_mark_node)
13995 return const0_rtx;
13997 if (icode == CODE_FOR_altivec_vspltisb
13998 || icode == CODE_FOR_altivec_vspltish
13999 || icode == CODE_FOR_altivec_vspltisw)
14001 /* Only allow 5-bit *signed* literals. */
14002 if (GET_CODE (op0) != CONST_INT
14003 || INTVAL (op0) > 15
14004 || INTVAL (op0) < -16)
14006 error ("argument 1 must be a 5-bit signed literal");
14007 return CONST0_RTX (tmode);
14011 if (target == 0
14012 || GET_MODE (target) != tmode
14013 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14014 target = gen_reg_rtx (tmode);
14016 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14017 op0 = copy_to_mode_reg (mode0, op0);
14019 pat = GEN_FCN (icode) (target, op0);
14020 if (! pat)
14021 return 0;
14022 emit_insn (pat);
14024 return target;
14027 static rtx
14028 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14030 rtx pat, scratch1, scratch2;
14031 tree arg0 = CALL_EXPR_ARG (exp, 0);
14032 rtx op0 = expand_normal (arg0);
14033 machine_mode tmode = insn_data[icode].operand[0].mode;
14034 machine_mode mode0 = insn_data[icode].operand[1].mode;
14036 /* If we have invalid arguments, bail out before generating bad rtl. */
14037 if (arg0 == error_mark_node)
14038 return const0_rtx;
14040 if (target == 0
14041 || GET_MODE (target) != tmode
14042 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14043 target = gen_reg_rtx (tmode);
14045 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14046 op0 = copy_to_mode_reg (mode0, op0);
14048 scratch1 = gen_reg_rtx (mode0);
14049 scratch2 = gen_reg_rtx (mode0);
14051 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14052 if (! pat)
14053 return 0;
14054 emit_insn (pat);
14056 return target;
14059 static rtx
14060 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14062 rtx pat;
14063 tree arg0 = CALL_EXPR_ARG (exp, 0);
14064 tree arg1 = CALL_EXPR_ARG (exp, 1);
14065 rtx op0 = expand_normal (arg0);
14066 rtx op1 = expand_normal (arg1);
14067 machine_mode tmode = insn_data[icode].operand[0].mode;
14068 machine_mode mode0 = insn_data[icode].operand[1].mode;
14069 machine_mode mode1 = insn_data[icode].operand[2].mode;
14071 if (icode == CODE_FOR_nothing)
14072 /* Builtin not supported on this processor. */
14073 return 0;
14075 /* If we got invalid arguments bail out before generating bad rtl. */
14076 if (arg0 == error_mark_node || arg1 == error_mark_node)
14077 return const0_rtx;
14079 if (icode == CODE_FOR_altivec_vcfux
14080 || icode == CODE_FOR_altivec_vcfsx
14081 || icode == CODE_FOR_altivec_vctsxs
14082 || icode == CODE_FOR_altivec_vctuxs
14083 || icode == CODE_FOR_altivec_vspltb
14084 || icode == CODE_FOR_altivec_vsplth
14085 || icode == CODE_FOR_altivec_vspltw)
14087 /* Only allow 5-bit unsigned literals. */
14088 STRIP_NOPS (arg1);
14089 if (TREE_CODE (arg1) != INTEGER_CST
14090 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14092 error ("argument 2 must be a 5-bit unsigned literal");
14093 return CONST0_RTX (tmode);
14096 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14097 || icode == CODE_FOR_dfptstsfi_lt_dd
14098 || icode == CODE_FOR_dfptstsfi_gt_dd
14099 || icode == CODE_FOR_dfptstsfi_unordered_dd
14100 || icode == CODE_FOR_dfptstsfi_eq_td
14101 || icode == CODE_FOR_dfptstsfi_lt_td
14102 || icode == CODE_FOR_dfptstsfi_gt_td
14103 || icode == CODE_FOR_dfptstsfi_unordered_td)
14105 /* Only allow 6-bit unsigned literals. */
14106 STRIP_NOPS (arg0);
14107 if (TREE_CODE (arg0) != INTEGER_CST
14108 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14110 error ("argument 1 must be a 6-bit unsigned literal");
14111 return CONST0_RTX (tmode);
14114 else if (icode == CODE_FOR_xststdcqp
14115 || icode == CODE_FOR_xststdcdp
14116 || icode == CODE_FOR_xststdcsp
14117 || icode == CODE_FOR_xvtstdcdp
14118 || icode == CODE_FOR_xvtstdcsp)
14120 /* Only allow 7-bit unsigned literals. */
14121 STRIP_NOPS (arg1);
14122 if (TREE_CODE (arg1) != INTEGER_CST
14123 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14125 error ("argument 2 must be a 7-bit unsigned literal");
14126 return CONST0_RTX (tmode);
14129 else if (icode == CODE_FOR_unpackv1ti
14130 || icode == CODE_FOR_unpackkf
14131 || icode == CODE_FOR_unpacktf
14132 || icode == CODE_FOR_unpackif
14133 || icode == CODE_FOR_unpacktd)
14135 /* Only allow 1-bit unsigned literals. */
14136 STRIP_NOPS (arg1);
14137 if (TREE_CODE (arg1) != INTEGER_CST
14138 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
14140 error ("argument 2 must be a 1-bit unsigned literal");
14141 return CONST0_RTX (tmode);
14145 if (target == 0
14146 || GET_MODE (target) != tmode
14147 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14148 target = gen_reg_rtx (tmode);
14150 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14151 op0 = copy_to_mode_reg (mode0, op0);
14152 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14153 op1 = copy_to_mode_reg (mode1, op1);
14155 pat = GEN_FCN (icode) (target, op0, op1);
14156 if (! pat)
14157 return 0;
14158 emit_insn (pat);
14160 return target;
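/* Example (editorial sketch): __builtin_altivec_vspltw requires its
   second argument to be a 5-bit unsigned literal, so

     vector int s = __builtin_altivec_vspltw (v, 3);   // accepted
     vector int t = __builtin_altivec_vspltw (v, n);   // rejected

   where a non-constant (or out-of-range) N reaches the "argument 2
   must be a 5-bit unsigned literal" error above.  */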
14163 static rtx
14164 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14166 rtx pat, scratch;
14167 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14168 tree arg0 = CALL_EXPR_ARG (exp, 1);
14169 tree arg1 = CALL_EXPR_ARG (exp, 2);
14170 rtx op0 = expand_normal (arg0);
14171 rtx op1 = expand_normal (arg1);
14172 machine_mode tmode = SImode;
14173 machine_mode mode0 = insn_data[icode].operand[1].mode;
14174 machine_mode mode1 = insn_data[icode].operand[2].mode;
14175 int cr6_form_int;
14177 if (TREE_CODE (cr6_form) != INTEGER_CST)
14179 error ("argument 1 of %qs must be a constant",
14180 "__builtin_altivec_predicate");
14181 return const0_rtx;
14183 else
14184 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14186 gcc_assert (mode0 == mode1);
14188 /* If we have invalid arguments, bail out before generating bad rtl. */
14189 if (arg0 == error_mark_node || arg1 == error_mark_node)
14190 return const0_rtx;
14192 if (target == 0
14193 || GET_MODE (target) != tmode
14194 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14195 target = gen_reg_rtx (tmode);
14197 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14198 op0 = copy_to_mode_reg (mode0, op0);
14199 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14200 op1 = copy_to_mode_reg (mode1, op1);
14202 /* Note that for many of the relevant operations (e.g. cmpne or
14203 cmpeq) with float or double operands, it would make more sense for
14204 the allocated scratch register to have an integer vector mode. But
14205 the choice to copy the mode of operand 0 was made
14206 long ago and there are no plans to change it. */
14207 scratch = gen_reg_rtx (mode0);
14209 pat = GEN_FCN (icode) (scratch, op0, op1);
14210 if (! pat)
14211 return 0;
14212 emit_insn (pat);
14214 /* The vec_any* and vec_all* predicates use the same opcodes for two
14215 different operations, but the bits in CR6 will be different
14216 depending on what information we want. So we have to play tricks
14217 with CR6 to get the right bits out.
14219 If you think this is disgusting, look at the specs for the
14220 AltiVec predicates. */
14222 switch (cr6_form_int)
14224 case 0:
14225 emit_insn (gen_cr6_test_for_zero (target));
14226 break;
14227 case 1:
14228 emit_insn (gen_cr6_test_for_zero_reverse (target));
14229 break;
14230 case 2:
14231 emit_insn (gen_cr6_test_for_lt (target));
14232 break;
14233 case 3:
14234 emit_insn (gen_cr6_test_for_lt_reverse (target));
14235 break;
14236 default:
14237 error ("argument 1 of %qs is out of range",
14238 "__builtin_altivec_predicate");
14239 break;
14242 return target;
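/* The CR6_FORM values dispatched above are assumed to correspond to
   the __CR6_* constants altivec.h passes for the vec_all_* and
   vec_any_* intrinsics: 0 tests the CR6 "all elements false" bit,
   1 its reverse, 2 the "all elements true" bit, 3 its reverse.  */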
14245 static rtx
14246 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14248 rtx pat, addr;
14249 tree arg0 = CALL_EXPR_ARG (exp, 0);
14250 tree arg1 = CALL_EXPR_ARG (exp, 1);
14251 machine_mode tmode = insn_data[icode].operand[0].mode;
14252 machine_mode mode0 = Pmode;
14253 machine_mode mode1 = Pmode;
14254 rtx op0 = expand_normal (arg0);
14255 rtx op1 = expand_normal (arg1);
14257 if (icode == CODE_FOR_nothing)
14258 /* Builtin not supported on this processor. */
14259 return 0;
14261 /* If we got invalid arguments bail out before generating bad rtl. */
14262 if (arg0 == error_mark_node || arg1 == error_mark_node)
14263 return const0_rtx;
14265 if (target == 0
14266 || GET_MODE (target) != tmode
14267 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14268 target = gen_reg_rtx (tmode);
14270 op1 = copy_to_mode_reg (mode1, op1);
14272 if (op0 == const0_rtx)
14274 addr = gen_rtx_MEM (tmode, op1);
14276 else
14278 op0 = copy_to_mode_reg (mode0, op0);
14279 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14282 pat = GEN_FCN (icode) (target, addr);
14284 if (! pat)
14285 return 0;
14286 emit_insn (pat);
14288 return target;
14291 /* Return a constant vector for use as a little-endian permute control vector
14292 to reverse the order of elements of the given vector mode. */
14293 static rtx
14294 swap_selector_for_mode (machine_mode mode)
14296 /* These are little endian vectors, so their elements are reversed
14297 from what you would normally expect for a permute control vector. */
14298 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14299 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14300 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14301 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14302 unsigned int *swaparray, i;
14303 rtx perm[16];
14305 switch (mode)
14307 case E_V2DFmode:
14308 case E_V2DImode:
14309 swaparray = swap2;
14310 break;
14311 case E_V4SFmode:
14312 case E_V4SImode:
14313 swaparray = swap4;
14314 break;
14315 case E_V8HImode:
14316 swaparray = swap8;
14317 break;
14318 case E_V16QImode:
14319 swaparray = swap16;
14320 break;
14321 default:
14322 gcc_unreachable ();
14325 for (i = 0; i < 16; ++i)
14326 perm[i] = GEN_INT (swaparray[i]);
14328 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
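/* Illustration (editorial sketch): passing a V4SImode register holding
   elements {a,b,c,d} through vperm with the selector built above
   produces {d,c,b,a}; the byte indices look unusual because, as the
   comment notes, the selector is itself stored as a little-endian
   vector.  */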
14331 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
14332 with -maltivec=be specified. Issue the load followed by an element-
14333 reversing permute. */
14334 void
14335 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14337 rtx tmp = gen_reg_rtx (mode);
14338 rtx load = gen_rtx_SET (tmp, op1);
14339 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14340 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14341 rtx sel = swap_selector_for_mode (mode);
14342 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14344 gcc_assert (REG_P (op0));
14345 emit_insn (par);
14346 emit_insn (gen_rtx_SET (op0, vperm));
14349 /* Generate code for a "stvxl" built-in for a little endian target with
14350 -maltivec=be specified. Issue the store preceded by an element-reversing
14351 permute. */
14352 void
14353 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14355 rtx tmp = gen_reg_rtx (mode);
14356 rtx store = gen_rtx_SET (op0, tmp);
14357 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14358 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14359 rtx sel = swap_selector_for_mode (mode);
14360 rtx vperm;
14362 gcc_assert (REG_P (op1));
14363 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14364 emit_insn (gen_rtx_SET (tmp, vperm));
14365 emit_insn (par);
14368 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14369 specified. Issue the store preceded by an element-reversing permute. */
14370 void
14371 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14373 machine_mode inner_mode = GET_MODE_INNER (mode);
14374 rtx tmp = gen_reg_rtx (mode);
14375 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14376 rtx sel = swap_selector_for_mode (mode);
14377 rtx vperm;
14379 gcc_assert (REG_P (op1));
14380 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14381 emit_insn (gen_rtx_SET (tmp, vperm));
14382 emit_insn (gen_rtx_SET (op0, stvx));
14385 static rtx
14386 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14388 rtx pat, addr;
14389 tree arg0 = CALL_EXPR_ARG (exp, 0);
14390 tree arg1 = CALL_EXPR_ARG (exp, 1);
14391 machine_mode tmode = insn_data[icode].operand[0].mode;
14392 machine_mode mode0 = Pmode;
14393 machine_mode mode1 = Pmode;
14394 rtx op0 = expand_normal (arg0);
14395 rtx op1 = expand_normal (arg1);
14397 if (icode == CODE_FOR_nothing)
14398 /* Builtin not supported on this processor. */
14399 return 0;
14401 /* If we got invalid arguments bail out before generating bad rtl. */
14402 if (arg0 == error_mark_node || arg1 == error_mark_node)
14403 return const0_rtx;
14405 if (target == 0
14406 || GET_MODE (target) != tmode
14407 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14408 target = gen_reg_rtx (tmode);
14410 op1 = copy_to_mode_reg (mode1, op1);
14412 /* For LVX, express the RTL accurately by ANDing the address with -16.
14413 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14414 so the raw address is fine. */
14415 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14416 || icode == CODE_FOR_altivec_lvx_v2di_2op
14417 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14418 || icode == CODE_FOR_altivec_lvx_v4si_2op
14419 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14420 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14422 rtx rawaddr;
14423 if (op0 == const0_rtx)
14424 rawaddr = op1;
14425 else
14427 op0 = copy_to_mode_reg (mode0, op0);
14428 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14430 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14431 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14433 /* For -maltivec=be, emit the load and follow it up with a
14434 permute to swap the elements. */
14435 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14437 rtx temp = gen_reg_rtx (tmode);
14438 emit_insn (gen_rtx_SET (temp, addr));
14440 rtx sel = swap_selector_for_mode (tmode);
14441 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14442 UNSPEC_VPERM);
14443 emit_insn (gen_rtx_SET (target, vperm));
14445 else
14446 emit_insn (gen_rtx_SET (target, addr));
14448 else
14450 if (op0 == const0_rtx)
14451 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14452 else
14454 op0 = copy_to_mode_reg (mode0, op0);
14455 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14456 gen_rtx_PLUS (Pmode, op1, op0));
14459 pat = GEN_FCN (icode) (target, addr);
14460 if (! pat)
14461 return 0;
14462 emit_insn (pat);
14465 return target;
14468 static rtx
14469 altivec_expand_xl_be_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14471 rtx pat, addr;
14472 tree arg0 = CALL_EXPR_ARG (exp, 0);
14473 tree arg1 = CALL_EXPR_ARG (exp, 1);
14474 machine_mode tmode = insn_data[icode].operand[0].mode;
14475 machine_mode mode0 = Pmode;
14476 machine_mode mode1 = Pmode;
14477 rtx op0 = expand_normal (arg0);
14478 rtx op1 = expand_normal (arg1);
14480 if (icode == CODE_FOR_nothing)
14481 /* Builtin not supported on this processor. */
14482 return 0;
14484 /* If we got invalid arguments bail out before generating bad rtl. */
14485 if (arg0 == error_mark_node || arg1 == error_mark_node)
14486 return const0_rtx;
14488 if (target == 0
14489 || GET_MODE (target) != tmode
14490 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14491 target = gen_reg_rtx (tmode);
14493 op1 = copy_to_mode_reg (mode1, op1);
14495 if (op0 == const0_rtx)
14496 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14497 else
14499 op0 = copy_to_mode_reg (mode0, op0);
14500 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14501 gen_rtx_PLUS (Pmode, op1, op0));
14504 pat = GEN_FCN (icode) (target, addr);
14505 if (!pat)
14506 return 0;
14508 emit_insn (pat);
14509 /* Reverse the order of the elements if in LE mode. */
14510 if (!VECTOR_ELT_ORDER_BIG)
14512 rtx sel = swap_selector_for_mode (tmode);
14513 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, target, target, sel),
14514 UNSPEC_VPERM);
14515 emit_insn (gen_rtx_SET (target, vperm));
14517 return target;
14520 static rtx
14521 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14523 tree arg0 = CALL_EXPR_ARG (exp, 0);
14524 tree arg1 = CALL_EXPR_ARG (exp, 1);
14525 tree arg2 = CALL_EXPR_ARG (exp, 2);
14526 rtx op0 = expand_normal (arg0);
14527 rtx op1 = expand_normal (arg1);
14528 rtx op2 = expand_normal (arg2);
14529 rtx pat, addr;
14530 machine_mode tmode = insn_data[icode].operand[0].mode;
14531 machine_mode mode1 = Pmode;
14532 machine_mode mode2 = Pmode;
14534 /* Invalid arguments. Bail before doing anything stoopid! */
14535 if (arg0 == error_mark_node
14536 || arg1 == error_mark_node
14537 || arg2 == error_mark_node)
14538 return const0_rtx;
14540 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14541 op0 = copy_to_mode_reg (tmode, op0);
14543 op2 = copy_to_mode_reg (mode2, op2);
14545 if (op1 == const0_rtx)
14547 addr = gen_rtx_MEM (tmode, op2);
14549 else
14551 op1 = copy_to_mode_reg (mode1, op1);
14552 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14555 pat = GEN_FCN (icode) (addr, op0);
14556 if (pat)
14557 emit_insn (pat);
14558 return NULL_RTX;
14561 static rtx
14562 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14564 rtx pat;
14565 tree arg0 = CALL_EXPR_ARG (exp, 0);
14566 tree arg1 = CALL_EXPR_ARG (exp, 1);
14567 tree arg2 = CALL_EXPR_ARG (exp, 2);
14568 rtx op0 = expand_normal (arg0);
14569 rtx op1 = expand_normal (arg1);
14570 rtx op2 = expand_normal (arg2);
14571 machine_mode mode0 = insn_data[icode].operand[0].mode;
14572 machine_mode mode1 = insn_data[icode].operand[1].mode;
14573 machine_mode mode2 = insn_data[icode].operand[2].mode;
14575 if (icode == CODE_FOR_nothing)
14576 /* Builtin not supported on this processor. */
14577 return NULL_RTX;
14579 /* If we got invalid arguments bail out before generating bad rtl. */
14580 if (arg0 == error_mark_node
14581 || arg1 == error_mark_node
14582 || arg2 == error_mark_node)
14583 return NULL_RTX;
14585 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14586 op0 = copy_to_mode_reg (mode0, op0);
14587 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14588 op1 = copy_to_mode_reg (mode1, op1);
14589 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14590 op2 = copy_to_mode_reg (mode2, op2);
14592 pat = GEN_FCN (icode) (op0, op1, op2);
14593 if (pat)
14594 emit_insn (pat);
14596 return NULL_RTX;
14599 static rtx
14600 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14602 tree arg0 = CALL_EXPR_ARG (exp, 0);
14603 tree arg1 = CALL_EXPR_ARG (exp, 1);
14604 tree arg2 = CALL_EXPR_ARG (exp, 2);
14605 rtx op0 = expand_normal (arg0);
14606 rtx op1 = expand_normal (arg1);
14607 rtx op2 = expand_normal (arg2);
14608 rtx pat, addr, rawaddr;
14609 machine_mode tmode = insn_data[icode].operand[0].mode;
14610 machine_mode smode = insn_data[icode].operand[1].mode;
14611 machine_mode mode1 = Pmode;
14612 machine_mode mode2 = Pmode;
14614 /* Invalid arguments.  Bail out before doing anything stupid.  */
14615 if (arg0 == error_mark_node
14616 || arg1 == error_mark_node
14617 || arg2 == error_mark_node)
14618 return const0_rtx;
14620 op2 = copy_to_mode_reg (mode2, op2);
14622 /* For STVX, express the RTL accurately by ANDing the address with -16.
14623 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14624 so the raw address is fine. */
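  /* In RTL terms, a sketch of the STVX form built below:

       (set (mem:M (and:P (plus:P (reg op2) (reg op1))
                          (const_int -16)))
            (reg:M op0))  */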
14625 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14626 || icode == CODE_FOR_altivec_stvx_v2di_2op
14627 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14628 || icode == CODE_FOR_altivec_stvx_v4si_2op
14629 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14630 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14632 if (op1 == const0_rtx)
14633 rawaddr = op2;
14634 else
14636 op1 = copy_to_mode_reg (mode1, op1);
14637 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14640 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14641 addr = gen_rtx_MEM (tmode, addr);
14643 op0 = copy_to_mode_reg (tmode, op0);
14645 /* For -maltivec=be, emit a permute to swap the elements, followed
14646 by the store. */
14647 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14649 rtx temp = gen_reg_rtx (tmode);
14650 rtx sel = swap_selector_for_mode (tmode);
14651 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14652 UNSPEC_VPERM);
14653 emit_insn (gen_rtx_SET (temp, vperm));
14654 emit_insn (gen_rtx_SET (addr, temp));
14656 else
14657 emit_insn (gen_rtx_SET (addr, op0));
14659 else
14661 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14662 op0 = copy_to_mode_reg (smode, op0);
14664 if (op1 == const0_rtx)
14665 addr = gen_rtx_MEM (tmode, op2);
14666 else
14668 op1 = copy_to_mode_reg (mode1, op1);
14669 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14672 pat = GEN_FCN (icode) (addr, op0);
14673 if (pat)
14674 emit_insn (pat);
14677 return NULL_RTX;
14680 /* Return the appropriate SPR number associated with the given builtin. */
14681 static inline HOST_WIDE_INT
14682 htm_spr_num (enum rs6000_builtins code)
14684 if (code == HTM_BUILTIN_GET_TFHAR
14685 || code == HTM_BUILTIN_SET_TFHAR)
14686 return TFHAR_SPR;
14687 else if (code == HTM_BUILTIN_GET_TFIAR
14688 || code == HTM_BUILTIN_SET_TFIAR)
14689 return TFIAR_SPR;
14690 else if (code == HTM_BUILTIN_GET_TEXASR
14691 || code == HTM_BUILTIN_SET_TEXASR)
14692 return TEXASR_SPR;
14693 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14694 || code == HTM_BUILTIN_SET_TEXASRU);
14695 return TEXASRU_SPR;
14698 /* Return the appropriate SPR regno associated with the given builtin. */
14699 static inline HOST_WIDE_INT
14700 htm_spr_regno (enum rs6000_builtins code)
14702 if (code == HTM_BUILTIN_GET_TFHAR
14703 || code == HTM_BUILTIN_SET_TFHAR)
14704 return TFHAR_REGNO;
14705 else if (code == HTM_BUILTIN_GET_TFIAR
14706 || code == HTM_BUILTIN_SET_TFIAR)
14707 return TFIAR_REGNO;
14708 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14709 || code == HTM_BUILTIN_SET_TEXASR
14710 || code == HTM_BUILTIN_GET_TEXASRU
14711 || code == HTM_BUILTIN_SET_TEXASRU);
14712 return TEXASR_REGNO;
14715 /* Return the correct ICODE value depending on whether we are
14716 setting or reading the HTM SPRs. */
14717 static inline enum insn_code
14718 rs6000_htm_spr_icode (bool nonvoid)
14720 if (nonvoid)
14721 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14722 else
14723 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
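/* For example, the nonvoid read __builtin_get_texasr () selects
   CODE_FOR_htm_mfspr_di on 64-bit targets and CODE_FOR_htm_mfspr_si
   otherwise, while the void __builtin_set_texasr () store form
   selects the matching mtspr pattern (a sketch of the mapping).  */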
14726 /* Expand the HTM builtin in EXP and store the result in TARGET.
14727 Store true in *EXPANDEDP if we found a builtin to expand. */
14728 static rtx
14729 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14731 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14732 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14733 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14734 const struct builtin_description *d;
14735 size_t i;
14737 *expandedp = true;
14739 if (!TARGET_POWERPC64
14740 && (fcode == HTM_BUILTIN_TABORTDC
14741 || fcode == HTM_BUILTIN_TABORTDCI))
14743 size_t uns_fcode = (size_t)fcode;
14744 const char *name = rs6000_builtin_info[uns_fcode].name;
14745 error ("builtin %qs is only valid in 64-bit mode", name);
14746 return const0_rtx;
14749 /* Expand the HTM builtins. */
14750 d = bdesc_htm;
14751 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14752 if (d->code == fcode)
14754 rtx op[MAX_HTM_OPERANDS], pat;
14755 int nopnds = 0;
14756 tree arg;
14757 call_expr_arg_iterator iter;
14758 unsigned attr = rs6000_builtin_info[fcode].attr;
14759 enum insn_code icode = d->icode;
14760 const struct insn_operand_data *insn_op;
14761 bool uses_spr = (attr & RS6000_BTC_SPR);
14762 rtx cr = NULL_RTX;
14764 if (uses_spr)
14765 icode = rs6000_htm_spr_icode (nonvoid);
14766 insn_op = &insn_data[icode].operand[0];
14768 if (nonvoid)
14770 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14771 if (!target
14772 || GET_MODE (target) != tmode
14773 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14774 target = gen_reg_rtx (tmode);
14775 if (uses_spr)
14776 op[nopnds++] = target;
14779 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14781 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14782 return const0_rtx;
14784 insn_op = &insn_data[icode].operand[nopnds];
14786 op[nopnds] = expand_normal (arg);
14788 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14790 if (!strcmp (insn_op->constraint, "n"))
14792 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14793 if (!CONST_INT_P (op[nopnds]))
14794 error ("argument %d must be an unsigned literal", arg_num);
14795 else
14796 error ("argument %d is an unsigned literal that is "
14797 "out of range", arg_num);
14798 return const0_rtx;
14800 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14803 nopnds++;
14806 /* Handle the builtins for extended mnemonics. These accept
14807 no arguments, but map to builtins that take arguments. */
14808 switch (fcode)
14810 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14811 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14812 op[nopnds++] = GEN_INT (1);
14813 if (flag_checking)
14814 attr |= RS6000_BTC_UNARY;
14815 break;
14816 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14817 op[nopnds++] = GEN_INT (0);
14818 if (flag_checking)
14819 attr |= RS6000_BTC_UNARY;
14820 break;
14821 default:
14822 break;
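  /* For instance, __builtin_tresume () takes no arguments but is
     expanded exactly as __builtin_tsr (1) would be: the constant 1
     is appended above, and with checking enabled the operand count
     is then audited as if the builtin were unary.  */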
14825 /* If this builtin accesses SPRs, then pass in the appropriate
14826 SPR number and SPR regno as the last two operands. */
14827 if (uses_spr)
14829 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14830 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14831 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14833 /* If this builtin accesses a CR, then pass in a scratch
14834 CR as the last operand. */
14835 else if (attr & RS6000_BTC_CR)
14836 { cr = gen_reg_rtx (CCmode);
14837 op[nopnds++] = cr;
14840 if (flag_checking)
14842 int expected_nopnds = 0;
14843 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14844 expected_nopnds = 1;
14845 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14846 expected_nopnds = 2;
14847 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14848 expected_nopnds = 3;
14849 if (!(attr & RS6000_BTC_VOID))
14850 expected_nopnds += 1;
14851 if (uses_spr)
14852 expected_nopnds += 2;
14854 gcc_assert (nopnds == expected_nopnds
14855 && nopnds <= MAX_HTM_OPERANDS);
14858 switch (nopnds)
14860 case 1:
14861 pat = GEN_FCN (icode) (op[0]);
14862 break;
14863 case 2:
14864 pat = GEN_FCN (icode) (op[0], op[1]);
14865 break;
14866 case 3:
14867 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14868 break;
14869 case 4:
14870 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14871 break;
14872 default:
14873 gcc_unreachable ();
14875 if (!pat)
14876 return NULL_RTX;
14877 emit_insn (pat);
14879 if (attr & RS6000_BTC_CR)
14881 if (fcode == HTM_BUILTIN_TBEGIN)
14883 /* Emit code to set TARGET to true or false depending on
14884 whether the tbegin. instruction successfully or failed
14885 to start a transaction. We do this by placing the 1's
14886 complement of CR's EQ bit into TARGET. */
14887 rtx scratch = gen_reg_rtx (SImode);
14888 emit_insn (gen_rtx_SET (scratch,
14889 gen_rtx_EQ (SImode, cr,
14890 const0_rtx)));
14891 emit_insn (gen_rtx_SET (target,
14892 gen_rtx_XOR (SImode, scratch,
14893 GEN_INT (1))));
14895 else
14897 /* Emit code to copy the 4-bit condition register field
14898 CR into the least significant end of register TARGET. */
14899 rtx scratch1 = gen_reg_rtx (SImode);
14900 rtx scratch2 = gen_reg_rtx (SImode);
14901 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14902 emit_insn (gen_movcc (subreg, cr));
14903 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14904 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
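      /* A sketch of the two CR cases just emitted.  For tbegin.:
           scratch = (eq:SI cr (const_int 0))    ; the EQ bit
           target  = (xor:SI scratch (const_int 1))
         For the other CR-setting builtins, the 4-bit field is copied
         into an SImode register and isolated with
           target = (scratch >> 28) & 0xf.  */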
14908 if (nonvoid)
14909 return target;
14910 return const0_rtx;
14913 *expandedp = false;
14914 return NULL_RTX;
14917 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14919 static rtx
14920 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14921 rtx target)
14923 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14924 if (fcode == RS6000_BUILTIN_CPU_INIT)
14925 return const0_rtx;
14927 if (target == 0 || GET_MODE (target) != SImode)
14928 target = gen_reg_rtx (SImode);
14930 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14931 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14932 /* The target_clones attribute creates an ARRAY_REF instead of a
14933 STRING_CST; convert it back to a STRING_CST.  */
14934 if (TREE_CODE (arg) == ARRAY_REF
14935 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14936 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14937 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14938 arg = TREE_OPERAND (arg, 0);
14940 if (TREE_CODE (arg) != STRING_CST)
14942 error ("builtin %qs only accepts a string argument",
14943 rs6000_builtin_info[(size_t) fcode].name);
14944 return const0_rtx;
14947 if (fcode == RS6000_BUILTIN_CPU_IS)
14949 const char *cpu = TREE_STRING_POINTER (arg);
14950 rtx cpuid = NULL_RTX;
14951 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14952 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14954 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14955 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14956 break;
14958 if (cpuid == NULL_RTX)
14960 /* Invalid CPU argument. */
14961 error ("cpu %qs is an invalid argument to builtin %qs",
14962 cpu, rs6000_builtin_info[(size_t) fcode].name);
14963 return const0_rtx;
14966 rtx platform = gen_reg_rtx (SImode);
14967 rtx tcbmem = gen_const_mem (SImode,
14968 gen_rtx_PLUS (Pmode,
14969 gen_rtx_REG (Pmode, TLS_REGNUM),
14970 GEN_INT (TCB_PLATFORM_OFFSET)));
14971 emit_move_insn (platform, tcbmem);
14972 emit_insn (gen_eqsi3 (target, platform, cpuid));
14974 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14976 const char *hwcap = TREE_STRING_POINTER (arg);
14977 rtx mask = NULL_RTX;
14978 int hwcap_offset;
14979 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14980 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14982 mask = GEN_INT (cpu_supports_info[i].mask);
14983 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14984 break;
14986 if (mask == NULL_RTX)
14988 /* Invalid HWCAP argument. */
14989 error ("%s %qs is an invalid argument to builtin %qs",
14990 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14991 return const0_rtx;
14994 rtx tcb_hwcap = gen_reg_rtx (SImode);
14995 rtx tcbmem = gen_const_mem (SImode,
14996 gen_rtx_PLUS (Pmode,
14997 gen_rtx_REG (Pmode, TLS_REGNUM),
14998 GEN_INT (hwcap_offset)));
14999 emit_move_insn (tcb_hwcap, tcbmem);
15000 rtx scratch1 = gen_reg_rtx (SImode);
15001 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
15002 rtx scratch2 = gen_reg_rtx (SImode);
15003 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
15004 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
15006 else
15007 gcc_unreachable ();
15009 /* Record that we have expanded a CPU builtin, so that we can later
15010 emit a reference to the special symbol exported by LIBC to ensure we
15011 do not link against an old LIBC that doesn't support this feature. */
15012 cpu_builtin_p = true;
15014 #else
15015 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
15016 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
15018 /* For old LIBCs, always return FALSE. */
15019 emit_move_insn (target, GEN_INT (0));
15020 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
15022 return target;
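/* A hypothetical user-level sketch of the two TCB-backed builtins
   expanded above (the strings must match GLIBC's platform and hwcap
   tables):

     if (__builtin_cpu_is ("power9"))
       ...
     else if (__builtin_cpu_supports ("vsx"))
       ...  */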
15025 static rtx
15026 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
15028 rtx pat;
15029 tree arg0 = CALL_EXPR_ARG (exp, 0);
15030 tree arg1 = CALL_EXPR_ARG (exp, 1);
15031 tree arg2 = CALL_EXPR_ARG (exp, 2);
15032 rtx op0 = expand_normal (arg0);
15033 rtx op1 = expand_normal (arg1);
15034 rtx op2 = expand_normal (arg2);
15035 machine_mode tmode = insn_data[icode].operand[0].mode;
15036 machine_mode mode0 = insn_data[icode].operand[1].mode;
15037 machine_mode mode1 = insn_data[icode].operand[2].mode;
15038 machine_mode mode2 = insn_data[icode].operand[3].mode;
15040 if (icode == CODE_FOR_nothing)
15041 /* Builtin not supported on this processor. */
15042 return 0;
15044 /* If we got invalid arguments bail out before generating bad rtl. */
15045 if (arg0 == error_mark_node
15046 || arg1 == error_mark_node
15047 || arg2 == error_mark_node)
15048 return const0_rtx;
15050 /* Check and prepare argument depending on the instruction code.
15052 Note that a switch statement instead of the sequence of tests
15053 would be incorrect as many of the CODE_FOR values could be
15054 CODE_FOR_nothing and that would yield multiple alternatives
15055 with identical values. We'd never reach here at runtime in
15056 this case. */
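  /* Concretely, if two of the icodes tested below were both
     CODE_FOR_nothing on some target, a switch would contain two case
     labels with the same value and would fail to compile; the if
     chain simply tests them in order.  */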
15057 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15058 || icode == CODE_FOR_altivec_vsldoi_v2df
15059 || icode == CODE_FOR_altivec_vsldoi_v4si
15060 || icode == CODE_FOR_altivec_vsldoi_v8hi
15061 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15063 /* Only allow 4-bit unsigned literals. */
15064 STRIP_NOPS (arg2);
15065 if (TREE_CODE (arg2) != INTEGER_CST
15066 || TREE_INT_CST_LOW (arg2) & ~0xf)
15068 error ("argument 3 must be a 4-bit unsigned literal");
15069 return CONST0_RTX (tmode);
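  /* As a worked example of the mask idiom used throughout these
     checks: 15 & ~0xf is 0 and passes, while 16 & ~0xf is nonzero
     and is rejected, so exactly the literals 0..15 survive.  */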
15072 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15073 || icode == CODE_FOR_vsx_xxpermdi_v2di
15074 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15075 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15076 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15077 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15078 || icode == CODE_FOR_vsx_xxpermdi_v4si
15079 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15080 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15081 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15082 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15083 || icode == CODE_FOR_vsx_xxsldwi_v4si
15084 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15085 || icode == CODE_FOR_vsx_xxsldwi_v2di
15086 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15088 /* Only allow 2-bit unsigned literals. */
15089 STRIP_NOPS (arg2);
15090 if (TREE_CODE (arg2) != INTEGER_CST
15091 || TREE_INT_CST_LOW (arg2) & ~0x3)
15093 error ("argument 3 must be a 2-bit unsigned literal");
15094 return CONST0_RTX (tmode);
15097 else if (icode == CODE_FOR_vsx_set_v2df
15098 || icode == CODE_FOR_vsx_set_v2di
15099 || icode == CODE_FOR_bcdadd
15100 || icode == CODE_FOR_bcdadd_lt
15101 || icode == CODE_FOR_bcdadd_eq
15102 || icode == CODE_FOR_bcdadd_gt
15103 || icode == CODE_FOR_bcdsub
15104 || icode == CODE_FOR_bcdsub_lt
15105 || icode == CODE_FOR_bcdsub_eq
15106 || icode == CODE_FOR_bcdsub_gt)
15108 /* Only allow 1-bit unsigned literals. */
15109 STRIP_NOPS (arg2);
15110 if (TREE_CODE (arg2) != INTEGER_CST
15111 || TREE_INT_CST_LOW (arg2) & ~0x1)
15113 error ("argument 3 must be a 1-bit unsigned literal");
15114 return CONST0_RTX (tmode);
15117 else if (icode == CODE_FOR_dfp_ddedpd_dd
15118 || icode == CODE_FOR_dfp_ddedpd_td)
15120 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15121 STRIP_NOPS (arg0);
15122 if (TREE_CODE (arg0) != INTEGER_CST
15123 || TREE_INT_CST_LOW (arg0) & ~0x3)
15125 error ("argument 1 must be 0 or 2");
15126 return CONST0_RTX (tmode);
15129 else if (icode == CODE_FOR_dfp_denbcd_dd
15130 || icode == CODE_FOR_dfp_denbcd_td)
15132 /* Only allow 1-bit unsigned literals. */
15133 STRIP_NOPS (arg0);
15134 if (TREE_CODE (arg0) != INTEGER_CST
15135 || TREE_INT_CST_LOW (arg0) & ~0x1)
15137 error ("argument 1 must be a 1-bit unsigned literal");
15138 return CONST0_RTX (tmode);
15141 else if (icode == CODE_FOR_dfp_dscli_dd
15142 || icode == CODE_FOR_dfp_dscli_td
15143 || icode == CODE_FOR_dfp_dscri_dd
15144 || icode == CODE_FOR_dfp_dscri_td)
15146 /* Only allow 6-bit unsigned literals. */
15147 STRIP_NOPS (arg1);
15148 if (TREE_CODE (arg1) != INTEGER_CST
15149 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15151 error ("argument 2 must be a 6-bit unsigned literal");
15152 return CONST0_RTX (tmode);
15155 else if (icode == CODE_FOR_crypto_vshasigmaw
15156 || icode == CODE_FOR_crypto_vshasigmad)
15158 /* Check whether the 2nd and 3rd arguments are integer constants and in
15159 range and prepare arguments. */
15160 STRIP_NOPS (arg1);
15161 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
15163 error ("argument 2 must be 0 or 1");
15164 return CONST0_RTX (tmode);
15167 STRIP_NOPS (arg2);
15168 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
15170 error ("argument 3 must be in the range 0..15");
15171 return CONST0_RTX (tmode);
15175 if (target == 0
15176 || GET_MODE (target) != tmode
15177 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15178 target = gen_reg_rtx (tmode);
15180 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15181 op0 = copy_to_mode_reg (mode0, op0);
15182 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15183 op1 = copy_to_mode_reg (mode1, op1);
15184 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15185 op2 = copy_to_mode_reg (mode2, op2);
15187 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15188 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15189 else
15190 pat = GEN_FCN (icode) (target, op0, op1, op2);
15191 if (! pat)
15192 return 0;
15193 emit_insn (pat);
15195 return target;
15198 /* Expand the lvx builtins. */
15199 static rtx
15200 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15202 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15203 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15204 tree arg0;
15205 machine_mode tmode, mode0;
15206 rtx pat, op0;
15207 enum insn_code icode;
15209 switch (fcode)
15211 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15212 icode = CODE_FOR_vector_altivec_load_v16qi;
15213 break;
15214 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15215 icode = CODE_FOR_vector_altivec_load_v8hi;
15216 break;
15217 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15218 icode = CODE_FOR_vector_altivec_load_v4si;
15219 break;
15220 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15221 icode = CODE_FOR_vector_altivec_load_v4sf;
15222 break;
15223 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15224 icode = CODE_FOR_vector_altivec_load_v2df;
15225 break;
15226 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15227 icode = CODE_FOR_vector_altivec_load_v2di;
15228 break;
15229 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15230 icode = CODE_FOR_vector_altivec_load_v1ti;
15231 break;
15232 default:
15233 *expandedp = false;
15234 return NULL_RTX;
15237 *expandedp = true;
15239 arg0 = CALL_EXPR_ARG (exp, 0);
15240 op0 = expand_normal (arg0);
15241 tmode = insn_data[icode].operand[0].mode;
15242 mode0 = insn_data[icode].operand[1].mode;
15244 if (target == 0
15245 || GET_MODE (target) != tmode
15246 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15247 target = gen_reg_rtx (tmode);
15249 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15250 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15252 pat = GEN_FCN (icode) (target, op0);
15253 if (! pat)
15254 return 0;
15255 emit_insn (pat);
15256 return target;
15259 /* Expand the stvx builtins. */
15260 static rtx
15261 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15262 bool *expandedp)
15264 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15265 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15266 tree arg0, arg1;
15267 machine_mode mode0, mode1;
15268 rtx pat, op0, op1;
15269 enum insn_code icode;
15271 switch (fcode)
15273 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15274 icode = CODE_FOR_vector_altivec_store_v16qi;
15275 break;
15276 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15277 icode = CODE_FOR_vector_altivec_store_v8hi;
15278 break;
15279 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15280 icode = CODE_FOR_vector_altivec_store_v4si;
15281 break;
15282 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15283 icode = CODE_FOR_vector_altivec_store_v4sf;
15284 break;
15285 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15286 icode = CODE_FOR_vector_altivec_store_v2df;
15287 break;
15288 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15289 icode = CODE_FOR_vector_altivec_store_v2di;
15290 break;
15291 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15292 icode = CODE_FOR_vector_altivec_store_v1ti;
15293 break;
15294 default:
15295 *expandedp = false;
15296 return NULL_RTX;
15299 arg0 = CALL_EXPR_ARG (exp, 0);
15300 arg1 = CALL_EXPR_ARG (exp, 1);
15301 op0 = expand_normal (arg0);
15302 op1 = expand_normal (arg1);
15303 mode0 = insn_data[icode].operand[0].mode;
15304 mode1 = insn_data[icode].operand[1].mode;
15306 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15307 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15308 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15309 op1 = copy_to_mode_reg (mode1, op1);
15311 pat = GEN_FCN (icode) (op0, op1);
15312 if (pat)
15313 emit_insn (pat);
15315 *expandedp = true;
15316 return NULL_RTX;
15319 /* Expand the dst builtins. */
15320 static rtx
15321 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15322 bool *expandedp)
15324 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15325 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15326 tree arg0, arg1, arg2;
15327 machine_mode mode0, mode1;
15328 rtx pat, op0, op1, op2;
15329 const struct builtin_description *d;
15330 size_t i;
15332 *expandedp = false;
15334 /* Handle DST variants. */
15335 d = bdesc_dst;
15336 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15337 if (d->code == fcode)
15339 arg0 = CALL_EXPR_ARG (exp, 0);
15340 arg1 = CALL_EXPR_ARG (exp, 1);
15341 arg2 = CALL_EXPR_ARG (exp, 2);
15342 op0 = expand_normal (arg0);
15343 op1 = expand_normal (arg1);
15344 op2 = expand_normal (arg2);
15345 mode0 = insn_data[d->icode].operand[0].mode;
15346 mode1 = insn_data[d->icode].operand[1].mode;
15348 /* Invalid arguments, bail out before generating bad rtl. */
15349 if (arg0 == error_mark_node
15350 || arg1 == error_mark_node
15351 || arg2 == error_mark_node)
15352 return const0_rtx;
15354 *expandedp = true;
15355 STRIP_NOPS (arg2);
15356 if (TREE_CODE (arg2) != INTEGER_CST
15357 || TREE_INT_CST_LOW (arg2) & ~0x3)
15359 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15360 return const0_rtx;
15363 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15364 op0 = copy_to_mode_reg (Pmode, op0);
15365 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15366 op1 = copy_to_mode_reg (mode1, op1);
15368 pat = GEN_FCN (d->icode) (op0, op1, op2);
15369 if (pat != 0)
15370 emit_insn (pat);
15372 return NULL_RTX;
15375 return NULL_RTX;
15378 /* Expand vec_init builtin. */
15379 static rtx
15380 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15382 machine_mode tmode = TYPE_MODE (type);
15383 machine_mode inner_mode = GET_MODE_INNER (tmode);
15384 int i, n_elt = GET_MODE_NUNITS (tmode);
15386 gcc_assert (VECTOR_MODE_P (tmode));
15387 gcc_assert (n_elt == call_expr_nargs (exp));
15389 if (!target || !register_operand (target, tmode))
15390 target = gen_reg_rtx (tmode);
15392 /* If we have a vector composed of a single element, such as V1TImode, do
15393 the initialization directly. */
15394 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15396 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15397 emit_move_insn (target, gen_lowpart (tmode, x));
15399 else
15401 rtvec v = rtvec_alloc (n_elt);
15403 for (i = 0; i < n_elt; ++i)
15405 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15406 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15409 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15412 return target;
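/* A sketch for V4SI with arguments a, b, c, d: the loop above wraps
   the lowparts of the expanded arguments as

     (parallel:V4SI [a b c d])

   and lets rs6000_expand_vector_init choose the best sequence.  */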
15415 /* Return the integer constant in ARG. Constrain it to be in the range
15416 of the subparts of VEC_TYPE; issue an error if not. */
15418 static int
15419 get_element_number (tree vec_type, tree arg)
15421 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15423 if (!tree_fits_uhwi_p (arg)
15424 || (elt = tree_to_uhwi (arg), elt > max))
15426 error ("selector must be an integer constant in the range 0..%wi", max);
15427 return 0;
15430 return elt;
15433 /* Expand vec_set builtin. */
15434 static rtx
15435 altivec_expand_vec_set_builtin (tree exp)
15437 machine_mode tmode, mode1;
15438 tree arg0, arg1, arg2;
15439 int elt;
15440 rtx op0, op1;
15442 arg0 = CALL_EXPR_ARG (exp, 0);
15443 arg1 = CALL_EXPR_ARG (exp, 1);
15444 arg2 = CALL_EXPR_ARG (exp, 2);
15446 tmode = TYPE_MODE (TREE_TYPE (arg0));
15447 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15448 gcc_assert (VECTOR_MODE_P (tmode));
15450 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15451 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15452 elt = get_element_number (TREE_TYPE (arg0), arg2);
15454 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15455 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15457 op0 = force_reg (tmode, op0);
15458 op1 = force_reg (mode1, op1);
15460 rs6000_expand_vector_set (op0, op1, elt);
15462 return op0;
15465 /* Expand vec_ext builtin. */
15466 static rtx
15467 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15469 machine_mode tmode, mode0;
15470 tree arg0, arg1;
15471 rtx op0;
15472 rtx op1;
15474 arg0 = CALL_EXPR_ARG (exp, 0);
15475 arg1 = CALL_EXPR_ARG (exp, 1);
15477 op0 = expand_normal (arg0);
15478 op1 = expand_normal (arg1);
15480 /* Call get_element_number to validate arg1 if it is a constant. */
15481 if (TREE_CODE (arg1) == INTEGER_CST)
15482 (void) get_element_number (TREE_TYPE (arg0), arg1);
15484 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15485 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15486 gcc_assert (VECTOR_MODE_P (mode0));
15488 op0 = force_reg (mode0, op0);
15490 if (optimize || !target || !register_operand (target, tmode))
15491 target = gen_reg_rtx (tmode);
15493 rs6000_expand_vector_extract (target, op0, op1);
15495 return target;
15498 /* Expand the builtin in EXP and store the result in TARGET. Store
15499 true in *EXPANDEDP if we found a builtin to expand. */
15500 static rtx
15501 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15503 const struct builtin_description *d;
15504 size_t i;
15505 enum insn_code icode;
15506 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15507 tree arg0, arg1, arg2;
15508 rtx op0, pat;
15509 machine_mode tmode, mode0;
15510 enum rs6000_builtins fcode
15511 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15513 if (rs6000_overloaded_builtin_p (fcode))
15515 *expandedp = true;
15516 error ("unresolved overload for Altivec builtin %qF", fndecl);
15518 /* Given it is invalid, just generate a normal call. */
15519 return expand_call (exp, target, false);
15522 target = altivec_expand_ld_builtin (exp, target, expandedp);
15523 if (*expandedp)
15524 return target;
15526 target = altivec_expand_st_builtin (exp, target, expandedp);
15527 if (*expandedp)
15528 return target;
15530 target = altivec_expand_dst_builtin (exp, target, expandedp);
15531 if (*expandedp)
15532 return target;
15534 *expandedp = true;
15536 switch (fcode)
15538 case ALTIVEC_BUILTIN_STVX_V2DF:
15539 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15540 case ALTIVEC_BUILTIN_STVX_V2DI:
15541 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15542 case ALTIVEC_BUILTIN_STVX_V4SF:
15543 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15544 case ALTIVEC_BUILTIN_STVX:
15545 case ALTIVEC_BUILTIN_STVX_V4SI:
15546 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15547 case ALTIVEC_BUILTIN_STVX_V8HI:
15548 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15549 case ALTIVEC_BUILTIN_STVX_V16QI:
15550 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15551 case ALTIVEC_BUILTIN_STVEBX:
15552 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15553 case ALTIVEC_BUILTIN_STVEHX:
15554 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15555 case ALTIVEC_BUILTIN_STVEWX:
15556 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15557 case ALTIVEC_BUILTIN_STVXL_V2DF:
15558 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15559 case ALTIVEC_BUILTIN_STVXL_V2DI:
15560 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15561 case ALTIVEC_BUILTIN_STVXL_V4SF:
15562 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15563 case ALTIVEC_BUILTIN_STVXL:
15564 case ALTIVEC_BUILTIN_STVXL_V4SI:
15565 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15566 case ALTIVEC_BUILTIN_STVXL_V8HI:
15567 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15568 case ALTIVEC_BUILTIN_STVXL_V16QI:
15569 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15571 case ALTIVEC_BUILTIN_STVLX:
15572 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15573 case ALTIVEC_BUILTIN_STVLXL:
15574 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15575 case ALTIVEC_BUILTIN_STVRX:
15576 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15577 case ALTIVEC_BUILTIN_STVRXL:
15578 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15580 case P9V_BUILTIN_STXVL:
15581 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15583 case VSX_BUILTIN_STXVD2X_V1TI:
15584 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15585 case VSX_BUILTIN_STXVD2X_V2DF:
15586 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15587 case VSX_BUILTIN_STXVD2X_V2DI:
15588 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15589 case VSX_BUILTIN_STXVW4X_V4SF:
15590 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15591 case VSX_BUILTIN_STXVW4X_V4SI:
15592 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15593 case VSX_BUILTIN_STXVW4X_V8HI:
15594 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15595 case VSX_BUILTIN_STXVW4X_V16QI:
15596 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15598 /* For the following on big endian, it's ok to use any appropriate
15599 unaligned-supporting store, so use a generic expander. For
15600 little-endian, the exact element-reversing instruction must
15601 be used. */
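    /* (A sketch of the choice made in each case below: with
       BYTES_BIG_ENDIAN the generic vsx_store_* expander already puts
       element 0 at the lowest address, while on little-endian only
       the vsx_st_elemrev_* patterns guarantee that layout.)  */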
15602 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15604 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15605 : CODE_FOR_vsx_st_elemrev_v2df);
15606 return altivec_expand_stv_builtin (code, exp);
15608 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15610 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15611 : CODE_FOR_vsx_st_elemrev_v2di);
15612 return altivec_expand_stv_builtin (code, exp);
15614 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15616 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15617 : CODE_FOR_vsx_st_elemrev_v4sf);
15618 return altivec_expand_stv_builtin (code, exp);
15620 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15622 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15623 : CODE_FOR_vsx_st_elemrev_v4si);
15624 return altivec_expand_stv_builtin (code, exp);
15626 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15628 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15629 : CODE_FOR_vsx_st_elemrev_v8hi);
15630 return altivec_expand_stv_builtin (code, exp);
15632 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15634 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15635 : CODE_FOR_vsx_st_elemrev_v16qi);
15636 return altivec_expand_stv_builtin (code, exp);
15639 case ALTIVEC_BUILTIN_MFVSCR:
15640 icode = CODE_FOR_altivec_mfvscr;
15641 tmode = insn_data[icode].operand[0].mode;
15643 if (target == 0
15644 || GET_MODE (target) != tmode
15645 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15646 target = gen_reg_rtx (tmode);
15648 pat = GEN_FCN (icode) (target);
15649 if (! pat)
15650 return 0;
15651 emit_insn (pat);
15652 return target;
15654 case ALTIVEC_BUILTIN_MTVSCR:
15655 icode = CODE_FOR_altivec_mtvscr;
15656 arg0 = CALL_EXPR_ARG (exp, 0);
15657 op0 = expand_normal (arg0);
15658 mode0 = insn_data[icode].operand[0].mode;
15660 /* If we got invalid arguments bail out before generating bad rtl. */
15661 if (arg0 == error_mark_node)
15662 return const0_rtx;
15664 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15665 op0 = copy_to_mode_reg (mode0, op0);
15667 pat = GEN_FCN (icode) (op0);
15668 if (pat)
15669 emit_insn (pat);
15670 return NULL_RTX;
15672 case ALTIVEC_BUILTIN_DSSALL:
15673 emit_insn (gen_altivec_dssall ());
15674 return NULL_RTX;
15676 case ALTIVEC_BUILTIN_DSS:
15677 icode = CODE_FOR_altivec_dss;
15678 arg0 = CALL_EXPR_ARG (exp, 0);
15679 STRIP_NOPS (arg0);
15680 op0 = expand_normal (arg0);
15681 mode0 = insn_data[icode].operand[0].mode;
15683 /* If we got invalid arguments bail out before generating bad rtl. */
15684 if (arg0 == error_mark_node)
15685 return const0_rtx;
15687 if (TREE_CODE (arg0) != INTEGER_CST
15688 || TREE_INT_CST_LOW (arg0) & ~0x3)
15690 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15691 return const0_rtx;
15694 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15695 op0 = copy_to_mode_reg (mode0, op0);
15697 emit_insn (gen_altivec_dss (op0));
15698 return NULL_RTX;
15700 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15701 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15702 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15703 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15704 case VSX_BUILTIN_VEC_INIT_V2DF:
15705 case VSX_BUILTIN_VEC_INIT_V2DI:
15706 case VSX_BUILTIN_VEC_INIT_V1TI:
15707 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15709 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15710 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15711 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15712 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15713 case VSX_BUILTIN_VEC_SET_V2DF:
15714 case VSX_BUILTIN_VEC_SET_V2DI:
15715 case VSX_BUILTIN_VEC_SET_V1TI:
15716 return altivec_expand_vec_set_builtin (exp);
15718 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15719 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15720 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15721 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15722 case VSX_BUILTIN_VEC_EXT_V2DF:
15723 case VSX_BUILTIN_VEC_EXT_V2DI:
15724 case VSX_BUILTIN_VEC_EXT_V1TI:
15725 return altivec_expand_vec_ext_builtin (exp, target);
15727 case P9V_BUILTIN_VEXTRACT4B:
15728 case P9V_BUILTIN_VEC_VEXTRACT4B:
15729 arg1 = CALL_EXPR_ARG (exp, 1);
15730 STRIP_NOPS (arg1);
15732 /* Generate a normal call if it is invalid. */
15733 if (arg1 == error_mark_node)
15734 return expand_call (exp, target, false);
15736 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15738 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15739 return expand_call (exp, target, false);
15741 break;
15743 case P9V_BUILTIN_VINSERT4B:
15744 case P9V_BUILTIN_VINSERT4B_DI:
15745 case P9V_BUILTIN_VEC_VINSERT4B:
15746 arg2 = CALL_EXPR_ARG (exp, 2);
15747 STRIP_NOPS (arg2);
15749 /* Generate a normal call if it is invalid. */
15750 if (arg2 == error_mark_node)
15751 return expand_call (exp, target, false);
15753 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15755 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15756 return expand_call (exp, target, false);
15758 break;
15760 default:
15761 break;
15762 /* Fall through. */
15765 /* Expand abs* operations. */
15766 d = bdesc_abs;
15767 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15768 if (d->code == fcode)
15769 return altivec_expand_abs_builtin (d->icode, exp, target);
15771 /* Expand the AltiVec predicates. */
15772 d = bdesc_altivec_preds;
15773 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15774 if (d->code == fcode)
15775 return altivec_expand_predicate_builtin (d->icode, exp, target);
15777 /* The LV* builtins were initialized differently, so expand them here.  */
15778 switch (fcode)
15780 case ALTIVEC_BUILTIN_LVSL:
15781 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15782 exp, target, false);
15783 case ALTIVEC_BUILTIN_LVSR:
15784 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15785 exp, target, false);
15786 case ALTIVEC_BUILTIN_LVEBX:
15787 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15788 exp, target, false);
15789 case ALTIVEC_BUILTIN_LVEHX:
15790 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15791 exp, target, false);
15792 case ALTIVEC_BUILTIN_LVEWX:
15793 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15794 exp, target, false);
15795 case ALTIVEC_BUILTIN_LVXL_V2DF:
15796 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15797 exp, target, false);
15798 case ALTIVEC_BUILTIN_LVXL_V2DI:
15799 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15800 exp, target, false);
15801 case ALTIVEC_BUILTIN_LVXL_V4SF:
15802 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15803 exp, target, false);
15804 case ALTIVEC_BUILTIN_LVXL:
15805 case ALTIVEC_BUILTIN_LVXL_V4SI:
15806 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15807 exp, target, false);
15808 case ALTIVEC_BUILTIN_LVXL_V8HI:
15809 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15810 exp, target, false);
15811 case ALTIVEC_BUILTIN_LVXL_V16QI:
15812 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15813 exp, target, false);
15814 case ALTIVEC_BUILTIN_LVX_V2DF:
15815 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
15816 exp, target, false);
15817 case ALTIVEC_BUILTIN_LVX_V2DI:
15818 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
15819 exp, target, false);
15820 case ALTIVEC_BUILTIN_LVX_V4SF:
15821 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
15822 exp, target, false);
15823 case ALTIVEC_BUILTIN_LVX:
15824 case ALTIVEC_BUILTIN_LVX_V4SI:
15825 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
15826 exp, target, false);
15827 case ALTIVEC_BUILTIN_LVX_V8HI:
15828 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
15829 exp, target, false);
15830 case ALTIVEC_BUILTIN_LVX_V16QI:
15831 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
15832 exp, target, false);
15833 case ALTIVEC_BUILTIN_LVLX:
15834 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15835 exp, target, true);
15836 case ALTIVEC_BUILTIN_LVLXL:
15837 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15838 exp, target, true);
15839 case ALTIVEC_BUILTIN_LVRX:
15840 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15841 exp, target, true);
15842 case ALTIVEC_BUILTIN_LVRXL:
15843 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15844 exp, target, true);
15845 case VSX_BUILTIN_LXVD2X_V1TI:
15846 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15847 exp, target, false);
15848 case VSX_BUILTIN_LXVD2X_V2DF:
15849 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15850 exp, target, false);
15851 case VSX_BUILTIN_LXVD2X_V2DI:
15852 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15853 exp, target, false);
15854 case VSX_BUILTIN_LXVW4X_V4SF:
15855 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15856 exp, target, false);
15857 case VSX_BUILTIN_LXVW4X_V4SI:
15858 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15859 exp, target, false);
15860 case VSX_BUILTIN_LXVW4X_V8HI:
15861 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15862 exp, target, false);
15863 case VSX_BUILTIN_LXVW4X_V16QI:
15864 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15865 exp, target, false);
15866 /* For the following on big endian, it's ok to use any appropriate
15867 unaligned-supporting load, so use a generic expander. For
15868 little-endian, the exact element-reversing instruction must
15869 be used. */
15870 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15872 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15873 : CODE_FOR_vsx_ld_elemrev_v2df);
15874 return altivec_expand_lv_builtin (code, exp, target, false);
15876 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15878 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15879 : CODE_FOR_vsx_ld_elemrev_v2di);
15880 return altivec_expand_lv_builtin (code, exp, target, false);
15882 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15884 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15885 : CODE_FOR_vsx_ld_elemrev_v4sf);
15886 return altivec_expand_lv_builtin (code, exp, target, false);
15888 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15890 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15891 : CODE_FOR_vsx_ld_elemrev_v4si);
15892 return altivec_expand_lv_builtin (code, exp, target, false);
15894 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15896 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15897 : CODE_FOR_vsx_ld_elemrev_v8hi);
15898 return altivec_expand_lv_builtin (code, exp, target, false);
15900 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15902 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15903 : CODE_FOR_vsx_ld_elemrev_v16qi);
15904 return altivec_expand_lv_builtin (code, exp, target, false);
15906 break;
15907 default:
15908 break;
15909 /* Fall through. */
15912 /* XL_BE builtins: these were initialized to always load in big-endian order.  */
15913 switch (fcode)
15915 case VSX_BUILTIN_XL_BE_V2DI:
15917 enum insn_code code = CODE_FOR_vsx_load_v2di;
15918 return altivec_expand_xl_be_builtin (code, exp, target, false);
15920 break;
15921 case VSX_BUILTIN_XL_BE_V4SI:
15923 enum insn_code code = CODE_FOR_vsx_load_v4si;
15924 return altivec_expand_xl_be_builtin (code, exp, target, false);
15926 break;
15927 case VSX_BUILTIN_XL_BE_V8HI:
15929 enum insn_code code = CODE_FOR_vsx_load_v8hi;
15930 return altivec_expand_xl_be_builtin (code, exp, target, false);
15932 break;
15933 case VSX_BUILTIN_XL_BE_V16QI:
15935 enum insn_code code = CODE_FOR_vsx_load_v16qi;
15936 return altivec_expand_xl_be_builtin (code, exp, target, false);
15938 break;
15939 case VSX_BUILTIN_XL_BE_V2DF:
15941 enum insn_code code = CODE_FOR_vsx_load_v2df;
15942 return altivec_expand_xl_be_builtin (code, exp, target, false);
15944 break;
15945 case VSX_BUILTIN_XL_BE_V4SF:
15947 enum insn_code code = CODE_FOR_vsx_load_v4sf;
15948 return altivec_expand_xl_be_builtin (code, exp, target, false);
15950 break;
15951 default:
15952 break;
15953 /* Fall through. */
15956 *expandedp = false;
15957 return NULL_RTX;
15960 /* Expand the builtin in EXP and store the result in TARGET. Store
15961 true in *EXPANDEDP if we found a builtin to expand. */
15962 static rtx
15963 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15965 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15966 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15967 const struct builtin_description *d;
15968 size_t i;
15970 *expandedp = true;
15972 switch (fcode)
15974 case PAIRED_BUILTIN_STX:
15975 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15976 case PAIRED_BUILTIN_LX:
15977 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15978 default:
15979 break;
15980 /* Fall through. */
15983 /* Expand the paired predicates. */
15984 d = bdesc_paired_preds;
15985 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15986 if (d->code == fcode)
15987 return paired_expand_predicate_builtin (d->icode, exp, target);
15989 *expandedp = false;
15990 return NULL_RTX;
15993 static rtx
15994 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15996 rtx pat, scratch, tmp;
15997 tree form = CALL_EXPR_ARG (exp, 0);
15998 tree arg0 = CALL_EXPR_ARG (exp, 1);
15999 tree arg1 = CALL_EXPR_ARG (exp, 2);
16000 rtx op0 = expand_normal (arg0);
16001 rtx op1 = expand_normal (arg1);
16002 machine_mode mode0 = insn_data[icode].operand[1].mode;
16003 machine_mode mode1 = insn_data[icode].operand[2].mode;
16004 int form_int;
16005 enum rtx_code code;
16007 if (TREE_CODE (form) != INTEGER_CST)
16009 error ("argument 1 of %s must be a constant",
16010 "__builtin_paired_predicate");
16011 return const0_rtx;
16013 else
16014 form_int = TREE_INT_CST_LOW (form);
16016 gcc_assert (mode0 == mode1);
16018 if (arg0 == error_mark_node || arg1 == error_mark_node)
16019 return const0_rtx;
16021 if (target == 0
16022 || GET_MODE (target) != SImode
16023 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
16024 target = gen_reg_rtx (SImode);
16025 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16026 op0 = copy_to_mode_reg (mode0, op0);
16027 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16028 op1 = copy_to_mode_reg (mode1, op1);
16030 scratch = gen_reg_rtx (CCFPmode);
16032 pat = GEN_FCN (icode) (scratch, op0, op1);
16033 if (!pat)
16034 return const0_rtx;
16036 emit_insn (pat);
16038 switch (form_int)
16040 /* LT bit. */
16041 case 0:
16042 code = LT;
16043 break;
16044 /* GT bit. */
16045 case 1:
16046 code = GT;
16047 break;
16048 /* EQ bit. */
16049 case 2:
16050 code = EQ;
16051 break;
16052 /* UN bit. */
16053 case 3:
16054 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16055 return target;
16056 default:
16057 error ("argument 1 of %qs is out of range",
16058 "__builtin_paired_predicate");
16059 return const0_rtx;
16062 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16063 emit_move_insn (target, tmp);
16064 return target;
16067 /* Raise an error message for a builtin function that is called without the
16068 appropriate target options being set. */
16070 static void
16071 rs6000_invalid_builtin (enum rs6000_builtins fncode)
16073 size_t uns_fncode = (size_t) fncode;
16074 const char *name = rs6000_builtin_info[uns_fncode].name;
16075 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
16077 gcc_assert (name != NULL);
16078 if ((fnmask & RS6000_BTM_CELL) != 0)
16079 error ("builtin function %qs is only valid for the cell processor", name);
16080 else if ((fnmask & RS6000_BTM_VSX) != 0)
16081 error ("builtin function %qs requires the %qs option", name, "-mvsx");
16082 else if ((fnmask & RS6000_BTM_HTM) != 0)
16083 error ("builtin function %qs requires the %qs option", name, "-mhtm");
16084 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
16085 error ("builtin function %qs requires the %qs option", name, "-maltivec");
16086 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
16087 error ("builtin function %qs requires the %qs option", name, "-mpaired");
16088 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16089 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16090 error ("builtin function %qs requires the %qs and %qs options",
16091 name, "-mhard-dfp", "-mpower8-vector");
16092 else if ((fnmask & RS6000_BTM_DFP) != 0)
16093 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
16094 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
16095 error ("builtin function %qs requires the %qs option", name,
16096 "-mpower8-vector");
16097 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16098 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16099 error ("builtin function %qs requires the %qs and %qs options",
16100 name, "-mcpu=power9", "-m64");
16101 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
16102 error ("builtin function %qs requires the %qs option", name,
16103 "-mcpu=power9");
16104 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16105 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16106 error ("builtin function %qs requires the %qs and %qs options",
16107 name, "-mcpu=power9", "-m64");
16108 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
16109 error ("builtin function %qs requires the %qs option", name,
16110 "-mcpu=power9");
16111 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16112 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16113 error ("builtin function %qs requires the %qs and %qs options",
16114 name, "-mhard-float", "-mlong-double-128");
16115 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16116 error ("builtin function %qs requires the %qs option", name,
16117 "-mhard-float");
16118 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16119 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
16120 else
16121 error ("builtin function %qs is not supported with the current options",
16122 name);
16125 /* Target hook for early folding of built-ins, shamelessly stolen
16126 from ia64.c. */
16128 static tree
16129 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
16130 tree *args, bool ignore ATTRIBUTE_UNUSED)
16132 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
16134 enum rs6000_builtins fn_code
16135 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16136 switch (fn_code)
16138 case RS6000_BUILTIN_NANQ:
16139 case RS6000_BUILTIN_NANSQ:
16141 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16142 const char *str = c_getstr (*args);
16143 int quiet = fn_code == RS6000_BUILTIN_NANQ;
16144 REAL_VALUE_TYPE real;
16146 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
16147 return build_real (type, real);
16148 return NULL_TREE;
16150 case RS6000_BUILTIN_INFQ:
16151 case RS6000_BUILTIN_HUGE_VALQ:
16153 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16154 REAL_VALUE_TYPE inf;
16155 real_inf (&inf);
16156 return build_real (type, inf);
16158 default:
16159 break;
16162 #ifdef SUBTARGET_FOLD_BUILTIN
16163 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16164 #else
16165 return NULL_TREE;
16166 #endif
16169 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16170 a constant, use rs6000_fold_builtin.) */
16172 bool
16173 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16175 gimple *stmt = gsi_stmt (*gsi);
16176 tree fndecl = gimple_call_fndecl (stmt);
16177 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16178 enum rs6000_builtins fn_code
16179 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16180 tree arg0, arg1, lhs;
16182 size_t uns_fncode = (size_t) fn_code;
16183 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
16184 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
16185 const char *fn_name2 = (icode != CODE_FOR_nothing)
16186 ? get_insn_name ((int) icode)
16187 : "nothing";
16189 if (TARGET_DEBUG_BUILTIN)
16190 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
16191 fn_code, fn_name1, fn_name2);
16193 if (!rs6000_fold_gimple)
16194 return false;
16196 /* Generic solution to prevent gimple folding of code without a LHS. */
16197 if (!gimple_call_lhs (stmt))
16198 return false;
16200 switch (fn_code)
16202 /* Flavors of vec_add. We deliberately don't expand
16203 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16204 TImode, resulting in much poorer code generation. */
16205 case ALTIVEC_BUILTIN_VADDUBM:
16206 case ALTIVEC_BUILTIN_VADDUHM:
16207 case ALTIVEC_BUILTIN_VADDUWM:
16208 case P8V_BUILTIN_VADDUDM:
16209 case ALTIVEC_BUILTIN_VADDFP:
16210 case VSX_BUILTIN_XVADDDP:
16212 arg0 = gimple_call_arg (stmt, 0);
16213 arg1 = gimple_call_arg (stmt, 1);
16214 lhs = gimple_call_lhs (stmt);
16215 gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16216 gimple_set_location (g, gimple_location (stmt));
16217 gsi_replace (gsi, g, true);
16218 return true;
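      /* A sketch of the replacement: a call such as
           lhs = __builtin_altivec_vadduwm (a, b);
         becomes the plain GIMPLE assignment
           lhs = a + b;
         which later passes already optimize well.  */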
16220 /* Flavors of vec_sub. We deliberately don't expand
16221 P8V_BUILTIN_VSUBUQM. */
16222 case ALTIVEC_BUILTIN_VSUBUBM:
16223 case ALTIVEC_BUILTIN_VSUBUHM:
16224 case ALTIVEC_BUILTIN_VSUBUWM:
16225 case P8V_BUILTIN_VSUBUDM:
16226 case ALTIVEC_BUILTIN_VSUBFP:
16227 case VSX_BUILTIN_XVSUBDP:
16229 arg0 = gimple_call_arg (stmt, 0);
16230 arg1 = gimple_call_arg (stmt, 1);
16231 lhs = gimple_call_lhs (stmt);
16232 gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16233 gimple_set_location (g, gimple_location (stmt));
16234 gsi_replace (gsi, g, true);
16235 return true;
16237 case VSX_BUILTIN_XVMULSP:
16238 case VSX_BUILTIN_XVMULDP:
16240 arg0 = gimple_call_arg (stmt, 0);
16241 arg1 = gimple_call_arg (stmt, 1);
16242 lhs = gimple_call_lhs (stmt);
16243 gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16244 gimple_set_location (g, gimple_location (stmt));
16245 gsi_replace (gsi, g, true);
16246 return true;
16248 /* Even element flavors of vec_mul (signed). */
16249 case ALTIVEC_BUILTIN_VMULESB:
16250 case ALTIVEC_BUILTIN_VMULESH:
16251 /* Even element flavors of vec_mul (unsigned). */
16252 case ALTIVEC_BUILTIN_VMULEUB:
16253 case ALTIVEC_BUILTIN_VMULEUH:
16255 arg0 = gimple_call_arg (stmt, 0);
16256 arg1 = gimple_call_arg (stmt, 1);
16257 lhs = gimple_call_lhs (stmt);
16258 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16259 gimple_set_location (g, gimple_location (stmt));
16260 gsi_replace (gsi, g, true);
16261 return true;
16263 /* Odd element flavors of vec_mul (signed). */
16264 case ALTIVEC_BUILTIN_VMULOSB:
16265 case ALTIVEC_BUILTIN_VMULOSH:
16266 /* Odd element flavors of vec_mul (unsigned). */
16267 case ALTIVEC_BUILTIN_VMULOUB:
16268 case ALTIVEC_BUILTIN_VMULOUH:
16270 arg0 = gimple_call_arg (stmt, 0);
16271 arg1 = gimple_call_arg (stmt, 1);
16272 lhs = gimple_call_lhs (stmt);
16273 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16274 gimple_set_location (g, gimple_location (stmt));
16275 gsi_replace (gsi, g, true);
16276 return true;
16278 /* Flavors of vec_div (Integer). */
16279 case VSX_BUILTIN_DIV_V2DI:
16280 case VSX_BUILTIN_UDIV_V2DI:
16282 arg0 = gimple_call_arg (stmt, 0);
16283 arg1 = gimple_call_arg (stmt, 1);
16284 lhs = gimple_call_lhs (stmt);
16285 gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16286 gimple_set_location (g, gimple_location (stmt));
16287 gsi_replace (gsi, g, true);
16288 return true;
16290 /* Flavors of vec_div (Float). */
16291 case VSX_BUILTIN_XVDIVSP:
16292 case VSX_BUILTIN_XVDIVDP:
16294 arg0 = gimple_call_arg (stmt, 0);
16295 arg1 = gimple_call_arg (stmt, 1);
16296 lhs = gimple_call_lhs (stmt);
16297 gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16298 gimple_set_location (g, gimple_location (stmt));
16299 gsi_replace (gsi, g, true);
16300 return true;
16302 /* Flavors of vec_and. */
16303 case ALTIVEC_BUILTIN_VAND:
16305 arg0 = gimple_call_arg (stmt, 0);
16306 arg1 = gimple_call_arg (stmt, 1);
16307 lhs = gimple_call_lhs (stmt);
16308 gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16309 gimple_set_location (g, gimple_location (stmt));
16310 gsi_replace (gsi, g, true);
16311 return true;
16313 /* Flavors of vec_andc. */
16314 case ALTIVEC_BUILTIN_VANDC:
16316 arg0 = gimple_call_arg (stmt, 0);
16317 arg1 = gimple_call_arg (stmt, 1);
16318 lhs = gimple_call_lhs (stmt);
16319 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16320 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16321 gimple_set_location (g, gimple_location (stmt));
16322 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16323 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16324 gimple_set_location (g, gimple_location (stmt));
16325 gsi_replace (gsi, g, true);
16326 return true;
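/* A sketch of the two-statement expansion above, with assumed SSA
   names: for lhs = vec_andc (a, b) the fold emits

     t_1 = ~b;        <-- BIT_NOT_EXPR, inserted before the call
     lhs = a & t_1;   <-- BIT_AND_EXPR, replacing the call

   which matches the "AND with complement" semantics of vandc.  */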
16328 /* Flavors of vec_nand. */
16329 case P8V_BUILTIN_VEC_NAND:
16330 case P8V_BUILTIN_NAND_V16QI:
16331 case P8V_BUILTIN_NAND_V8HI:
16332 case P8V_BUILTIN_NAND_V4SI:
16333 case P8V_BUILTIN_NAND_V4SF:
16334 case P8V_BUILTIN_NAND_V2DF:
16335 case P8V_BUILTIN_NAND_V2DI:
16337 arg0 = gimple_call_arg (stmt, 0);
16338 arg1 = gimple_call_arg (stmt, 1);
16339 lhs = gimple_call_lhs (stmt);
16340 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16341 gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16342 gimple_set_location (g, gimple_location (stmt));
16343 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16344 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16345 gimple_set_location (g, gimple_location (stmt));
16346 gsi_replace (gsi, g, true);
16347 return true;
16349 /* Flavors of vec_or. */
16350 case ALTIVEC_BUILTIN_VOR:
16352 arg0 = gimple_call_arg (stmt, 0);
16353 arg1 = gimple_call_arg (stmt, 1);
16354 lhs = gimple_call_lhs (stmt);
16355 gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16356 gimple_set_location (g, gimple_location (stmt));
16357 gsi_replace (gsi, g, true);
16358 return true;
16360 /* Flavors of vec_orc. */
16361 case P8V_BUILTIN_ORC_V16QI:
16362 case P8V_BUILTIN_ORC_V8HI:
16363 case P8V_BUILTIN_ORC_V4SI:
16364 case P8V_BUILTIN_ORC_V4SF:
16365 case P8V_BUILTIN_ORC_V2DF:
16366 case P8V_BUILTIN_ORC_V2DI:
16368 arg0 = gimple_call_arg (stmt, 0);
16369 arg1 = gimple_call_arg (stmt, 1);
16370 lhs = gimple_call_lhs (stmt);
16371 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16372 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16373 gimple_set_location (g, gimple_location (stmt));
16374 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16375 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16376 gimple_set_location (g, gimple_location (stmt));
16377 gsi_replace (gsi, g, true);
16378 return true;
16380 /* Flavors of vec_xor. */
16381 case ALTIVEC_BUILTIN_VXOR:
16383 arg0 = gimple_call_arg (stmt, 0);
16384 arg1 = gimple_call_arg (stmt, 1);
16385 lhs = gimple_call_lhs (stmt);
16386 gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16387 gimple_set_location (g, gimple_location (stmt));
16388 gsi_replace (gsi, g, true);
16389 return true;
16391 /* Flavors of vec_nor. */
16392 case ALTIVEC_BUILTIN_VNOR:
16394 arg0 = gimple_call_arg (stmt, 0);
16395 arg1 = gimple_call_arg (stmt, 1);
16396 lhs = gimple_call_lhs (stmt);
16397 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16398 gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16399 gimple_set_location (g, gimple_location (stmt));
16400 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16401 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16402 gimple_set_location (g, gimple_location (stmt));
16403 gsi_replace (gsi, g, true);
16404 return true;
16406 /* Flavors of vec_abs. */
16407 case ALTIVEC_BUILTIN_ABS_V16QI:
16408 case ALTIVEC_BUILTIN_ABS_V8HI:
16409 case ALTIVEC_BUILTIN_ABS_V4SI:
16410 case ALTIVEC_BUILTIN_ABS_V4SF:
16411 case P8V_BUILTIN_ABS_V2DI:
16412 case VSX_BUILTIN_XVABSDP:
16414 arg0 = gimple_call_arg (stmt, 0);
16415 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16416 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16417 return false;
16418 lhs = gimple_call_lhs (stmt);
16419 gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16420 gimple_set_location (g, gimple_location (stmt));
16421 gsi_replace (gsi, g, true);
16422 return true;
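/* Why the TYPE_OVERFLOW_WRAPS guard above: ABS_EXPR of the most
   negative element overflows, which is undefined for signed types that
   do not wrap.  An illustrative case:

     vector signed int v = { INT_MIN, 1, 2, 3 };
     vec_abs (v);   <-- lane 0 has no representable absolute value

   so the fold is only done when wrapping semantics apply; otherwise
   the builtin is left for the target expander.  */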
16424 /* Flavors of vec_min. */
16425 case VSX_BUILTIN_XVMINDP:
16426 case P8V_BUILTIN_VMINSD:
16427 case P8V_BUILTIN_VMINUD:
16428 case ALTIVEC_BUILTIN_VMINSB:
16429 case ALTIVEC_BUILTIN_VMINSH:
16430 case ALTIVEC_BUILTIN_VMINSW:
16431 case ALTIVEC_BUILTIN_VMINUB:
16432 case ALTIVEC_BUILTIN_VMINUH:
16433 case ALTIVEC_BUILTIN_VMINUW:
16434 case ALTIVEC_BUILTIN_VMINFP:
16436 arg0 = gimple_call_arg (stmt, 0);
16437 arg1 = gimple_call_arg (stmt, 1);
16438 lhs = gimple_call_lhs (stmt);
16439 gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16440 gimple_set_location (g, gimple_location (stmt));
16441 gsi_replace (gsi, g, true);
16442 return true;
16444 /* Flavors of vec_max. */
16445 case VSX_BUILTIN_XVMAXDP:
16446 case P8V_BUILTIN_VMAXSD:
16447 case P8V_BUILTIN_VMAXUD:
16448 case ALTIVEC_BUILTIN_VMAXSB:
16449 case ALTIVEC_BUILTIN_VMAXSH:
16450 case ALTIVEC_BUILTIN_VMAXSW:
16451 case ALTIVEC_BUILTIN_VMAXUB:
16452 case ALTIVEC_BUILTIN_VMAXUH:
16453 case ALTIVEC_BUILTIN_VMAXUW:
16454 case ALTIVEC_BUILTIN_VMAXFP:
16456 arg0 = gimple_call_arg (stmt, 0);
16457 arg1 = gimple_call_arg (stmt, 1);
16458 lhs = gimple_call_lhs (stmt);
16459 gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16460 gimple_set_location (g, gimple_location (stmt));
16461 gsi_replace (gsi, g, true);
16462 return true;
16464 /* Flavors of vec_eqv. */
16465 case P8V_BUILTIN_EQV_V16QI:
16466 case P8V_BUILTIN_EQV_V8HI:
16467 case P8V_BUILTIN_EQV_V4SI:
16468 case P8V_BUILTIN_EQV_V4SF:
16469 case P8V_BUILTIN_EQV_V2DF:
16470 case P8V_BUILTIN_EQV_V2DI:
16472 arg0 = gimple_call_arg (stmt, 0);
16473 arg1 = gimple_call_arg (stmt, 1);
16474 lhs = gimple_call_lhs (stmt);
16475 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16476 gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16477 gimple_set_location (g, gimple_location (stmt));
16478 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16479 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16480 gimple_set_location (g, gimple_location (stmt));
16481 gsi_replace (gsi, g, true);
16482 return true;
16484 /* Flavors of vec_rotate_left. */
16485 case ALTIVEC_BUILTIN_VRLB:
16486 case ALTIVEC_BUILTIN_VRLH:
16487 case ALTIVEC_BUILTIN_VRLW:
16488 case P8V_BUILTIN_VRLD:
16490 arg0 = gimple_call_arg (stmt, 0);
16491 arg1 = gimple_call_arg (stmt, 1);
16492 lhs = gimple_call_lhs (stmt);
16493 gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16494 gimple_set_location (g, gimple_location (stmt));
16495 gsi_replace (gsi, g, true);
16496 return true;
16498 /* Flavors of vector shift right algebraic.
16499 vec_sra{b,h,w} -> vsra{b,h,w}. */
16500 case ALTIVEC_BUILTIN_VSRAB:
16501 case ALTIVEC_BUILTIN_VSRAH:
16502 case ALTIVEC_BUILTIN_VSRAW:
16503 case P8V_BUILTIN_VSRAD:
16505 arg0 = gimple_call_arg (stmt, 0);
16506 arg1 = gimple_call_arg (stmt, 1);
16507 lhs = gimple_call_lhs (stmt);
16508 gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16509 gimple_set_location (g, gimple_location (stmt));
16510 gsi_replace (gsi, g, true);
16511 return true;
16513 /* Flavors of vector shift left.
16514 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16515 case ALTIVEC_BUILTIN_VSLB:
16516 case ALTIVEC_BUILTIN_VSLH:
16517 case ALTIVEC_BUILTIN_VSLW:
16518 case P8V_BUILTIN_VSLD:
16520 arg0 = gimple_call_arg (stmt, 0);
16521 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16522 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16523 return false;
16524 arg1 = gimple_call_arg (stmt, 1);
16525 lhs = gimple_call_lhs (stmt);
16526 gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16527 gimple_set_location (g, gimple_location (stmt));
16528 gsi_replace (gsi, g, true);
16529 return true;
16531 /* Flavors of vector shift right. */
16532 case ALTIVEC_BUILTIN_VSRB:
16533 case ALTIVEC_BUILTIN_VSRH:
16534 case ALTIVEC_BUILTIN_VSRW:
16535 case P8V_BUILTIN_VSRD:
16537 arg0 = gimple_call_arg (stmt, 0);
16538 arg1 = gimple_call_arg (stmt, 1);
16539 lhs = gimple_call_lhs (stmt);
16540 gimple_seq stmts = NULL;
16541 /* Convert arg0 to unsigned. */
16542 tree arg0_unsigned
16543 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16544 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16545 tree res
16546 = gimple_build (&stmts, RSHIFT_EXPR,
16547 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16548 /* Convert result back to the lhs type. */
16549 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16550 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16551 update_call_from_tree (gsi, res);
16552 return true;
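/* A sketch of the statement sequence built above for a V4SI operand,
   with assumed SSA names:

     u_1 = VIEW_CONVERT_EXPR<vector unsigned int>(arg0);
     u_2 = u_1 >> arg1;
     lhs = VIEW_CONVERT_EXPR<vector signed int>(u_2);

   Shifting in the unsigned view guarantees RSHIFT_EXPR means a
   logical (zero-filling) shift even when the builtin's operand type
   is signed.  */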
16554 default:
16555 if (TARGET_DEBUG_BUILTIN)
16556 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16557 fn_code, fn_name1, fn_name2);
16558 break;
16561 return false;
16564 /* Expand an expression EXP that calls a built-in function,
16565 with result going to TARGET if that's convenient
16566 (and in mode MODE if that's convenient).
16567 SUBTARGET may be used as the target for computing one of EXP's operands.
16568 IGNORE is nonzero if the value is to be ignored. */
16570 static rtx
16571 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16572 machine_mode mode ATTRIBUTE_UNUSED,
16573 int ignore ATTRIBUTE_UNUSED)
16575 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16576 enum rs6000_builtins fcode
16577 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16578 size_t uns_fcode = (size_t)fcode;
16579 const struct builtin_description *d;
16580 size_t i;
16581 rtx ret;
16582 bool success;
16583 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16584 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16586 if (TARGET_DEBUG_BUILTIN)
16588 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16589 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16590 const char *name2 = (icode != CODE_FOR_nothing)
16591 ? get_insn_name ((int) icode)
16592 : "nothing";
16593 const char *name3;
16595 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16597 default: name3 = "unknown"; break;
16598 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16599 case RS6000_BTC_UNARY: name3 = "unary"; break;
16600 case RS6000_BTC_BINARY: name3 = "binary"; break;
16601 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16602 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16603 case RS6000_BTC_ABS: name3 = "abs"; break;
16604 case RS6000_BTC_DST: name3 = "dst"; break;
16608 fprintf (stderr,
16609 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16610 (name1) ? name1 : "---", fcode,
16611 (name2) ? name2 : "---", (int) icode,
16612 name3,
16613 func_valid_p ? "" : ", not valid");
16616 if (!func_valid_p)
16618 rs6000_invalid_builtin (fcode);
16620 /* Given it is invalid, just generate a normal call. */
16621 return expand_call (exp, target, ignore);
16624 switch (fcode)
16626 case RS6000_BUILTIN_RECIP:
16627 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16629 case RS6000_BUILTIN_RECIPF:
16630 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16632 case RS6000_BUILTIN_RSQRTF:
16633 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16635 case RS6000_BUILTIN_RSQRT:
16636 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16638 case POWER7_BUILTIN_BPERMD:
16639 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16640 ? CODE_FOR_bpermd_di
16641 : CODE_FOR_bpermd_si), exp, target);
16643 case RS6000_BUILTIN_GET_TB:
16644 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16645 target);
16647 case RS6000_BUILTIN_MFTB:
16648 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16649 ? CODE_FOR_rs6000_mftb_di
16650 : CODE_FOR_rs6000_mftb_si),
16651 target);
16653 case RS6000_BUILTIN_MFFS:
16654 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16656 case RS6000_BUILTIN_MTFSF:
16657 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16659 case RS6000_BUILTIN_CPU_INIT:
16660 case RS6000_BUILTIN_CPU_IS:
16661 case RS6000_BUILTIN_CPU_SUPPORTS:
16662 return cpu_expand_builtin (fcode, exp, target);
16664 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16665 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16667 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16668 : (int) CODE_FOR_altivec_lvsl_direct);
16669 machine_mode tmode = insn_data[icode].operand[0].mode;
16670 machine_mode mode = insn_data[icode].operand[1].mode;
16671 tree arg;
16672 rtx op, addr, pat;
16674 gcc_assert (TARGET_ALTIVEC);
16676 arg = CALL_EXPR_ARG (exp, 0);
16677 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16678 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16679 addr = memory_address (mode, op);
16680 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16681 op = addr;
16682 else
16684 /* For the load case we need to negate the address. */
16685 op = gen_reg_rtx (GET_MODE (addr));
16686 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16688 op = gen_rtx_MEM (mode, op);
16690 if (target == 0
16691 || GET_MODE (target) != tmode
16692 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16693 target = gen_reg_rtx (tmode);
16695 pat = GEN_FCN (icode) (target, op);
16696 if (!pat)
16697 return 0;
16698 emit_insn (pat);
16700 return target;
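/* A sketch of what this case emits (pseudo-RTL, names assumed): for
   mask-for-load on a big-endian target

     tmp  = -addr                       <-- the negation above
     mask = altivec_lvsr_direct (tmp)

   while mask-for-store uses the address unnegated, and little-endian
   targets select lvsl instead.  The resulting permute-control vector
   is what the vectorizer uses to realign misaligned accesses.  */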
16703 case ALTIVEC_BUILTIN_VCFUX:
16704 case ALTIVEC_BUILTIN_VCFSX:
16705 case ALTIVEC_BUILTIN_VCTUXS:
16706 case ALTIVEC_BUILTIN_VCTSXS:
16707 /* FIXME: There's got to be a nicer way to handle this case than
16708 constructing a new CALL_EXPR. */
16709 if (call_expr_nargs (exp) == 1)
16711 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16712 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16714 break;
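/* Illustrative effect of the rewrite above (hypothetical call): a
   one-argument use such as

     __builtin_altivec_vcfsx (v)

   is turned into the canonical two-argument form

     __builtin_altivec_vcfsx (v, 0)

   so the expanders below only ever see an explicit scale operand.  */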
16716 default:
16717 break;
16720 if (TARGET_ALTIVEC)
16722 ret = altivec_expand_builtin (exp, target, &success);
16724 if (success)
16725 return ret;
16727 if (TARGET_PAIRED_FLOAT)
16729 ret = paired_expand_builtin (exp, target, &success);
16731 if (success)
16732 return ret;
16734 if (TARGET_HTM)
16736 ret = htm_expand_builtin (exp, target, &success);
16738 if (success)
16739 return ret;
16742 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16743 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16744 gcc_assert (attr == RS6000_BTC_UNARY
16745 || attr == RS6000_BTC_BINARY
16746 || attr == RS6000_BTC_TERNARY
16747 || attr == RS6000_BTC_SPECIAL);
16749 /* Handle simple unary operations. */
16750 d = bdesc_1arg;
16751 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16752 if (d->code == fcode)
16753 return rs6000_expand_unop_builtin (d->icode, exp, target);
16755 /* Handle simple binary operations. */
16756 d = bdesc_2arg;
16757 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16758 if (d->code == fcode)
16759 return rs6000_expand_binop_builtin (d->icode, exp, target);
16761 /* Handle simple ternary operations. */
16762 d = bdesc_3arg;
16763 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16764 if (d->code == fcode)
16765 return rs6000_expand_ternop_builtin (d->icode, exp, target);
16767 /* Handle simple no-argument operations. */
16768 d = bdesc_0arg;
16769 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16770 if (d->code == fcode)
16771 return rs6000_expand_zeroop_builtin (d->icode, target);
16773 gcc_unreachable ();
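/* A conceptual sketch of the table-driven dispatch above.  A
   bdesc_2arg entry looks roughly like this (hypothetical entry; see
   struct builtin_description and rs6000-builtin.def for the real
   definitions):

     { RS6000_BTM_ALTIVEC, CODE_FOR_somepattern,
       "__builtin_altivec_something", ALTIVEC_BUILTIN_SOMETHING }

   Each loop matches d->code against fcode and forwards d->icode to
   the arity-specific expander, so most simple builtins need only a
   table entry rather than a new case in this function.  */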
16776 /* Create a builtin vector type with a name, taking care not to give
16777 the canonical type a name. */
16779 static tree
16780 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16782 tree result = build_vector_type (elt_type, num_elts);
16784 /* Copy so we don't give the canonical type a name. */
16785 result = build_variant_type_copy (result);
16787 add_builtin_type (name, result);
16789 return result;
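/* Example use, taken from the initialization below:

     V4SI_type_node
       = rs6000_vector_type ("__vector signed int", intSI_type_node, 4);

   The variant copy lets the name attach to a distinct type node while
   the unnamed canonical V4SI type is still used for type identity.  */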
16792 static void
16793 rs6000_init_builtins (void)
16795 tree tdecl;
16796 tree ftype;
16797 machine_mode mode;
16799 if (TARGET_DEBUG_BUILTIN)
16800 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
16801 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
16802 (TARGET_ALTIVEC) ? ", altivec" : "",
16803 (TARGET_VSX) ? ", vsx" : "");
16805 V2SI_type_node = build_vector_type (intSI_type_node, 2);
16806 V2SF_type_node = build_vector_type (float_type_node, 2);
16807 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16808 : "__vector long long",
16809 intDI_type_node, 2);
16810 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16811 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16812 intSI_type_node, 4);
16813 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16814 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16815 intHI_type_node, 8);
16816 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16817 intQI_type_node, 16);
16819 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16820 unsigned_intQI_type_node, 16);
16821 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16822 unsigned_intHI_type_node, 8);
16823 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16824 unsigned_intSI_type_node, 4);
16825 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16826 ? "__vector unsigned long"
16827 : "__vector unsigned long long",
16828 unsigned_intDI_type_node, 2);
16830 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
16831 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
16832 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
16833 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16835 const_str_type_node
16836 = build_pointer_type (build_qualified_type (char_type_node,
16837 TYPE_QUAL_CONST));
16839 /* We use V1TI mode as a special container to hold __int128_t items that
16840 must live in VSX registers. */
16841 if (intTI_type_node)
16843 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16844 intTI_type_node, 1);
16845 unsigned_V1TI_type_node
16846 = rs6000_vector_type ("__vector unsigned __int128",
16847 unsigned_intTI_type_node, 1);
16850 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16851 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16852 'vector unsigned short'. */
16854 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16855 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16856 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16857 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16858 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
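/* A sketch of why the distinct copies matter, using hypothetical C++
   overloads:

     void f (vector unsigned int);
     void f (vector bool int);    <-- must be a different overload

   If bool_int_type_node were merely an alias of the unsigned type,
   the two declarations would clash.  */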
16860 long_integer_type_internal_node = long_integer_type_node;
16861 long_unsigned_type_internal_node = long_unsigned_type_node;
16862 long_long_integer_type_internal_node = long_long_integer_type_node;
16863 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16864 intQI_type_internal_node = intQI_type_node;
16865 uintQI_type_internal_node = unsigned_intQI_type_node;
16866 intHI_type_internal_node = intHI_type_node;
16867 uintHI_type_internal_node = unsigned_intHI_type_node;
16868 intSI_type_internal_node = intSI_type_node;
16869 uintSI_type_internal_node = unsigned_intSI_type_node;
16870 intDI_type_internal_node = intDI_type_node;
16871 uintDI_type_internal_node = unsigned_intDI_type_node;
16872 intTI_type_internal_node = intTI_type_node;
16873 uintTI_type_internal_node = unsigned_intTI_type_node;
16874 float_type_internal_node = float_type_node;
16875 double_type_internal_node = double_type_node;
16876 long_double_type_internal_node = long_double_type_node;
16877 dfloat64_type_internal_node = dfloat64_type_node;
16878 dfloat128_type_internal_node = dfloat128_type_node;
16879 void_type_internal_node = void_type_node;
16881 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16882 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16883 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16884 format that uses a pair of doubles, depending on the switches and
16885 defaults.
16887 We do not enable the actual __float128 keyword unless the user explicitly
16888 asks for it, because the library support is not yet complete.
16890 If we don't support either 128-bit IBM double double or IEEE 128-bit
16891 floating point, we need to make sure the type is non-zero or else the
16892 self-test fails during bootstrap.
16894 We don't register a built-in type for __ibm128 if the type is the same as
16895 long double. Instead we add a #define in rs6000_cpu_cpp_builtins that
16896 maps __ibm128 to long double. */
16897 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
16899 ibm128_float_type_node = make_node (REAL_TYPE);
16900 TYPE_PRECISION (ibm128_float_type_node) = 128;
16901 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16902 layout_type (ibm128_float_type_node);
16904 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16905 "__ibm128");
16907 else
16908 ibm128_float_type_node = long_double_type_node;
16910 if (TARGET_FLOAT128_KEYWORD)
16912 ieee128_float_type_node = float128_type_node;
16913 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16914 "__float128");
16917 else if (TARGET_FLOAT128_TYPE)
16919 ieee128_float_type_node = make_node (REAL_TYPE);
16920 TYPE_PRECISION (ieee128_float_type_node) = 128;
16921 SET_TYPE_MODE (ieee128_float_type_node, KFmode);
16922 layout_type (ieee128_float_type_node);
16924 /* If we are not exporting the __float128/_Float128 keywords, we need a
16925 keyword to get the types created. Use __ieee128 as the dummy
16926 keyword. */
16927 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16928 "__ieee128");
16931 else
16932 ieee128_float_type_node = long_double_type_node;
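/* Illustrative summary of the branches above (assumed user code):

     __float128 x;   <-- accepted only under TARGET_FLOAT128_KEYWORD
     __ieee128  y;   <-- dummy keyword from the TARGET_FLOAT128_TYPE branch
     __ibm128   z;   <-- IFmode type (or long double) set up earlier

   When neither branch applies, ieee128_float_type_node falls back to
   long_double_type_node so it is never null, matching the bootstrap
   note in the comment above.  */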
16934 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16935 its tree type node. */
16936 builtin_mode_to_type[QImode][0] = integer_type_node;
16937 builtin_mode_to_type[HImode][0] = integer_type_node;
16938 builtin_mode_to_type[SImode][0] = intSI_type_node;
16939 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16940 builtin_mode_to_type[DImode][0] = intDI_type_node;
16941 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16942 builtin_mode_to_type[TImode][0] = intTI_type_node;
16943 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16944 builtin_mode_to_type[SFmode][0] = float_type_node;
16945 builtin_mode_to_type[DFmode][0] = double_type_node;
16946 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16947 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16948 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16949 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16950 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16951 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16952 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16953 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
16954 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
16955 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16956 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16957 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16958 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16959 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16960 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16961 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16962 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16963 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16964 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
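/* A sketch of how this table is consumed (by builtin_function_type,
   elsewhere in this file): a machine mode plus an unsignedness flag
   selects the tree type, e.g.

     builtin_mode_to_type[V4SImode][1]   --> unsigned_V4SI_type_node
     builtin_mode_to_type[DFmode][0]     --> double_type_node

   Modes with no unsigned variant leave the [1] slot null.  */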
16966 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16967 TYPE_NAME (bool_char_type_node) = tdecl;
16969 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16970 TYPE_NAME (bool_short_type_node) = tdecl;
16972 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16973 TYPE_NAME (bool_int_type_node) = tdecl;
16975 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16976 TYPE_NAME (pixel_type_node) = tdecl;
16978 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16979 bool_char_type_node, 16);
16980 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16981 bool_short_type_node, 8);
16982 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16983 bool_int_type_node, 4);
16984 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16985 ? "__vector __bool long"
16986 : "__vector __bool long long",
16987 bool_long_type_node, 2);
16988 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16989 pixel_type_node, 8);
16991 /* Paired builtins are only available if the compiler was configured with
16992 the appropriate options, so only create them in that case. Create
16993 AltiVec and VSX builtins on machines with at least the general purpose
16994 extensions (970 and newer) to allow the use of the target attribute. */
16996 if (TARGET_PAIRED_FLOAT)
16997 paired_init_builtins ();
16998 if (TARGET_EXTRA_BUILTINS)
16999 altivec_init_builtins ();
17000 if (TARGET_HTM)
17001 htm_init_builtins ();
17003 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
17004 rs6000_common_init_builtins ();
17006 ftype = build_function_type_list (ieee128_float_type_node,
17007 const_str_type_node, NULL_TREE);
17008 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
17009 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
17011 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
17012 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
17013 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
17015 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17016 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17017 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17019 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17020 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17021 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17023 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17024 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17025 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17027 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17028 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17029 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
17031 mode = (TARGET_64BIT) ? DImode : SImode;
17032 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17033 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17034 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17036 ftype = build_function_type_list (unsigned_intDI_type_node,
17037 NULL_TREE);
17038 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17040 if (TARGET_64BIT)
17041 ftype = build_function_type_list (unsigned_intDI_type_node,
17042 NULL_TREE);
17043 else
17044 ftype = build_function_type_list (unsigned_intSI_type_node,
17045 NULL_TREE);
17046 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17048 ftype = build_function_type_list (double_type_node, NULL_TREE);
17049 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17051 ftype = build_function_type_list (void_type_node,
17052 intSI_type_node, double_type_node,
17053 NULL_TREE);
17054 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17056 ftype = build_function_type_list (void_type_node, NULL_TREE);
17057 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17059 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17060 NULL_TREE);
17061 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17062 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
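/* Hypothetical user-level usage of the three CPU builtins defined
   above (the exact platform/feature strings depend on libc and kernel
   support):

     __builtin_cpu_init ();
     if (__builtin_cpu_is ("power9"))        ... ;
     if (__builtin_cpu_supports ("vsx"))     ... ;

   Both predicates take a constant string and are resolved via the
   AT_PLATFORM / AT_HWCAP auxiliary-vector values (see ppc-auxv.h).  */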
17064 /* AIX libm provides clog as __clog. */
17065 if (TARGET_XCOFF
17066 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17067 set_user_assembler_name (tdecl, "__clog");
17069 #ifdef SUBTARGET_INIT_BUILTINS
17070 SUBTARGET_INIT_BUILTINS;
17071 #endif
17074 /* Returns the rs6000 builtin decl for CODE. */
17076 static tree
17077 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17079 HOST_WIDE_INT fnmask;
17081 if (code >= RS6000_BUILTIN_COUNT)
17082 return error_mark_node;
17084 fnmask = rs6000_builtin_info[code].mask;
17085 if ((fnmask & rs6000_builtin_mask) != fnmask)
17087 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17088 return error_mark_node;
17091 return rs6000_builtin_decls[code];
17094 static void
17095 paired_init_builtins (void)
17097 const struct builtin_description *d;
17098 size_t i;
17099 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17101 tree int_ftype_int_v2sf_v2sf
17102 = build_function_type_list (integer_type_node,
17103 integer_type_node,
17104 V2SF_type_node,
17105 V2SF_type_node,
17106 NULL_TREE);
17107 tree pcfloat_type_node
17108 = build_pointer_type (build_qualified_type (float_type_node,
17109 TYPE_QUAL_CONST));
17111 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17112 long_integer_type_node,
17113 pcfloat_type_node,
17114 NULL_TREE);
17115 tree void_ftype_v2sf_long_pcfloat =
17116 build_function_type_list (void_type_node,
17117 V2SF_type_node,
17118 long_integer_type_node,
17119 pcfloat_type_node,
17120 NULL_TREE);
17123 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17124 PAIRED_BUILTIN_LX);
17127 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17128 PAIRED_BUILTIN_STX);
17130 /* Predicates. */
17131 d = bdesc_paired_preds;
17132 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17134 tree type;
17135 HOST_WIDE_INT mask = d->mask;
17137 if ((mask & builtin_mask) != mask)
17139 if (TARGET_DEBUG_BUILTIN)
17140 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17141 d->name);
17142 continue;
17145 /* Cannot define builtin if the instruction is disabled. */
17146 gcc_assert (d->icode != CODE_FOR_nothing);
17148 if (TARGET_DEBUG_BUILTIN)
17149 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17150 (int)i, get_insn_name (d->icode), (int)d->icode,
17151 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17153 switch (insn_data[d->icode].operand[1].mode)
17155 case E_V2SFmode:
17156 type = int_ftype_int_v2sf_v2sf;
17157 break;
17158 default:
17159 gcc_unreachable ();
17162 def_builtin (d->name, type, d->code);
17166 static void
17167 altivec_init_builtins (void)
17169 const struct builtin_description *d;
17170 size_t i;
17171 tree ftype;
17172 tree decl;
17173 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17175 tree pvoid_type_node = build_pointer_type (void_type_node);
17177 tree pcvoid_type_node
17178 = build_pointer_type (build_qualified_type (void_type_node,
17179 TYPE_QUAL_CONST));
17181 tree int_ftype_opaque
17182 = build_function_type_list (integer_type_node,
17183 opaque_V4SI_type_node, NULL_TREE);
17184 tree opaque_ftype_opaque
17185 = build_function_type_list (integer_type_node, NULL_TREE);
17186 tree opaque_ftype_opaque_int
17187 = build_function_type_list (opaque_V4SI_type_node,
17188 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17189 tree opaque_ftype_opaque_opaque_int
17190 = build_function_type_list (opaque_V4SI_type_node,
17191 opaque_V4SI_type_node, opaque_V4SI_type_node,
17192 integer_type_node, NULL_TREE);
17193 tree opaque_ftype_opaque_opaque_opaque
17194 = build_function_type_list (opaque_V4SI_type_node,
17195 opaque_V4SI_type_node, opaque_V4SI_type_node,
17196 opaque_V4SI_type_node, NULL_TREE);
17197 tree opaque_ftype_opaque_opaque
17198 = build_function_type_list (opaque_V4SI_type_node,
17199 opaque_V4SI_type_node, opaque_V4SI_type_node,
17200 NULL_TREE);
17201 tree int_ftype_int_opaque_opaque
17202 = build_function_type_list (integer_type_node,
17203 integer_type_node, opaque_V4SI_type_node,
17204 opaque_V4SI_type_node, NULL_TREE);
17205 tree int_ftype_int_v4si_v4si
17206 = build_function_type_list (integer_type_node,
17207 integer_type_node, V4SI_type_node,
17208 V4SI_type_node, NULL_TREE);
17209 tree int_ftype_int_v2di_v2di
17210 = build_function_type_list (integer_type_node,
17211 integer_type_node, V2DI_type_node,
17212 V2DI_type_node, NULL_TREE);
17213 tree void_ftype_v4si
17214 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17215 tree v8hi_ftype_void
17216 = build_function_type_list (V8HI_type_node, NULL_TREE);
17217 tree void_ftype_void
17218 = build_function_type_list (void_type_node, NULL_TREE);
17219 tree void_ftype_int
17220 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17222 tree opaque_ftype_long_pcvoid
17223 = build_function_type_list (opaque_V4SI_type_node,
17224 long_integer_type_node, pcvoid_type_node,
17225 NULL_TREE);
17226 tree v16qi_ftype_long_pcvoid
17227 = build_function_type_list (V16QI_type_node,
17228 long_integer_type_node, pcvoid_type_node,
17229 NULL_TREE);
17230 tree v8hi_ftype_long_pcvoid
17231 = build_function_type_list (V8HI_type_node,
17232 long_integer_type_node, pcvoid_type_node,
17233 NULL_TREE);
17234 tree v4si_ftype_long_pcvoid
17235 = build_function_type_list (V4SI_type_node,
17236 long_integer_type_node, pcvoid_type_node,
17237 NULL_TREE);
17238 tree v4sf_ftype_long_pcvoid
17239 = build_function_type_list (V4SF_type_node,
17240 long_integer_type_node, pcvoid_type_node,
17241 NULL_TREE);
17242 tree v2df_ftype_long_pcvoid
17243 = build_function_type_list (V2DF_type_node,
17244 long_integer_type_node, pcvoid_type_node,
17245 NULL_TREE);
17246 tree v2di_ftype_long_pcvoid
17247 = build_function_type_list (V2DI_type_node,
17248 long_integer_type_node, pcvoid_type_node,
17249 NULL_TREE);
17251 tree void_ftype_opaque_long_pvoid
17252 = build_function_type_list (void_type_node,
17253 opaque_V4SI_type_node, long_integer_type_node,
17254 pvoid_type_node, NULL_TREE);
17255 tree void_ftype_v4si_long_pvoid
17256 = build_function_type_list (void_type_node,
17257 V4SI_type_node, long_integer_type_node,
17258 pvoid_type_node, NULL_TREE);
17259 tree void_ftype_v16qi_long_pvoid
17260 = build_function_type_list (void_type_node,
17261 V16QI_type_node, long_integer_type_node,
17262 pvoid_type_node, NULL_TREE);
17264 tree void_ftype_v16qi_pvoid_long
17265 = build_function_type_list (void_type_node,
17266 V16QI_type_node, pvoid_type_node,
17267 long_integer_type_node, NULL_TREE);
17269 tree void_ftype_v8hi_long_pvoid
17270 = build_function_type_list (void_type_node,
17271 V8HI_type_node, long_integer_type_node,
17272 pvoid_type_node, NULL_TREE);
17273 tree void_ftype_v4sf_long_pvoid
17274 = build_function_type_list (void_type_node,
17275 V4SF_type_node, long_integer_type_node,
17276 pvoid_type_node, NULL_TREE);
17277 tree void_ftype_v2df_long_pvoid
17278 = build_function_type_list (void_type_node,
17279 V2DF_type_node, long_integer_type_node,
17280 pvoid_type_node, NULL_TREE);
17281 tree void_ftype_v2di_long_pvoid
17282 = build_function_type_list (void_type_node,
17283 V2DI_type_node, long_integer_type_node,
17284 pvoid_type_node, NULL_TREE);
17285 tree int_ftype_int_v8hi_v8hi
17286 = build_function_type_list (integer_type_node,
17287 integer_type_node, V8HI_type_node,
17288 V8HI_type_node, NULL_TREE);
17289 tree int_ftype_int_v16qi_v16qi
17290 = build_function_type_list (integer_type_node,
17291 integer_type_node, V16QI_type_node,
17292 V16QI_type_node, NULL_TREE);
17293 tree int_ftype_int_v4sf_v4sf
17294 = build_function_type_list (integer_type_node,
17295 integer_type_node, V4SF_type_node,
17296 V4SF_type_node, NULL_TREE);
17297 tree int_ftype_int_v2df_v2df
17298 = build_function_type_list (integer_type_node,
17299 integer_type_node, V2DF_type_node,
17300 V2DF_type_node, NULL_TREE);
17301 tree v2di_ftype_v2di
17302 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17303 tree v4si_ftype_v4si
17304 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17305 tree v8hi_ftype_v8hi
17306 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17307 tree v16qi_ftype_v16qi
17308 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17309 tree v4sf_ftype_v4sf
17310 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17311 tree v2df_ftype_v2df
17312 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17313 tree void_ftype_pcvoid_int_int
17314 = build_function_type_list (void_type_node,
17315 pcvoid_type_node, integer_type_node,
17316 integer_type_node, NULL_TREE);
17318 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17319 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17320 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17321 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17322 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17323 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17324 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17325 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17326 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17327 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17328 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17329 ALTIVEC_BUILTIN_LVXL_V2DF);
17330 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17331 ALTIVEC_BUILTIN_LVXL_V2DI);
17332 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17333 ALTIVEC_BUILTIN_LVXL_V4SF);
17334 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17335 ALTIVEC_BUILTIN_LVXL_V4SI);
17336 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17337 ALTIVEC_BUILTIN_LVXL_V8HI);
17338 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17339 ALTIVEC_BUILTIN_LVXL_V16QI);
17340 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17341 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17342 ALTIVEC_BUILTIN_LVX_V2DF);
17343 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17344 ALTIVEC_BUILTIN_LVX_V2DI);
17345 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17346 ALTIVEC_BUILTIN_LVX_V4SF);
17347 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17348 ALTIVEC_BUILTIN_LVX_V4SI);
17349 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17350 ALTIVEC_BUILTIN_LVX_V8HI);
17351 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17352 ALTIVEC_BUILTIN_LVX_V16QI);
17353 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17354 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17355 ALTIVEC_BUILTIN_STVX_V2DF);
17356 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17357 ALTIVEC_BUILTIN_STVX_V2DI);
17358 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17359 ALTIVEC_BUILTIN_STVX_V4SF);
17360 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17361 ALTIVEC_BUILTIN_STVX_V4SI);
17362 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17363 ALTIVEC_BUILTIN_STVX_V8HI);
17364 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17365 ALTIVEC_BUILTIN_STVX_V16QI);
17366 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17367 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17368 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17369 ALTIVEC_BUILTIN_STVXL_V2DF);
17370 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17371 ALTIVEC_BUILTIN_STVXL_V2DI);
17372 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17373 ALTIVEC_BUILTIN_STVXL_V4SF);
17374 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17375 ALTIVEC_BUILTIN_STVXL_V4SI);
17376 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17377 ALTIVEC_BUILTIN_STVXL_V8HI);
17378 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17379 ALTIVEC_BUILTIN_STVXL_V16QI);
17380 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17381 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17382 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17383 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17384 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17385 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17386 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17387 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17388 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17389 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17390 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17391 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17392 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17393 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17394 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17395 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17397 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17398 VSX_BUILTIN_LXVD2X_V2DF);
17399 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17400 VSX_BUILTIN_LXVD2X_V2DI);
17401 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17402 VSX_BUILTIN_LXVW4X_V4SF);
17403 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17404 VSX_BUILTIN_LXVW4X_V4SI);
17405 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17406 VSX_BUILTIN_LXVW4X_V8HI);
17407 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17408 VSX_BUILTIN_LXVW4X_V16QI);
17409 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17410 VSX_BUILTIN_STXVD2X_V2DF);
17411 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17412 VSX_BUILTIN_STXVD2X_V2DI);
17413 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17414 VSX_BUILTIN_STXVW4X_V4SF);
17415 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17416 VSX_BUILTIN_STXVW4X_V4SI);
17417 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17418 VSX_BUILTIN_STXVW4X_V8HI);
17419 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17420 VSX_BUILTIN_STXVW4X_V16QI);
17422 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17423 VSX_BUILTIN_LD_ELEMREV_V2DF);
17424 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17425 VSX_BUILTIN_LD_ELEMREV_V2DI);
17426 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17427 VSX_BUILTIN_LD_ELEMREV_V4SF);
17428 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17429 VSX_BUILTIN_LD_ELEMREV_V4SI);
17430 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17431 VSX_BUILTIN_ST_ELEMREV_V2DF);
17432 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17433 VSX_BUILTIN_ST_ELEMREV_V2DI);
17434 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17435 VSX_BUILTIN_ST_ELEMREV_V4SF);
17436 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17437 VSX_BUILTIN_ST_ELEMREV_V4SI);
17439 def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid,
17440 VSX_BUILTIN_XL_BE_V8HI);
17441 def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid,
17442 VSX_BUILTIN_XL_BE_V4SI);
17443 def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid,
17444 VSX_BUILTIN_XL_BE_V2DI);
17445 def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid,
17446 VSX_BUILTIN_XL_BE_V4SF);
17447 def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid,
17448 VSX_BUILTIN_XL_BE_V2DF);
17449 def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid,
17450 VSX_BUILTIN_XL_BE_V16QI);
17452 if (TARGET_P9_VECTOR)
17454 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17455 VSX_BUILTIN_LD_ELEMREV_V8HI);
17456 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17457 VSX_BUILTIN_LD_ELEMREV_V16QI);
17458 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
17459 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
17460 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
17461 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
17463 else
17465 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
17466 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
17467 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
17468 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
17469 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
17470 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
17471 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
17472 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
17475 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17476 VSX_BUILTIN_VEC_LD);
17477 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17478 VSX_BUILTIN_VEC_ST);
17479 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17480 VSX_BUILTIN_VEC_XL);
17481 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17482 VSX_BUILTIN_VEC_XL_BE);
17483 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17484 VSX_BUILTIN_VEC_XST);
17486 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17487 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17488 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17490 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17491 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17492 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17493 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17494 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17495 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17496 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17497 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17498 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17499 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17500 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17501 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17503 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17504 ALTIVEC_BUILTIN_VEC_ADDE);
17505 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17506 ALTIVEC_BUILTIN_VEC_ADDEC);
17507 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17508 ALTIVEC_BUILTIN_VEC_CMPNE);
17509 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17510 ALTIVEC_BUILTIN_VEC_MUL);
17511 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17512 ALTIVEC_BUILTIN_VEC_SUBE);
17513 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17514 ALTIVEC_BUILTIN_VEC_SUBEC);
17516 /* Cell builtins. */
17517 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17518 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17519 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17520 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17522 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17523 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17524 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17525 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17527 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17528 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17529 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17530 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17532 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17533 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17534 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17535 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17537 if (TARGET_P9_VECTOR)
17538 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17539 P9V_BUILTIN_STXVL);
17541 /* Add the DST variants. */
17542 d = bdesc_dst;
17543 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17545 HOST_WIDE_INT mask = d->mask;
17547 /* It is expected that these dst built-in functions may have
17548 d->icode equal to CODE_FOR_nothing. */
17549 if ((mask & builtin_mask) != mask)
17551 if (TARGET_DEBUG_BUILTIN)
17552 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17553 d->name);
17554 continue;
17556 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17559 /* Initialize the predicates. */
17560 d = bdesc_altivec_preds;
17561 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17563 machine_mode mode1;
17564 tree type;
17565 HOST_WIDE_INT mask = d->mask;
17567 if ((mask & builtin_mask) != mask)
17569 if (TARGET_DEBUG_BUILTIN)
17570 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17571 d->name);
17572 continue;
17575 if (rs6000_overloaded_builtin_p (d->code))
17576 mode1 = VOIDmode;
17577 else
17579 /* Cannot define builtin if the instruction is disabled. */
17580 gcc_assert (d->icode != CODE_FOR_nothing);
17581 mode1 = insn_data[d->icode].operand[1].mode;
17584 switch (mode1)
17586 case E_VOIDmode:
17587 type = int_ftype_int_opaque_opaque;
17588 break;
17589 case E_V2DImode:
17590 type = int_ftype_int_v2di_v2di;
17591 break;
17592 case E_V4SImode:
17593 type = int_ftype_int_v4si_v4si;
17594 break;
17595 case E_V8HImode:
17596 type = int_ftype_int_v8hi_v8hi;
17597 break;
17598 case E_V16QImode:
17599 type = int_ftype_int_v16qi_v16qi;
17600 break;
17601 case E_V4SFmode:
17602 type = int_ftype_int_v4sf_v4sf;
17603 break;
17604 case E_V2DFmode:
17605 type = int_ftype_int_v2df_v2df;
17606 break;
17607 default:
17608 gcc_unreachable ();
17611 def_builtin (d->name, type, d->code);
17614 /* Initialize the abs* operators. */
17615 d = bdesc_abs;
17616 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17618 machine_mode mode0;
17619 tree type;
17620 HOST_WIDE_INT mask = d->mask;
17622 if ((mask & builtin_mask) != mask)
17624 if (TARGET_DEBUG_BUILTIN)
17625 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17626 d->name);
17627 continue;
17630 /* Cannot define builtin if the instruction is disabled. */
17631 gcc_assert (d->icode != CODE_FOR_nothing);
17632 mode0 = insn_data[d->icode].operand[0].mode;
17634 switch (mode0)
17636 case E_V2DImode:
17637 type = v2di_ftype_v2di;
17638 break;
17639 case E_V4SImode:
17640 type = v4si_ftype_v4si;
17641 break;
17642 case E_V8HImode:
17643 type = v8hi_ftype_v8hi;
17644 break;
17645 case E_V16QImode:
17646 type = v16qi_ftype_v16qi;
17647 break;
17648 case E_V4SFmode:
17649 type = v4sf_ftype_v4sf;
17650 break;
17651 case E_V2DFmode:
17652 type = v2df_ftype_v2df;
17653 break;
17654 default:
17655 gcc_unreachable ();
17658 def_builtin (d->name, type, d->code);
17661 /* Initialize target builtin that implements
17662 targetm.vectorize.builtin_mask_for_load. */
17664 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17665 v16qi_ftype_long_pcvoid,
17666 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17667 BUILT_IN_MD, NULL, NULL_TREE);
17668 TREE_READONLY (decl) = 1;
17669 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17670 altivec_builtin_mask_for_load = decl;
17672 /* Access to the vec_init patterns. */
17673 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17674 integer_type_node, integer_type_node,
17675 integer_type_node, NULL_TREE);
17676 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17678 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17679 short_integer_type_node,
17680 short_integer_type_node,
17681 short_integer_type_node,
17682 short_integer_type_node,
17683 short_integer_type_node,
17684 short_integer_type_node,
17685 short_integer_type_node, NULL_TREE);
17686 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17688 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17689 char_type_node, char_type_node,
17690 char_type_node, char_type_node,
17691 char_type_node, char_type_node,
17692 char_type_node, char_type_node,
17693 char_type_node, char_type_node,
17694 char_type_node, char_type_node,
17695 char_type_node, char_type_node,
17696 char_type_node, NULL_TREE);
17697 def_builtin ("__builtin_vec_init_v16qi", ftype,
17698 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17700 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17701 float_type_node, float_type_node,
17702 float_type_node, NULL_TREE);
17703 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17705 /* VSX builtins. */
17706 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17707 double_type_node, NULL_TREE);
17708 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17710 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17711 intDI_type_node, NULL_TREE);
17712 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17714 /* Access to the vec_set patterns. */
17715 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17716 intSI_type_node,
17717 integer_type_node, NULL_TREE);
17718 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17720 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17721 intHI_type_node,
17722 integer_type_node, NULL_TREE);
17723 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17725 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17726 intQI_type_node,
17727 integer_type_node, NULL_TREE);
17728 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17730 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17731 float_type_node,
17732 integer_type_node, NULL_TREE);
17733 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17735 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17736 double_type_node,
17737 integer_type_node, NULL_TREE);
17738 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17740 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17741 intDI_type_node,
17742 integer_type_node, NULL_TREE);
17743 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17745 /* Access to the vec_extract patterns. */
17746 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17747 integer_type_node, NULL_TREE);
17748 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17750 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17751 integer_type_node, NULL_TREE);
17752 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17754 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17755 integer_type_node, NULL_TREE);
17756 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17758 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17759 integer_type_node, NULL_TREE);
17760 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17762 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17763 integer_type_node, NULL_TREE);
17764 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17766 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17767 integer_type_node, NULL_TREE);
17768 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
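/* A usage sketch for the three families just defined (hypothetical user
   code, for illustration only; argument order follows the function types
   built above: vector, value, element index):

     vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);
     v = __builtin_vec_set_v4si (v, 9, 2);      set element 2 to 9
     int x = __builtin_vec_ext_v4si (v, 2);     extract element 2  */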
17771 if (V1TI_type_node)
17773 tree v1ti_ftype_long_pcvoid
17774 = build_function_type_list (V1TI_type_node,
17775 long_integer_type_node, pcvoid_type_node,
17776 NULL_TREE);
17777 tree void_ftype_v1ti_long_pvoid
17778 = build_function_type_list (void_type_node,
17779 V1TI_type_node, long_integer_type_node,
17780 pvoid_type_node, NULL_TREE);
17781 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17782 VSX_BUILTIN_LXVD2X_V1TI);
17783 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17784 VSX_BUILTIN_STXVD2X_V1TI);
17785 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17786 NULL_TREE, NULL_TREE);
17787 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17788 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17789 intTI_type_node,
17790 integer_type_node, NULL_TREE);
17791 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17792 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17793 integer_type_node, NULL_TREE);
17794 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17799 static void
17800 htm_init_builtins (void)
17802 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17803 const struct builtin_description *d;
17804 size_t i;
17806 d = bdesc_htm;
17807 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17809 tree op[MAX_HTM_OPERANDS], type;
17810 HOST_WIDE_INT mask = d->mask;
17811 unsigned attr = rs6000_builtin_info[d->code].attr;
17812 bool void_func = (attr & RS6000_BTC_VOID);
17813 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17814 int nopnds = 0;
17815 tree gpr_type_node;
17816 tree rettype;
17817 tree argtype;
17819 /* It is expected that these htm built-in functions may have
17820 d->icode equal to CODE_FOR_nothing. */
17822 if (TARGET_32BIT && TARGET_POWERPC64)
17823 gpr_type_node = long_long_unsigned_type_node;
17824 else
17825 gpr_type_node = long_unsigned_type_node;
17827 if (attr & RS6000_BTC_SPR)
17829 rettype = gpr_type_node;
17830 argtype = gpr_type_node;
17832 else if (d->code == HTM_BUILTIN_TABORTDC
17833 || d->code == HTM_BUILTIN_TABORTDCI)
17835 rettype = unsigned_type_node;
17836 argtype = gpr_type_node;
17838 else
17840 rettype = unsigned_type_node;
17841 argtype = unsigned_type_node;
17844 if ((mask & builtin_mask) != mask)
17846 if (TARGET_DEBUG_BUILTIN)
17847 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17848 continue;
17851 if (d->name == 0)
17853 if (TARGET_DEBUG_BUILTIN)
17854 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17855 (long unsigned) i);
17856 continue;
17859 op[nopnds++] = (void_func) ? void_type_node : rettype;
17861 if (attr_args == RS6000_BTC_UNARY)
17862 op[nopnds++] = argtype;
17863 else if (attr_args == RS6000_BTC_BINARY)
17865 op[nopnds++] = argtype;
17866 op[nopnds++] = argtype;
17868 else if (attr_args == RS6000_BTC_TERNARY)
17870 op[nopnds++] = argtype;
17871 op[nopnds++] = argtype;
17872 op[nopnds++] = argtype;
17875 switch (nopnds)
17877 case 1:
17878 type = build_function_type_list (op[0], NULL_TREE);
17879 break;
17880 case 2:
17881 type = build_function_type_list (op[0], op[1], NULL_TREE);
17882 break;
17883 case 3:
17884 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17885 break;
17886 case 4:
17887 type = build_function_type_list (op[0], op[1], op[2], op[3],
17888 NULL_TREE);
17889 break;
17890 default:
17891 gcc_unreachable ();
17894 def_builtin (d->name, type, d->code);
17898 /* Hash function for builtin functions with up to 3 arguments and a return
17899 type. */
17900 hashval_t
17901 builtin_hasher::hash (builtin_hash_struct *bh)
17903 unsigned ret = 0;
17904 int i;
17906 for (i = 0; i < 4; i++)
17908 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17909 ret = (ret * 2) + bh->uns_p[i];
17912 return ret;
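/* In effect the hash interleaves each mode with its signedness flag:
   ret = (((((0 * M + m0) * 2 + u0) * M + m1) * 2 + u1) * M + ...),
   where M is MAX_MACHINE_MODE, so distinct (mode, uns_p) tuples map to
   distinct values until the multiplications overflow.  */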
17915 /* Compare builtin hash entries H1 and H2 for equivalence. */
17916 bool
17917 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17919 return ((p1->mode[0] == p2->mode[0])
17920 && (p1->mode[1] == p2->mode[1])
17921 && (p1->mode[2] == p2->mode[2])
17922 && (p1->mode[3] == p2->mode[3])
17923 && (p1->uns_p[0] == p2->uns_p[0])
17924 && (p1->uns_p[1] == p2->uns_p[1])
17925 && (p1->uns_p[2] == p2->uns_p[2])
17926 && (p1->uns_p[3] == p2->uns_p[3]));
17929 /* Map types for builtin functions with an explicit return type and up to 3
17930 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
17931 of the unused arguments. */
17932 static tree
17933 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17934 machine_mode mode_arg1, machine_mode mode_arg2,
17935 enum rs6000_builtins builtin, const char *name)
17937 struct builtin_hash_struct h;
17938 struct builtin_hash_struct *h2;
17939 int num_args = 3;
17940 int i;
17941 tree ret_type = NULL_TREE;
17942 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17944 /* Create builtin_hash_table. */
17945 if (builtin_hash_table == NULL)
17946 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17948 h.type = NULL_TREE;
17949 h.mode[0] = mode_ret;
17950 h.mode[1] = mode_arg0;
17951 h.mode[2] = mode_arg1;
17952 h.mode[3] = mode_arg2;
17953 h.uns_p[0] = 0;
17954 h.uns_p[1] = 0;
17955 h.uns_p[2] = 0;
17956 h.uns_p[3] = 0;
17958 /* If the builtin produces unsigned results or takes unsigned
17959 arguments, and it is returned as a decl for the vectorizer (such as
17960 widening multiplies, permute), make sure the arguments and return value
17961 are type correct. */
17962 switch (builtin)
17964 /* unsigned 1 argument functions. */
17965 case CRYPTO_BUILTIN_VSBOX:
17966 case P8V_BUILTIN_VGBBD:
17967 case MISC_BUILTIN_CDTBCD:
17968 case MISC_BUILTIN_CBCDTD:
17969 h.uns_p[0] = 1;
17970 h.uns_p[1] = 1;
17971 break;
17973 /* unsigned 2 argument functions. */
17974 case ALTIVEC_BUILTIN_VMULEUB:
17975 case ALTIVEC_BUILTIN_VMULEUH:
17976 case ALTIVEC_BUILTIN_VMULEUW:
17977 case ALTIVEC_BUILTIN_VMULOUB:
17978 case ALTIVEC_BUILTIN_VMULOUH:
17979 case ALTIVEC_BUILTIN_VMULOUW:
17980 case CRYPTO_BUILTIN_VCIPHER:
17981 case CRYPTO_BUILTIN_VCIPHERLAST:
17982 case CRYPTO_BUILTIN_VNCIPHER:
17983 case CRYPTO_BUILTIN_VNCIPHERLAST:
17984 case CRYPTO_BUILTIN_VPMSUMB:
17985 case CRYPTO_BUILTIN_VPMSUMH:
17986 case CRYPTO_BUILTIN_VPMSUMW:
17987 case CRYPTO_BUILTIN_VPMSUMD:
17988 case CRYPTO_BUILTIN_VPMSUM:
17989 case MISC_BUILTIN_ADDG6S:
17990 case MISC_BUILTIN_DIVWEU:
17991 case MISC_BUILTIN_DIVWEUO:
17992 case MISC_BUILTIN_DIVDEU:
17993 case MISC_BUILTIN_DIVDEUO:
17994 case VSX_BUILTIN_UDIV_V2DI:
17995 case ALTIVEC_BUILTIN_VMAXUB:
17996 case ALTIVEC_BUILTIN_VMINUB:
17997 case ALTIVEC_BUILTIN_VMAXUH:
17998 case ALTIVEC_BUILTIN_VMINUH:
17999 case ALTIVEC_BUILTIN_VMAXUW:
18000 case ALTIVEC_BUILTIN_VMINUW:
18001 case P8V_BUILTIN_VMAXUD:
18002 case P8V_BUILTIN_VMINUD:
18003 h.uns_p[0] = 1;
18004 h.uns_p[1] = 1;
18005 h.uns_p[2] = 1;
18006 break;
18008 /* unsigned 3 argument functions. */
18009 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
18010 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
18011 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
18012 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18013 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18014 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
18015 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
18016 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
18017 case VSX_BUILTIN_VPERM_16QI_UNS:
18018 case VSX_BUILTIN_VPERM_8HI_UNS:
18019 case VSX_BUILTIN_VPERM_4SI_UNS:
18020 case VSX_BUILTIN_VPERM_2DI_UNS:
18021 case VSX_BUILTIN_XXSEL_16QI_UNS:
18022 case VSX_BUILTIN_XXSEL_8HI_UNS:
18023 case VSX_BUILTIN_XXSEL_4SI_UNS:
18024 case VSX_BUILTIN_XXSEL_2DI_UNS:
18025 case CRYPTO_BUILTIN_VPERMXOR:
18026 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
18027 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
18028 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
18029 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
18030 case CRYPTO_BUILTIN_VSHASIGMAW:
18031 case CRYPTO_BUILTIN_VSHASIGMAD:
18032 case CRYPTO_BUILTIN_VSHASIGMA:
18033 h.uns_p[0] = 1;
18034 h.uns_p[1] = 1;
18035 h.uns_p[2] = 1;
18036 h.uns_p[3] = 1;
18037 break;
18039 /* signed permute functions with unsigned char mask. */
18040 case ALTIVEC_BUILTIN_VPERM_16QI:
18041 case ALTIVEC_BUILTIN_VPERM_8HI:
18042 case ALTIVEC_BUILTIN_VPERM_4SI:
18043 case ALTIVEC_BUILTIN_VPERM_4SF:
18044 case ALTIVEC_BUILTIN_VPERM_2DI:
18045 case ALTIVEC_BUILTIN_VPERM_2DF:
18046 case VSX_BUILTIN_VPERM_16QI:
18047 case VSX_BUILTIN_VPERM_8HI:
18048 case VSX_BUILTIN_VPERM_4SI:
18049 case VSX_BUILTIN_VPERM_4SF:
18050 case VSX_BUILTIN_VPERM_2DI:
18051 case VSX_BUILTIN_VPERM_2DF:
18052 h.uns_p[3] = 1;
18053 break;
18055 /* unsigned args, signed return. */
18056 case VSX_BUILTIN_XVCVUXDSP:
18057 case VSX_BUILTIN_XVCVUXDDP_UNS:
18058 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
18059 h.uns_p[1] = 1;
18060 break;
18062 /* signed args, unsigned return. */
18063 case VSX_BUILTIN_XVCVDPUXDS_UNS:
18064 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
18065 case MISC_BUILTIN_UNPACK_TD:
18066 case MISC_BUILTIN_UNPACK_V1TI:
18067 h.uns_p[0] = 1;
18068 break;
18070 /* unsigned arguments for 128-bit pack instructions. */
18071 case MISC_BUILTIN_PACK_TD:
18072 case MISC_BUILTIN_PACK_V1TI:
18073 h.uns_p[1] = 1;
18074 h.uns_p[2] = 1;
18075 break;
18077 /* unsigned second arguments (vector shift right). */
18078 case ALTIVEC_BUILTIN_VSRB:
18079 case ALTIVEC_BUILTIN_VSRH:
18080 case ALTIVEC_BUILTIN_VSRW:
18081 case P8V_BUILTIN_VSRD:
18082 h.uns_p[2] = 1;
18083 break;
18085 default:
18086 break;
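/* For example, ALTIVEC_BUILTIN_VMULEUB lands in the "unsigned 2 argument"
   group above, so with mode_ret = V8HImode and both argument modes
   V16QImode the type built below is
     vector unsigned short (*) (vector unsigned char, vector unsigned char).  */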
18089 /* Figure out how many args are present. */
18090 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18091 num_args--;
18093 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18094 if (!ret_type && h.uns_p[0])
18095 ret_type = builtin_mode_to_type[h.mode[0]][0];
18097 if (!ret_type)
18098 fatal_error (input_location,
18099 "internal error: builtin function %qs had an unexpected "
18100 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
18102 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18103 arg_type[i] = NULL_TREE;
18105 for (i = 0; i < num_args; i++)
18107 int m = (int) h.mode[i+1];
18108 int uns_p = h.uns_p[i+1];
18110 arg_type[i] = builtin_mode_to_type[m][uns_p];
18111 if (!arg_type[i] && uns_p)
18112 arg_type[i] = builtin_mode_to_type[m][0];
18114 if (!arg_type[i])
18115 fatal_error (input_location,
18116 "internal error: builtin function %qs, argument %d "
18117 "had unexpected argument type %qs", name, i,
18118 GET_MODE_NAME (m));
18121 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18122 if (*found == NULL)
18124 h2 = ggc_alloc<builtin_hash_struct> ();
18125 *h2 = h;
18126 *found = h2;
18128 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18129 arg_type[2], NULL_TREE);
18132 return (*found)->type;
18135 static void
18136 rs6000_common_init_builtins (void)
18138 const struct builtin_description *d;
18139 size_t i;
18141 tree opaque_ftype_opaque = NULL_TREE;
18142 tree opaque_ftype_opaque_opaque = NULL_TREE;
18143 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18144 tree v2si_ftype = NULL_TREE;
18145 tree v2si_ftype_qi = NULL_TREE;
18146 tree v2si_ftype_v2si_qi = NULL_TREE;
18147 tree v2si_ftype_int_qi = NULL_TREE;
18148 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18150 if (!TARGET_PAIRED_FLOAT)
18152 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18153 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18156 /* Paired builtins are only available if the compiler is built with the
18157 appropriate options, so only create those builtins when the corresponding
18158 compiler option is enabled. Create Altivec and VSX builtins on machines
18159 with at least the general purpose extensions (970 and newer) to allow the
18160 use of the target attribute. */
18162 if (TARGET_EXTRA_BUILTINS)
18163 builtin_mask |= RS6000_BTM_COMMON;
18165 /* Add the ternary operators. */
18166 d = bdesc_3arg;
18167 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18169 tree type;
18170 HOST_WIDE_INT mask = d->mask;
18172 if ((mask & builtin_mask) != mask)
18174 if (TARGET_DEBUG_BUILTIN)
18175 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18176 continue;
18179 if (rs6000_overloaded_builtin_p (d->code))
18181 if (! (type = opaque_ftype_opaque_opaque_opaque))
18182 type = opaque_ftype_opaque_opaque_opaque
18183 = build_function_type_list (opaque_V4SI_type_node,
18184 opaque_V4SI_type_node,
18185 opaque_V4SI_type_node,
18186 opaque_V4SI_type_node,
18187 NULL_TREE);
18189 else
18191 enum insn_code icode = d->icode;
18192 if (d->name == 0)
18194 if (TARGET_DEBUG_BUILTIN)
18195 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
18196 (long unsigned)i);
18198 continue;
18201 if (icode == CODE_FOR_nothing)
18203 if (TARGET_DEBUG_BUILTIN)
18204 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18205 d->name);
18207 continue;
18210 type = builtin_function_type (insn_data[icode].operand[0].mode,
18211 insn_data[icode].operand[1].mode,
18212 insn_data[icode].operand[2].mode,
18213 insn_data[icode].operand[3].mode,
18214 d->code, d->name);
18217 def_builtin (d->name, type, d->code);
18220 /* Add the binary operators. */
18221 d = bdesc_2arg;
18222 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18224 machine_mode mode0, mode1, mode2;
18225 tree type;
18226 HOST_WIDE_INT mask = d->mask;
18228 if ((mask & builtin_mask) != mask)
18230 if (TARGET_DEBUG_BUILTIN)
18231 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18232 continue;
18235 if (rs6000_overloaded_builtin_p (d->code))
18237 if (! (type = opaque_ftype_opaque_opaque))
18238 type = opaque_ftype_opaque_opaque
18239 = build_function_type_list (opaque_V4SI_type_node,
18240 opaque_V4SI_type_node,
18241 opaque_V4SI_type_node,
18242 NULL_TREE);
18244 else
18246 enum insn_code icode = d->icode;
18247 if (d->name == 0)
18249 if (TARGET_DEBUG_BUILTIN)
18250 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
18251 (long unsigned)i);
18253 continue;
18256 if (icode == CODE_FOR_nothing)
18258 if (TARGET_DEBUG_BUILTIN)
18259 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18260 d->name);
18262 continue;
18265 mode0 = insn_data[icode].operand[0].mode;
18266 mode1 = insn_data[icode].operand[1].mode;
18267 mode2 = insn_data[icode].operand[2].mode;
18269 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18271 if (! (type = v2si_ftype_v2si_qi))
18272 type = v2si_ftype_v2si_qi
18273 = build_function_type_list (opaque_V2SI_type_node,
18274 opaque_V2SI_type_node,
18275 char_type_node,
18276 NULL_TREE);
18279 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18280 && mode2 == QImode)
18282 if (! (type = v2si_ftype_int_qi))
18283 type = v2si_ftype_int_qi
18284 = build_function_type_list (opaque_V2SI_type_node,
18285 integer_type_node,
18286 char_type_node,
18287 NULL_TREE);
18290 else
18291 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18292 d->code, d->name);
18295 def_builtin (d->name, type, d->code);
18298 /* Add the simple unary operators. */
18299 d = bdesc_1arg;
18300 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18302 machine_mode mode0, mode1;
18303 tree type;
18304 HOST_WIDE_INT mask = d->mask;
18306 if ((mask & builtin_mask) != mask)
18308 if (TARGET_DEBUG_BUILTIN)
18309 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18310 continue;
18313 if (rs6000_overloaded_builtin_p (d->code))
18315 if (! (type = opaque_ftype_opaque))
18316 type = opaque_ftype_opaque
18317 = build_function_type_list (opaque_V4SI_type_node,
18318 opaque_V4SI_type_node,
18319 NULL_TREE);
18321 else
18323 enum insn_code icode = d->icode;
18324 if (d->name == 0)
18326 if (TARGET_DEBUG_BUILTIN)
18327 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
18328 (long unsigned)i);
18330 continue;
18333 if (icode == CODE_FOR_nothing)
18335 if (TARGET_DEBUG_BUILTIN)
18336 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18337 d->name);
18339 continue;
18342 mode0 = insn_data[icode].operand[0].mode;
18343 mode1 = insn_data[icode].operand[1].mode;
18345 if (mode0 == V2SImode && mode1 == QImode)
18347 if (! (type = v2si_ftype_qi))
18348 type = v2si_ftype_qi
18349 = build_function_type_list (opaque_V2SI_type_node,
18350 char_type_node,
18351 NULL_TREE);
18354 else
18355 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18356 d->code, d->name);
18359 def_builtin (d->name, type, d->code);
18362 /* Add the simple no-argument operators. */
18363 d = bdesc_0arg;
18364 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18366 machine_mode mode0;
18367 tree type;
18368 HOST_WIDE_INT mask = d->mask;
18370 if ((mask & builtin_mask) != mask)
18372 if (TARGET_DEBUG_BUILTIN)
18373 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18374 continue;
18376 if (rs6000_overloaded_builtin_p (d->code))
18378 if (!opaque_ftype_opaque)
18379 opaque_ftype_opaque
18380 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18381 type = opaque_ftype_opaque;
18383 else
18385 enum insn_code icode = d->icode;
18386 if (d->name == 0)
18388 if (TARGET_DEBUG_BUILTIN)
18389 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18390 (long unsigned) i);
18391 continue;
18393 if (icode == CODE_FOR_nothing)
18395 if (TARGET_DEBUG_BUILTIN)
18396 fprintf (stderr,
18397 "rs6000_builtin, skip no-argument %s (no code)\n",
18398 d->name);
18399 continue;
18401 mode0 = insn_data[icode].operand[0].mode;
18402 if (mode0 == V2SImode)
18404 /* code for paired single */
18405 if (! (type = v2si_ftype))
18407 v2si_ftype
18408 = build_function_type_list (opaque_V2SI_type_node,
18409 NULL_TREE);
18410 type = v2si_ftype;
18413 else
18414 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18415 d->code, d->name);
18417 def_builtin (d->name, type, d->code);
18421 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18422 static void
18423 init_float128_ibm (machine_mode mode)
18425 if (!TARGET_XL_COMPAT)
18427 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18428 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18429 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18430 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18432 if (!TARGET_HARD_FLOAT)
18434 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18435 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18436 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18437 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18438 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18439 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18440 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18441 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18443 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18444 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18445 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18446 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18447 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18448 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18449 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18450 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18453 else
18455 set_optab_libfunc (add_optab, mode, "_xlqadd");
18456 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18457 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18458 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18461 /* Add various conversions for IFmode to use the traditional TFmode
18462 names. */
18463 if (mode == IFmode)
18465 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18466 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18467 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf2");
18468 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18469 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18470 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd2");
18472 if (TARGET_POWERPC64)
18474 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18475 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18476 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18477 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
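/* The net effect: with the IBM extended-double format, an operation such
   as "a * b" in this mode lowers to a libcall, e.g. __gcc_qmul (or
   _xlqmul under -mxl-compat), using the names registered above.  */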
18482 /* Set up IEEE 128-bit floating point routines. Use different names if the
18483 arguments can be passed in a vector register. The historical PowerPC
18484 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18485 continue to use that if we aren't using vector registers to pass IEEE
18486 128-bit floating point. */
18488 static void
18489 init_float128_ieee (machine_mode mode)
18491 if (FLOAT128_VECTOR_P (mode))
18493 set_optab_libfunc (add_optab, mode, "__addkf3");
18494 set_optab_libfunc (sub_optab, mode, "__subkf3");
18495 set_optab_libfunc (neg_optab, mode, "__negkf2");
18496 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18497 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18498 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18499 set_optab_libfunc (abs_optab, mode, "__abstkf2");
18501 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18502 set_optab_libfunc (ne_optab, mode, "__nekf2");
18503 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18504 set_optab_libfunc (ge_optab, mode, "__gekf2");
18505 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18506 set_optab_libfunc (le_optab, mode, "__lekf2");
18507 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18509 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18510 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18511 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18512 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18514 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18515 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18516 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18518 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18519 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18520 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18522 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18523 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18524 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf2");
18525 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18526 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18527 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd2");
18529 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18530 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18531 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18532 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18534 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18535 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18536 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18537 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18539 if (TARGET_POWERPC64)
18541 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18542 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18543 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18544 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18548 else
18550 set_optab_libfunc (add_optab, mode, "_q_add");
18551 set_optab_libfunc (sub_optab, mode, "_q_sub");
18552 set_optab_libfunc (neg_optab, mode, "_q_neg");
18553 set_optab_libfunc (smul_optab, mode, "_q_mul");
18554 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18555 if (TARGET_PPC_GPOPT)
18556 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18558 set_optab_libfunc (eq_optab, mode, "_q_feq");
18559 set_optab_libfunc (ne_optab, mode, "_q_fne");
18560 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18561 set_optab_libfunc (ge_optab, mode, "_q_fge");
18562 set_optab_libfunc (lt_optab, mode, "_q_flt");
18563 set_optab_libfunc (le_optab, mode, "_q_fle");
18565 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18566 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18567 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18568 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18569 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18570 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18571 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18572 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18576 static void
18577 rs6000_init_libfuncs (void)
18579 /* __float128 support. */
18580 if (TARGET_FLOAT128_TYPE)
18582 init_float128_ibm (IFmode);
18583 init_float128_ieee (KFmode);
18586 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18587 if (TARGET_LONG_DOUBLE_128)
18589 if (!TARGET_IEEEQUAD)
18590 init_float128_ibm (TFmode);
18592 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18593 else
18594 init_float128_ieee (TFmode);
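/* So a single build supports both 128-bit long double flavors: the
   IFmode/KFmode entries always exist when the __float128 type does, and
   TFmode is wired to whichever set matches the selected long double
   format.  */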
18598 /* Emit a potentially record-form instruction, setting DST from SRC.
18599 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18600 signed comparison of DST with zero. If DOT is 1, the generated RTL
18601 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18602 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18603 a separate COMPARE. */
18605 void
18606 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18608 if (dot == 0)
18610 emit_move_insn (dst, src);
18611 return;
18614 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18616 emit_move_insn (dst, src);
18617 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18618 return;
18621 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18622 if (dot == 1)
18624 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18625 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18627 else
18629 rtx set = gen_rtx_SET (dst, src);
18630 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
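/* Schematically, the RTL emitted for the CR0 case is
     dot == 1: (parallel [(set ccreg (compare:CC src 0)) (clobber dst)])
     dot == 2: (parallel [(set ccreg (compare:CC src 0)) (set dst src)])
   which is the shape the record-form ("dot") patterns in rs6000.md
   expect.  */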
18635 /* A validation routine: say whether CODE, a condition code, and MODE
18636 match. The other alternatives either don't make sense or should
18637 never be generated. */
18639 void
18640 validate_condition_mode (enum rtx_code code, machine_mode mode)
18642 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18643 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18644 && GET_MODE_CLASS (mode) == MODE_CC);
18646 /* These don't make sense. */
18647 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18648 || mode != CCUNSmode);
18650 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18651 || mode == CCUNSmode);
18653 gcc_assert (mode == CCFPmode
18654 || (code != ORDERED && code != UNORDERED
18655 && code != UNEQ && code != LTGT
18656 && code != UNGT && code != UNLT
18657 && code != UNGE && code != UNLE));
18659 /* These should never be generated except for
18660 flag_finite_math_only. */
18661 gcc_assert (mode != CCFPmode
18662 || flag_finite_math_only
18663 || (code != LE && code != GE
18664 && code != UNEQ && code != LTGT
18665 && code != UNGT && code != UNLT));
18667 /* These are invalid; the information is not there. */
18668 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18672 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18673 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18674 not zero, store there the bit offset (counted from the right) where
18675 the single stretch of 1 bits begins; and similarly for B, the bit
18676 offset where it ends. */
18678 bool
18679 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18681 unsigned HOST_WIDE_INT val = INTVAL (mask);
18682 unsigned HOST_WIDE_INT bit;
18683 int nb, ne;
18684 int n = GET_MODE_PRECISION (mode);
18686 if (mode != DImode && mode != SImode)
18687 return false;
18689 if (INTVAL (mask) >= 0)
18691 bit = val & -val;
18692 ne = exact_log2 (bit);
18693 nb = exact_log2 (val + bit);
18695 else if (val + 1 == 0)
18697 nb = n;
18698 ne = 0;
18700 else if (val & 1)
18702 val = ~val;
18703 bit = val & -val;
18704 nb = exact_log2 (bit);
18705 ne = exact_log2 (val + bit);
18707 else
18709 bit = val & -val;
18710 ne = exact_log2 (bit);
18711 if (val + bit == 0)
18712 nb = n;
18713 else
18714 nb = 0;
18717 nb--;
18719 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18720 return false;
18722 if (b)
18723 *b = nb;
18724 if (e)
18725 *e = ne;
18727 return true;
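/* Worked examples, in SImode: 0x00fff000 is a single stretch of ones,
   giving *b = 23 and *e = 12; the wrap-around mask 0xff0000ff gives
   *b = 7 and *e = 24, which only the rotate-class instructions can
   implement.  */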
18730 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18731 or rldicr instruction, to implement an AND with it in mode MODE. */
18733 bool
18734 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18736 int nb, ne;
18738 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18739 return false;
18741 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18742 does not wrap. */
18743 if (mode == DImode)
18744 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18746 /* For SImode, rlwinm can do everything. */
18747 if (mode == SImode)
18748 return (nb < 32 && ne < 32);
18750 return false;
18753 /* Return the instruction template for an AND with mask in mode MODE, with
18754 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18756 const char *
18757 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18759 int nb, ne;
18761 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18762 gcc_unreachable ();
18764 if (mode == DImode && ne == 0)
18766 operands[3] = GEN_INT (63 - nb);
18767 if (dot)
18768 return "rldicl. %0,%1,0,%3";
18769 return "rldicl %0,%1,0,%3";
18772 if (mode == DImode && nb == 63)
18774 operands[3] = GEN_INT (63 - ne);
18775 if (dot)
18776 return "rldicr. %0,%1,0,%3";
18777 return "rldicr %0,%1,0,%3";
18780 if (nb < 32 && ne < 32)
18782 operands[3] = GEN_INT (31 - nb);
18783 operands[4] = GEN_INT (31 - ne);
18784 if (dot)
18785 return "rlwinm. %0,%1,0,%3,%4";
18786 return "rlwinm %0,%1,0,%3,%4";
18789 gcc_unreachable ();
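/* For instance, an SImode AND with 0x00fff000 (nb = 23, ne = 12) takes
   the rlwinm branch and prints "rlwinm %0,%1,0,8,19", since
   31 - 23 = 8 and 31 - 12 = 19.  */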
18792 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18793 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18794 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18796 bool
18797 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18799 int nb, ne;
18801 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18802 return false;
18804 int n = GET_MODE_PRECISION (mode);
18805 int sh = -1;
18807 if (CONST_INT_P (XEXP (shift, 1)))
18809 sh = INTVAL (XEXP (shift, 1));
18810 if (sh < 0 || sh >= n)
18811 return false;
18814 rtx_code code = GET_CODE (shift);
18816 /* Convert any shift by 0 to a rotate, to simplify below code. */
18817 if (sh == 0)
18818 code = ROTATE;
18820 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18821 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18822 code = ASHIFT;
18823 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18825 code = LSHIFTRT;
18826 sh = n - sh;
18829 /* DImode rotates need rld*. */
18830 if (mode == DImode && code == ROTATE)
18831 return (nb == 63 || ne == 0 || ne == sh);
18833 /* SImode rotates need rlw*. */
18834 if (mode == SImode && code == ROTATE)
18835 return (nb < 32 && ne < 32 && sh < 32);
18837 /* Wrap-around masks are only okay for rotates. */
18838 if (ne > nb)
18839 return false;
18841 /* Variable shifts are only okay for rotates. */
18842 if (sh < 0)
18843 return false;
18845 /* Don't allow ASHIFT if the mask is wrong for that. */
18846 if (code == ASHIFT && ne < sh)
18847 return false;
18849 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18850 if the mask is wrong for that. */
18851 if (nb < 32 && ne < 32 && sh < 32
18852 && !(code == LSHIFTRT && nb >= 32 - sh))
18853 return true;
18855 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18856 if the mask is wrong for that. */
18857 if (code == LSHIFTRT)
18858 sh = 64 - sh;
18859 if (nb == 63 || ne == 0 || ne == sh)
18860 return !(code == LSHIFTRT && nb >= sh);
18862 return false;
18865 /* Return the instruction template for a shift with mask in mode MODE, with
18866 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18868 const char *
18869 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18871 int nb, ne;
18873 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18874 gcc_unreachable ();
18876 if (mode == DImode && ne == 0)
18878 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18879 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18880 operands[3] = GEN_INT (63 - nb);
18881 if (dot)
18882 return "rld%I2cl. %0,%1,%2,%3";
18883 return "rld%I2cl %0,%1,%2,%3";
18886 if (mode == DImode && nb == 63)
18888 operands[3] = GEN_INT (63 - ne);
18889 if (dot)
18890 return "rld%I2cr. %0,%1,%2,%3";
18891 return "rld%I2cr %0,%1,%2,%3";
18894 if (mode == DImode
18895 && GET_CODE (operands[4]) != LSHIFTRT
18896 && CONST_INT_P (operands[2])
18897 && ne == INTVAL (operands[2]))
18899 operands[3] = GEN_INT (63 - nb);
18900 if (dot)
18901 return "rld%I2c. %0,%1,%2,%3";
18902 return "rld%I2c %0,%1,%2,%3";
18905 if (nb < 32 && ne < 32)
18907 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18908 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18909 operands[3] = GEN_INT (31 - nb);
18910 operands[4] = GEN_INT (31 - ne);
18911 /* This insn can also be a 64-bit rotate with mask that really makes
18912 it just a shift right (with mask); the %h below are to adjust for
18913 that situation (shift count is >= 32 in that case). */
18914 if (dot)
18915 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18916 return "rlw%I2nm %0,%1,%h2,%3,%4";
18919 gcc_unreachable ();
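/* For instance, a DImode left shift by 8 under the mask
   0xffffffffffffff00 (nb = 63, ne = 8) takes the rldicr branch and
   prints "rldicr %0,%1,8,55", i.e. the sldi 8 idiom.  */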
18922 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18923 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18924 ASHIFT, or LSHIFTRT) in mode MODE. */
18926 bool
18927 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18929 int nb, ne;
18931 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18932 return false;
18934 int n = GET_MODE_PRECISION (mode);
18936 int sh = INTVAL (XEXP (shift, 1));
18937 if (sh < 0 || sh >= n)
18938 return false;
18940 rtx_code code = GET_CODE (shift);
18942 /* Convert any shift by 0 to a rotate, to simplify below code. */
18943 if (sh == 0)
18944 code = ROTATE;
18946 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18947 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18948 code = ASHIFT;
18949 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18951 code = LSHIFTRT;
18952 sh = n - sh;
18955 /* DImode rotates need rldimi. */
18956 if (mode == DImode && code == ROTATE)
18957 return (ne == sh);
18959 /* SImode rotates need rlwimi. */
18960 if (mode == SImode && code == ROTATE)
18961 return (nb < 32 && ne < 32 && sh < 32);
18963 /* Wrap-around masks are only okay for rotates. */
18964 if (ne > nb)
18965 return false;
18967 /* Don't allow ASHIFT if the mask is wrong for that. */
18968 if (code == ASHIFT && ne < sh)
18969 return false;
18971 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18972 if the mask is wrong for that. */
18973 if (nb < 32 && ne < 32 && sh < 32
18974 && !(code == LSHIFTRT && nb >= 32 - sh))
18975 return true;
18977 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18978 if the mask is wrong for that. */
18979 if (code == LSHIFTRT)
18980 sh = 64 - sh;
18981 if (ne == sh)
18982 return !(code == LSHIFTRT && nb >= sh);
18984 return false;
18987 /* Return the instruction template for an insert with mask in mode MODE, with
18988 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18990 const char *
18991 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18993 int nb, ne;
18995 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18996 gcc_unreachable ();
18998 /* Prefer rldimi because rlwimi is cracked. */
18999 if (TARGET_POWERPC64
19000 && (!dot || mode == DImode)
19001 && GET_CODE (operands[4]) != LSHIFTRT
19002 && ne == INTVAL (operands[2]))
19004 operands[3] = GEN_INT (63 - nb);
19005 if (dot)
19006 return "rldimi. %0,%1,%2,%3";
19007 return "rldimi %0,%1,%2,%3";
19010 if (nb < 32 && ne < 32)
19012 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19013 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19014 operands[3] = GEN_INT (31 - nb);
19015 operands[4] = GEN_INT (31 - ne);
19016 if (dot)
19017 return "rlwimi. %0,%1,%2,%3,%4";
19018 return "rlwimi %0,%1,%2,%3,%4";
19021 gcc_unreachable ();
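/* For instance, inserting the low byte of %1 at bits 8..15 of %0 in
   SImode (rotate left by 8, mask 0x0000ff00, so nb = 15 and ne = 8)
   prints "rlwimi %0,%1,8,16,23".  */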
19024 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
19025 using two machine instructions. */
19027 bool
19028 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
19030 /* There are two kinds of AND we can handle with two insns:
19031 1) those we can do with two rl* insns;
19032 2) ori[s];xori[s].
19034 We do not handle that last case yet. */
19036 /* If there is just one stretch of ones, we can do it. */
19037 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
19038 return true;
19040 /* Otherwise, fill in the lowest "hole"; if we can do the result with
19041 one insn, we can do the whole thing with two. */
19042 unsigned HOST_WIDE_INT val = INTVAL (c);
19043 unsigned HOST_WIDE_INT bit1 = val & -val;
19044 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19045 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19046 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19047 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
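/* Worked example: c = 0x00ff0ff0 gives bit1 = 0x10, bit2 = 0x1000 and
   bit3 = 0x10000, so the test above is applied to
   0x00ff0ff0 + 0x10000 - 0x1000 = 0x00fffff0, i.e. the original
   constant with its lowest hole filled in.  */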
19050 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
19051 If EXPAND is true, split rotate-and-mask instructions we generate to
19052 their constituent parts as well (this is used during expand); if DOT
19053 is 1, make the last insn a record-form instruction clobbering the
19054 destination GPR and setting the CC reg (from operands[3]); if 2, set
19055 that GPR as well as the CC reg. */
19057 void
19058 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
19060 gcc_assert (!(expand && dot));
19062 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
19064 /* If it is one stretch of ones, it is DImode; shift left, mask, then
19065 shift right. This generates better code than doing the masks without
19066 shifts, or shifting first right and then left. */
19067 int nb, ne;
19068 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
19070 gcc_assert (mode == DImode);
19072 int shift = 63 - nb;
19073 if (expand)
19075 rtx tmp1 = gen_reg_rtx (DImode);
19076 rtx tmp2 = gen_reg_rtx (DImode);
19077 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
19078 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
19079 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
19081 else
19083 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
19084 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
19085 emit_move_insn (operands[0], tmp);
19086 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
19087 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19089 return;
19092 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19093 that does the rest. */
19094 unsigned HOST_WIDE_INT bit1 = val & -val;
19095 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19096 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19097 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19099 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
19100 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
19102 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
19104 /* Two "no-rotate"-and-mask instructions, for SImode. */
19105 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
19107 gcc_assert (mode == SImode);
19109 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19110 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
19111 emit_move_insn (reg, tmp);
19112 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19113 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19114 return;
19117 gcc_assert (mode == DImode);
19119 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19120 insns; we have to do the first in SImode, because it wraps. */
19121 if (mask2 <= 0xffffffff
19122 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
19124 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19125 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
19126 GEN_INT (mask1));
19127 rtx reg_low = gen_lowpart (SImode, reg);
19128 emit_move_insn (reg_low, tmp);
19129 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19130 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19131 return;
19134 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19135 at the top end), rotate back and clear the other hole. */
19136 int right = exact_log2 (bit3);
19137 int left = 64 - right;
19139 /* Rotate the mask too. */
19140 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
19142 if (expand)
19144 rtx tmp1 = gen_reg_rtx (DImode);
19145 rtx tmp2 = gen_reg_rtx (DImode);
19146 rtx tmp3 = gen_reg_rtx (DImode);
19147 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19148 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19149 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19150 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19152 else
19154 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19155 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19156 emit_move_insn (operands[0], tmp);
19157 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19158 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19159 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19163 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
19164 for lfq and stfq insns iff the registers are hard registers. */
19166 int
19167 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19169 /* We might have been passed a SUBREG. */
19170 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19171 return 0;
19173 /* We might have been passed non-floating-point registers. */
19174 if (!FP_REGNO_P (REGNO (reg1))
19175 || !FP_REGNO_P (REGNO (reg2)))
19176 return 0;
19178 return (REGNO (reg1) == REGNO (reg2) - 1);
19181 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
19182 addr1 and addr2 must be in consecutive memory locations
19183 (addr2 == addr1 + 8). */
19185 int
19186 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19188 rtx addr1, addr2;
19189 unsigned int reg1, reg2;
19190 int offset1, offset2;
19192 /* The mems cannot be volatile. */
19193 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19194 return 0;
19196 addr1 = XEXP (mem1, 0);
19197 addr2 = XEXP (mem2, 0);
19199 /* Extract an offset (if used) from the first addr. */
19200 if (GET_CODE (addr1) == PLUS)
19202 /* If not a REG, return zero. */
19203 if (GET_CODE (XEXP (addr1, 0)) != REG)
19204 return 0;
19205 else
19207 reg1 = REGNO (XEXP (addr1, 0));
19208 /* The offset must be constant! */
19209 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19210 return 0;
19211 offset1 = INTVAL (XEXP (addr1, 1));
19214 else if (GET_CODE (addr1) != REG)
19215 return 0;
19216 else
19218 reg1 = REGNO (addr1);
19219 /* This was a simple (mem (reg)) expression. Offset is 0. */
19220 offset1 = 0;
19223 /* And now for the second addr. */
19224 if (GET_CODE (addr2) == PLUS)
19226 /* If not a REG, return zero. */
19227 if (GET_CODE (XEXP (addr2, 0)) != REG)
19228 return 0;
19229 else
19231 reg2 = REGNO (XEXP (addr2, 0));
19232 /* The offset must be constant. */
19233 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19234 return 0;
19235 offset2 = INTVAL (XEXP (addr2, 1));
19238 else if (GET_CODE (addr2) != REG)
19239 return 0;
19240 else
19242 reg2 = REGNO (addr2);
19243 /* This was a simple (mem (reg)) expression. Offset is 0. */
19244 offset2 = 0;
19247 /* Both of these must have the same base register. */
19248 if (reg1 != reg2)
19249 return 0;
19251 /* The offset for the second addr must be 8 more than the first addr. */
19252 if (offset2 != offset1 + 8)
19253 return 0;
19255 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19256 instructions. */
19257 return 1;
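/* For example, (mem:DF (plus r3 8)) and (mem:DF (plus r3 16)) pass all
   of the checks above: neither is volatile, both use the same base
   register, and the offsets are exactly 8 apart.  */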
19260 /* Return the mode to be used for memory when a secondary memory
19261 location is needed. For SDmode values we need to use DDmode, in
19262 all other cases we can use the same mode. */
19263 machine_mode
19264 rs6000_secondary_memory_needed_mode (machine_mode mode)
19266 if (lra_in_progress && mode == SDmode)
19267 return DDmode;
19268 return mode;
19271 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19272 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19273 only work on the traditional altivec registers, note if an altivec register
19274 was chosen. */
19276 static enum rs6000_reg_type
19277 register_to_reg_type (rtx reg, bool *is_altivec)
19279 HOST_WIDE_INT regno;
19280 enum reg_class rclass;
19282 if (GET_CODE (reg) == SUBREG)
19283 reg = SUBREG_REG (reg);
19285 if (!REG_P (reg))
19286 return NO_REG_TYPE;
19288 regno = REGNO (reg);
19289 if (regno >= FIRST_PSEUDO_REGISTER)
19291 if (!lra_in_progress && !reload_completed)
19292 return PSEUDO_REG_TYPE;
19294 regno = true_regnum (reg);
19295 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19296 return PSEUDO_REG_TYPE;
19299 gcc_assert (regno >= 0);
19301 if (is_altivec && ALTIVEC_REGNO_P (regno))
19302 *is_altivec = true;
19304 rclass = rs6000_regno_regclass[regno];
19305 return reg_class_to_reg_type[(int)rclass];
19308 /* Helper function to return the cost of adding a TOC entry address. */
19310 static inline int
19311 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19313 int ret;
19315 if (TARGET_CMODEL != CMODEL_SMALL)
19316 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19318 else
19319 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19321 return ret;
19324 /* Helper function for rs6000_secondary_reload to determine whether the memory
19325 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19326 needs reloading. Return negative if the memory is not handled by the memory
19327 helper functions and a different reload method should be tried, 0 if no
19328 additional instructions are needed, and positive to give the extra cost for
19329 the memory. */
19331 static int
19332 rs6000_secondary_reload_memory (rtx addr,
19333 enum reg_class rclass,
19334 machine_mode mode)
19336 int extra_cost = 0;
19337 rtx reg, and_arg, plus_arg0, plus_arg1;
19338 addr_mask_type addr_mask;
19339 const char *type = NULL;
19340 const char *fail_msg = NULL;
19342 if (GPR_REG_CLASS_P (rclass))
19343 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19345 else if (rclass == FLOAT_REGS)
19346 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19348 else if (rclass == ALTIVEC_REGS)
19349 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19351 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19352 else if (rclass == VSX_REGS)
19353 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19354 & ~RELOAD_REG_AND_M16);
19356 /* If the register allocator hasn't made up its mind yet on the register
19357 class to use, settle on defaults. */
19358 else if (rclass == NO_REGS)
19360 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19361 & ~RELOAD_REG_AND_M16);
19363 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19364 addr_mask &= ~(RELOAD_REG_INDEXED
19365 | RELOAD_REG_PRE_INCDEC
19366 | RELOAD_REG_PRE_MODIFY);
19369 else
19370 addr_mask = 0;
19372 /* If the register isn't valid in this register class, just return now. */
19373 if ((addr_mask & RELOAD_REG_VALID) == 0)
19375 if (TARGET_DEBUG_ADDR)
19377 fprintf (stderr,
19378 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19379 "not valid in class\n",
19380 GET_MODE_NAME (mode), reg_class_names[rclass]);
19381 debug_rtx (addr);
19384 return -1;
19387 switch (GET_CODE (addr))
19389 /* Does the register class support auto update forms for this mode? We
19390 don't need a scratch register, since the powerpc only supports
19391 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19392 case PRE_INC:
19393 case PRE_DEC:
19394 reg = XEXP (addr, 0);
19395 if (!base_reg_operand (reg, GET_MODE (reg)))
19397 fail_msg = "no base register #1";
19398 extra_cost = -1;
19401 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19403 extra_cost = 1;
19404 type = "update";
19406 break;
19408 case PRE_MODIFY:
19409 reg = XEXP (addr, 0);
19410 plus_arg1 = XEXP (addr, 1);
19411 if (!base_reg_operand (reg, GET_MODE (reg))
19412 || GET_CODE (plus_arg1) != PLUS
19413 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19415 fail_msg = "bad PRE_MODIFY";
19416 extra_cost = -1;
19419 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19421 extra_cost = 1;
19422 type = "update";
19424 break;
19426 /* Do we need to simulate AND -16 to clear the bottom address bits used
19427 in VMX load/stores? Only allow the AND for vector sizes. */
19428 case AND:
19429 and_arg = XEXP (addr, 0);
19430 if (GET_MODE_SIZE (mode) != 16
19431 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19432 || INTVAL (XEXP (addr, 1)) != -16)
19434 fail_msg = "bad Altivec AND #1";
19435 extra_cost = -1;
19438 if (rclass != ALTIVEC_REGS)
19440 if (legitimate_indirect_address_p (and_arg, false))
19441 extra_cost = 1;
19443 else if (legitimate_indexed_address_p (and_arg, false))
19444 extra_cost = 2;
19446 else
19448 fail_msg = "bad Altivec AND #2";
19449 extra_cost = -1;
19452 type = "and";
19454 break;
19456 /* If this is an indirect address, make sure it is a base register. */
19457 case REG:
19458 case SUBREG:
19459 if (!legitimate_indirect_address_p (addr, false))
19461 extra_cost = 1;
19462 type = "move";
19464 break;
19466 /* If this is an indexed address, make sure the register class can handle
19467 indexed addresses for this mode. */
19468 case PLUS:
19469 plus_arg0 = XEXP (addr, 0);
19470 plus_arg1 = XEXP (addr, 1);
19472 /* (plus (plus (reg) (constant)) (constant)) is generated during
19473 push_reload processing, so handle it now. */
19474 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19476 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19478 extra_cost = 1;
19479 type = "offset";
19483 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19484 push_reload processing, so handle it now. */
19485 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19487 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19489 extra_cost = 1;
19490 type = "indexed #2";
19494 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19496 fail_msg = "no base register #2";
19497 extra_cost = -1;
19500 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19502 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19503 || !legitimate_indexed_address_p (addr, false))
19505 extra_cost = 1;
19506 type = "indexed";
19510 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19511 && CONST_INT_P (plus_arg1))
19513 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19515 extra_cost = 1;
19516 type = "vector d-form offset";
19520 /* Make sure the register class can handle offset addresses. */
19521 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19523 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19525 extra_cost = 1;
19526 type = "offset #2";
19530 else
19532 fail_msg = "bad PLUS";
19533 extra_cost = -1;
19536 break;
19538 case LO_SUM:
19539 /* Quad offsets are restricted and can't handle normal addresses. */
19540 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19542 extra_cost = -1;
19543 type = "vector d-form lo_sum";
19546 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19548 fail_msg = "bad LO_SUM";
19549 extra_cost = -1;
19552 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19554 extra_cost = 1;
19555 type = "lo_sum";
19557 break;
19559 /* Static addresses need to create a TOC entry. */
19560 case CONST:
19561 case SYMBOL_REF:
19562 case LABEL_REF:
19563 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19565 extra_cost = -1;
19566 type = "vector d-form lo_sum #2";
19569 else
19571 type = "address";
19572 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19574 break;
19576 /* TOC references look like offsetable memory. */
19577 case UNSPEC:
19578 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19580 fail_msg = "bad UNSPEC";
19581 extra_cost = -1;
19584 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19586 extra_cost = -1;
19587 type = "vector d-form lo_sum #3";
19590 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19592 extra_cost = 1;
19593 type = "toc reference";
19595 break;
19597 default:
19599 fail_msg = "bad address";
19600 extra_cost = -1;
19604 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19606 if (extra_cost < 0)
19607 fprintf (stderr,
19608 "rs6000_secondary_reload_memory error: mode = %s, "
19609 "class = %s, addr_mask = '%s', %s\n",
19610 GET_MODE_NAME (mode),
19611 reg_class_names[rclass],
19612 rs6000_debug_addr_mask (addr_mask, false),
19613 (fail_msg != NULL) ? fail_msg : "<bad address>");
19615 else
19616 fprintf (stderr,
19617 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19618 "addr_mask = '%s', extra cost = %d, %s\n",
19619 GET_MODE_NAME (mode),
19620 reg_class_names[rclass],
19621 rs6000_debug_addr_mask (addr_mask, false),
19622 extra_cost,
19623 (type) ? type : "<none>");
19625 debug_rtx (addr);
19628 return extra_cost;
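/* As a concrete case: reloading a 16-byte vector whose address is
   (and (plus (reg) (reg)) -16) into a non-Altivec class hits the AND
   arm above and reports an extra cost of 2 (materialize the sum, then
   perform the masked access).  */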
19631 /* Helper function for rs6000_secondary_reload to return true if a move to a
19632 different register class is really a simple move. */
19634 static bool
19635 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19636 enum rs6000_reg_type from_type,
19637 machine_mode mode)
19639 int size = GET_MODE_SIZE (mode);
19641 /* Add support for various direct moves available. In this function, we only
19642 look at cases where we don't need any extra registers, and one or more
19643 simple move insns are issued. Originally small integers are not allowed
19644 in FPR/VSX registers. Single precision binary floating point is not a simple
19645 move because we need to convert to the single precision memory layout.
19646 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19647 need special direct move handling, which we do not support yet. */
19648 if (TARGET_DIRECT_MOVE
19649 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19650 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19652 if (TARGET_POWERPC64)
19654 /* ISA 2.07: MTVSRD or MFVSRD. */
19655 if (size == 8)
19656 return true;
19658 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19659 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19660 return true;
19663 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19664 if (TARGET_P8_VECTOR)
19666 if (mode == SImode)
19667 return true;
19669 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19670 return true;
19673 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19674 if (mode == SDmode)
19675 return true;
19678 /* Power6+: MFTGPR or MFFGPR. */
19679 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19680 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19681 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19682 return true;
19684 /* Move to/from SPR. */
19685 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19686 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19687 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19688 return true;
19690 return false;
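/* Illustrative sketch, not from the original source: on a 64-bit ISA 2.07
   target, a DImode copy between a GPR and a VSX register qualifies as a
   simple move above because it is a single direct-move instruction:

	mtvsrd 0,5	# vs0 <- r5
	mfvsrd 5,0	# r5 <- vs0

   A 16-byte (TImode) copy is not simple on ISA 2.07 and is instead handled
   by rs6000_secondary_reload_direct_move below, which needs a scratch
   register.  */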
19693 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19694 special direct moves that involve allocating an extra register. Return
19695 true if such a move is possible, recording the insn code and extra cost of
19696 the helper pattern in SRI; return false otherwise. */
19698 static bool
19699 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19700 enum rs6000_reg_type from_type,
19701 machine_mode mode,
19702 secondary_reload_info *sri,
19703 bool altivec_p)
19705 bool ret = false;
19706 enum insn_code icode = CODE_FOR_nothing;
19707 int cost = 0;
19708 int size = GET_MODE_SIZE (mode);
19710 if (TARGET_POWERPC64 && size == 16)
19712 /* Handle moving 128-bit values from GPRs to VSX registers on
19713 ISA 2.07 (power8, power9) when running in 64-bit mode using
19714 XXPERMDI to glue the two 64-bit values back together. */
19715 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19717 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19718 icode = reg_addr[mode].reload_vsx_gpr;
19721 /* Handle moving 128-bit values from VSX registers to GPRs on
19722 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19723 bottom 64-bit value. */
19724 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19726 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19727 icode = reg_addr[mode].reload_gpr_vsx;
19731 else if (TARGET_POWERPC64 && mode == SFmode)
19733 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19735 cost = 3; /* xscvdpspn, mfvsrd, and. */
19736 icode = reg_addr[mode].reload_gpr_vsx;
19739 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19741 cost = 2; /* mtvsrwz, xscvspdpn. */
19742 icode = reg_addr[mode].reload_vsx_gpr;
19746 else if (!TARGET_POWERPC64 && size == 8)
19748 /* Handle moving 64-bit values from GPRs to floating point registers on
19749 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19750 32-bit values back together. Altivec register classes must be handled
19751 specially since a different instruction is used, and the secondary
19752 reload support requires a single instruction class in the scratch
19753 register constraint. However, right now TFmode is not allowed in
19754 Altivec registers, so the pattern will never match. */
19755 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19757 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19758 icode = reg_addr[mode].reload_fpr_gpr;
19762 if (icode != CODE_FOR_nothing)
19764 ret = true;
19765 if (sri)
19767 sri->icode = icode;
19768 sri->extra_cost = cost;
19772 return ret;
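/* Illustrative sketch, with assumed register numbers: on a 64-bit ISA 2.07
   target, the reload_vsx_gpr helper pattern glues a TImode value held in
   two GPRs into one VSX register, roughly:

	mtvsrd 0,4		# vs0 <- r4, first doubleword
	mtvsrd 1,5		# vs1 (the scratch) <- r5, second doubleword
	xxpermdi 0,0,1,0	# combine the two doublewords into vs0

   which matches the cost of 3 recorded above (2 mtvsrd's, 1 xxpermdi).  */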
19775 /* Return whether a move between two register classes can be done either
19776 directly (simple move) or via a pattern that uses a single extra temporary
19777 (using ISA 2.07's direct move in this case). */
19779 static bool
19780 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19781 enum rs6000_reg_type from_type,
19782 machine_mode mode,
19783 secondary_reload_info *sri,
19784 bool altivec_p)
19786 /* Fall back to load/store reloads if either type is not a register. */
19787 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19788 return false;
19790 /* If we haven't allocated registers yet, assume the move can be done for the
19791 standard register types. */
19792 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19793 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19794 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19795 return true;
19797 /* Moves within the same set of registers are a simple move for non-specialized
19798 registers. */
19799 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19800 return true;
19802 /* Check whether a simple move can be done directly. */
19803 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19805 if (sri)
19807 sri->icode = CODE_FOR_nothing;
19808 sri->extra_cost = 0;
19810 return true;
19813 /* Now check if we can do it in a few steps. */
19814 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19815 altivec_p);
19818 /* Inform reload about cases where moving X with a mode MODE to a register in
19819 RCLASS requires an extra scratch or immediate register. Return the class
19820 needed for the immediate register.
19822 For VSX and Altivec, we may need a register to convert sp+offset into
19823 reg+sp.
19825 For misaligned 64-bit gpr loads and stores we need a register to
19826 convert an offset address to indirect. */
19828 static reg_class_t
19829 rs6000_secondary_reload (bool in_p,
19830 rtx x,
19831 reg_class_t rclass_i,
19832 machine_mode mode,
19833 secondary_reload_info *sri)
19835 enum reg_class rclass = (enum reg_class) rclass_i;
19836 reg_class_t ret = ALL_REGS;
19837 enum insn_code icode;
19838 bool default_p = false;
19839 bool done_p = false;
19841 /* Allow subreg of memory before/during reload. */
19842 bool memory_p = (MEM_P (x)
19843 || (!reload_completed && GET_CODE (x) == SUBREG
19844 && MEM_P (SUBREG_REG (x))));
19846 sri->icode = CODE_FOR_nothing;
19847 sri->t_icode = CODE_FOR_nothing;
19848 sri->extra_cost = 0;
19849 icode = ((in_p)
19850 ? reg_addr[mode].reload_load
19851 : reg_addr[mode].reload_store);
19853 if (REG_P (x) || register_operand (x, mode))
19855 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19856 bool altivec_p = (rclass == ALTIVEC_REGS);
19857 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19859 if (!in_p)
19860 std::swap (to_type, from_type);
19862 /* Can we do a direct move of some sort? */
19863 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19864 altivec_p))
19866 icode = (enum insn_code)sri->icode;
19867 default_p = false;
19868 done_p = true;
19869 ret = NO_REGS;
19873 /* Make sure 0.0 is not reloaded or forced into memory. */
19874 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19876 ret = NO_REGS;
19877 default_p = false;
19878 done_p = true;
19881 /* If this is a scalar floating point value and we want to load it into the
19882 traditional Altivec registers, do it via a move via a traditional floating
19883 point register, unless we have D-form addressing. Also make sure that
19884 non-zero constants use a FPR. */
19885 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19886 && !mode_supports_vmx_dform (mode)
19887 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19888 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19890 ret = FLOAT_REGS;
19891 default_p = false;
19892 done_p = true;
19895 /* Handle reload of load/stores if we have reload helper functions. */
19896 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19898 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19899 mode);
19901 if (extra_cost >= 0)
19903 done_p = true;
19904 ret = NO_REGS;
19905 if (extra_cost > 0)
19907 sri->extra_cost = extra_cost;
19908 sri->icode = icode;
19913 /* Handle unaligned loads and stores of integer registers. */
19914 if (!done_p && TARGET_POWERPC64
19915 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19916 && memory_p
19917 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19919 rtx addr = XEXP (x, 0);
19920 rtx off = address_offset (addr);
19922 if (off != NULL_RTX)
19924 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19925 unsigned HOST_WIDE_INT offset = INTVAL (off);
19927 /* We need a secondary reload when our legitimate_address_p
19928 says the address is good (as otherwise the entire address
19929 will be reloaded), and the offset is not a multiple of
19930 four or we have an address wrap. Address wrap will only
19931 occur for LO_SUMs since legitimate_offset_address_p
19932 rejects addresses for 16-byte mems that will wrap. */
19933 if (GET_CODE (addr) == LO_SUM
19934 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19935 && ((offset & 3) != 0
19936 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19937 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19938 && (offset & 3) != 0))
19940 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19941 if (in_p)
19942 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19943 : CODE_FOR_reload_di_load);
19944 else
19945 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19946 : CODE_FOR_reload_di_store);
19947 sri->extra_cost = 2;
19948 ret = NO_REGS;
19949 done_p = true;
19951 else
19952 default_p = true;
19954 else
19955 default_p = true;
19958 if (!done_p && !TARGET_POWERPC64
19959 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19960 && memory_p
19961 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19963 rtx addr = XEXP (x, 0);
19964 rtx off = address_offset (addr);
19966 if (off != NULL_RTX)
19968 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19969 unsigned HOST_WIDE_INT offset = INTVAL (off);
19971 /* We need a secondary reload when our legitimate_address_p
19972 says the address is good (as otherwise the entire address
19973 will be reloaded), and we have a wrap.
19975 legitimate_lo_sum_address_p allows LO_SUM addresses to
19976 have any offset so test for wrap in the low 16 bits.
19978 legitimate_offset_address_p checks for the range
19979 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19980 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19981 [0x7ff4,0x7fff] respectively, so test for the
19982 intersection of these ranges, [0x7ffc,0x7fff] and
19983 [0x7ff4,0x7ff7] respectively.
19985 Note that the address we see here may have been
19986 manipulated by legitimize_reload_address. */
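/* Worked example, a sketch not in the original source: for a DImode access
   with 32-bit words, extra = 4. legitimate_offset_address_p accepts
   offsets in [-0x8000,0x7fff], but an offset of 0x7ffd places the second
   word at 0x7ffd + 4 = 0x8001, past the 16-bit signed limit. The test
   below, offset - (0x8000 - extra) < UNITS_PER_WORD, catches exactly the
   wrapping offsets [0x7ffc,0x7fff].  */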
19987 if (GET_CODE (addr) == LO_SUM
19988 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19989 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19991 if (in_p)
19992 sri->icode = CODE_FOR_reload_si_load;
19993 else
19994 sri->icode = CODE_FOR_reload_si_store;
19995 sri->extra_cost = 2;
19996 ret = NO_REGS;
19997 done_p = true;
19999 else
20000 default_p = true;
20002 else
20003 default_p = true;
20006 if (!done_p)
20007 default_p = true;
20009 if (default_p)
20010 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
20012 gcc_assert (ret != ALL_REGS);
20014 if (TARGET_DEBUG_ADDR)
20016 fprintf (stderr,
20017 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
20018 "mode = %s",
20019 reg_class_names[ret],
20020 in_p ? "true" : "false",
20021 reg_class_names[rclass],
20022 GET_MODE_NAME (mode));
20024 if (reload_completed)
20025 fputs (", after reload", stderr);
20027 if (!done_p)
20028 fputs (", done_p not set", stderr);
20030 if (default_p)
20031 fputs (", default secondary reload", stderr);
20033 if (sri->icode != CODE_FOR_nothing)
20034 fprintf (stderr, ", reload func = %s, extra cost = %d",
20035 insn_data[sri->icode].name, sri->extra_cost);
20037 else if (sri->extra_cost > 0)
20038 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
20040 fputs ("\n", stderr);
20041 debug_rtx (x);
20044 return ret;
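/* Example of the -mdebug=addr trace above, mocked up from the format
   strings rather than captured from a real run (mode, class, cost and the
   reload_v2df_di_load helper name are all assumed values):

	rs6000_secondary_reload, return NO_REGS, in_p = true,
	rclass = VSX_REGS, mode = V2DF, reload func = reload_v2df_di_load,
	extra cost = 1
	(mem:V2DF (plus:DI (reg:DI 1 1) (const_int 32)))  */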
20047 /* Better tracing for rs6000_secondary_reload_inner. */
20049 static void
20050 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
20051 bool store_p)
20053 rtx set, clobber;
20055 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
20057 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
20058 store_p ? "store" : "load");
20060 if (store_p)
20061 set = gen_rtx_SET (mem, reg);
20062 else
20063 set = gen_rtx_SET (reg, mem);
20065 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
20066 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
20069 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
20070 ATTRIBUTE_NORETURN;
20072 static void
20073 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
20074 bool store_p)
20076 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
20077 gcc_unreachable ();
20080 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
20081 reload helper functions. These were identified in
20082 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20083 reload, it calls the insns:
20084 reload_<RELOAD:mode>_<P:mptrsize>_store
20085 reload_<RELOAD:mode>_<P:mptrsize>_load
20087 which in turn calls this function, to do whatever is necessary to create
20088 valid addresses. */
20090 void
20091 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
20093 int regno = true_regnum (reg);
20094 machine_mode mode = GET_MODE (reg);
20095 addr_mask_type addr_mask;
20096 rtx addr;
20097 rtx new_addr;
20098 rtx op_reg, op0, op1;
20099 rtx and_op;
20100 rtx cc_clobber;
20101 rtvec rv;
20103 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20104 || !base_reg_operand (scratch, GET_MODE (scratch)))
20105 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20107 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20108 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20110 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20111 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20113 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20114 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20116 else
20117 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20119 /* Make sure the mode is valid in this register class. */
20120 if ((addr_mask & RELOAD_REG_VALID) == 0)
20121 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20123 if (TARGET_DEBUG_ADDR)
20124 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20126 new_addr = addr = XEXP (mem, 0);
20127 switch (GET_CODE (addr))
20129 /* Does the register class support auto update forms for this mode? If
20130 not, do the update now. We don't need a scratch register, since the
20131 PowerPC only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20132 case PRE_INC:
20133 case PRE_DEC:
20134 op_reg = XEXP (addr, 0);
20135 if (!base_reg_operand (op_reg, Pmode))
20136 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20138 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20140 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
20141 new_addr = op_reg;
20143 break;
20145 case PRE_MODIFY:
20146 op0 = XEXP (addr, 0);
20147 op1 = XEXP (addr, 1);
20148 if (!base_reg_operand (op0, Pmode)
20149 || GET_CODE (op1) != PLUS
20150 || !rtx_equal_p (op0, XEXP (op1, 0)))
20151 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20153 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20155 emit_insn (gen_rtx_SET (op0, op1));
20156 new_addr = op0;
20158 break;
20160 /* Do we need to simulate AND -16 to clear the bottom address bits used
20161 in VMX load/stores? */
20162 case AND:
20163 op0 = XEXP (addr, 0);
20164 op1 = XEXP (addr, 1);
20165 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20167 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20168 op_reg = op0;
20170 else if (GET_CODE (op0) == PLUS)
20172 emit_insn (gen_rtx_SET (scratch, op0));
20173 op_reg = scratch;
20176 else
20177 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20179 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20180 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20181 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20182 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20183 new_addr = scratch;
20185 break;
20187 /* If this is an indirect address, make sure it is a base register. */
20188 case REG:
20189 case SUBREG:
20190 if (!base_reg_operand (addr, GET_MODE (addr)))
20192 emit_insn (gen_rtx_SET (scratch, addr));
20193 new_addr = scratch;
20195 break;
20197 /* If this is an indexed address, make sure the register class can handle
20198 indexed addresses for this mode. */
20199 case PLUS:
20200 op0 = XEXP (addr, 0);
20201 op1 = XEXP (addr, 1);
20202 if (!base_reg_operand (op0, Pmode))
20203 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20205 else if (int_reg_operand (op1, Pmode))
20207 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20209 emit_insn (gen_rtx_SET (scratch, addr));
20210 new_addr = scratch;
20214 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20216 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20217 || !quad_address_p (addr, mode, false))
20219 emit_insn (gen_rtx_SET (scratch, addr));
20220 new_addr = scratch;
20224 /* Make sure the register class can handle offset addresses. */
20225 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20227 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20229 emit_insn (gen_rtx_SET (scratch, addr));
20230 new_addr = scratch;
20234 else
20235 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20237 break;
20239 case LO_SUM:
20240 op0 = XEXP (addr, 0);
20241 op1 = XEXP (addr, 1);
20242 if (!base_reg_operand (op0, Pmode))
20243 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20245 else if (int_reg_operand (op1, Pmode))
20247 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20249 emit_insn (gen_rtx_SET (scratch, addr));
20250 new_addr = scratch;
20254 /* Quad offsets are restricted and can't handle normal addresses. */
20255 else if (mode_supports_vsx_dform_quad (mode))
20257 emit_insn (gen_rtx_SET (scratch, addr));
20258 new_addr = scratch;
20261 /* Make sure the register class can handle offset addresses. */
20262 else if (legitimate_lo_sum_address_p (mode, addr, false))
20264 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20266 emit_insn (gen_rtx_SET (scratch, addr));
20267 new_addr = scratch;
20271 else
20272 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20274 break;
20276 case SYMBOL_REF:
20277 case CONST:
20278 case LABEL_REF:
20279 rs6000_emit_move (scratch, addr, Pmode);
20280 new_addr = scratch;
20281 break;
20283 default:
20284 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20287 /* Adjust the address if it changed. */
20288 if (addr != new_addr)
20290 mem = replace_equiv_address_nv (mem, new_addr);
20291 if (TARGET_DEBUG_ADDR)
20292 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20295 /* Now create the move. */
20296 if (store_p)
20297 emit_insn (gen_rtx_SET (mem, reg));
20298 else
20299 emit_insn (gen_rtx_SET (reg, mem));
20301 return;
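/* Illustrative sketch, using hypothetical RTL: if an Altivec-style address
   (and:DI (plus:DI r9 r10) (const_int -16)) reaches a register class
   without RELOAD_REG_AND_M16 support, the AND case above emits roughly

	(set scratch (plus:DI r9 r10))
	(parallel [(set scratch (and:DI scratch (const_int -16)))
		   (clobber (scratch:CC))])

   and the memory reference is then rewritten to use SCRATCH as its
   address.  */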
20304 /* Convert reloads involving 64-bit gprs and misaligned offset
20305 addressing, or multiple 32-bit gprs and offsets that are too large,
20306 to use indirect addressing. */
20308 void
20309 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20311 int regno = true_regnum (reg);
20312 enum reg_class rclass;
20313 rtx addr;
20314 rtx scratch_or_premodify = scratch;
20316 if (TARGET_DEBUG_ADDR)
20318 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20319 store_p ? "store" : "load");
20320 fprintf (stderr, "reg:\n");
20321 debug_rtx (reg);
20322 fprintf (stderr, "mem:\n");
20323 debug_rtx (mem);
20324 fprintf (stderr, "scratch:\n");
20325 debug_rtx (scratch);
20328 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20329 gcc_assert (GET_CODE (mem) == MEM);
20330 rclass = REGNO_REG_CLASS (regno);
20331 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20332 addr = XEXP (mem, 0);
20334 if (GET_CODE (addr) == PRE_MODIFY)
20336 gcc_assert (REG_P (XEXP (addr, 0))
20337 && GET_CODE (XEXP (addr, 1)) == PLUS
20338 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20339 scratch_or_premodify = XEXP (addr, 0);
20340 if (!HARD_REGISTER_P (scratch_or_premodify))
20341 /* If we have a pseudo here then reload will have arranged
20342 to have it replaced, but only in the original insn.
20343 Use the replacement here too. */
20344 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20346 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20347 expressions from the original insn, without unsharing them.
20348 Any RTL that points into the original insn will of course
20349 have register replacements applied. That is why we don't
20350 need to look for replacements under the PLUS. */
20351 addr = XEXP (addr, 1);
20353 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20355 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20357 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20359 /* Now create the move. */
20360 if (store_p)
20361 emit_insn (gen_rtx_SET (mem, reg));
20362 else
20363 emit_insn (gen_rtx_SET (reg, mem));
20365 return;
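/* Illustrative sketch, using hypothetical RTL: a misaligned DImode access
   such as

	(mem:DI (plus:DI (reg:DI 1) (const_int 4097)))

   cannot use the DS-form ld/std directly (the displacement must be a
   multiple of 4), so the code above moves the whole address into the
   scratch register and makes the access indirect:

	(set scratch (plus:DI (reg:DI 1) (const_int 4097)))
	(mem:DI scratch)  */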
20368 /* Given an rtx X being reloaded into a reg required to be
20369 in class CLASS, return the class of reg to actually use.
20370 In general this is just CLASS; but on some machines
20371 in some cases it is preferable to use a more restrictive class.
20373 On the RS/6000, we have to return NO_REGS when we want to reload a
20374 floating-point CONST_DOUBLE to force it to be copied to memory.
20376 We also don't want to reload integer values into floating-point
20377 registers if we can at all help it. In fact, this can
20378 cause reload to die if it tries to generate a reload of CTR
20379 into an FP register and discovers it doesn't have the memory location
20380 required.
20382 ??? Would it be a good idea to have reload do the converse, that is
20383 try to reload floating modes into FP registers if possible? */
20386 static enum reg_class
20387 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20389 machine_mode mode = GET_MODE (x);
20390 bool is_constant = CONSTANT_P (x);
20392 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20393 reload class for it. */
20394 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20395 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20396 return NO_REGS;
20398 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20399 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20400 return NO_REGS;
20402 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20403 the reloading of address expressions using PLUS into floating point
20404 registers. */
20405 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20407 if (is_constant)
20409 /* Zero is always allowed in all VSX registers. */
20410 if (x == CONST0_RTX (mode))
20411 return rclass;
20413 /* If this is a vector constant that can be formed with a few Altivec
20414 instructions, we want altivec registers. */
20415 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20416 return ALTIVEC_REGS;
20418 /* If this is an integer constant that can easily be loaded into
20419 vector registers, allow it. */
20420 if (CONST_INT_P (x))
20422 HOST_WIDE_INT value = INTVAL (x);
20424 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20425 2.06 can generate it in the Altivec registers with
20426 VSPLTI<x>. */
20427 if (value == -1)
20429 if (TARGET_P8_VECTOR)
20430 return rclass;
20431 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20432 return ALTIVEC_REGS;
20433 else
20434 return NO_REGS;
20437 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20438 a sign extend in the Altivec registers. */
20439 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20440 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20441 return ALTIVEC_REGS;
20444 /* Force constant to memory. */
20445 return NO_REGS;
20448 /* D-form addressing can easily reload the value. */
20449 if (mode_supports_vmx_dform (mode)
20450 || mode_supports_vsx_dform_quad (mode))
20451 return rclass;
20453 /* If this is a scalar floating point value and we don't have D-form
20454 addressing, prefer the traditional floating point registers so that we
20455 can use D-form (register+offset) addressing. */
20456 if (rclass == VSX_REGS
20457 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20458 return FLOAT_REGS;
20460 /* Prefer the Altivec registers if Altivec is handling the vector
20461 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20462 loads. */
20463 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20464 || mode == V1TImode)
20465 return ALTIVEC_REGS;
20467 return rclass;
20470 if (is_constant || GET_CODE (x) == PLUS)
20472 if (reg_class_subset_p (GENERAL_REGS, rclass))
20473 return GENERAL_REGS;
20474 if (reg_class_subset_p (BASE_REGS, rclass))
20475 return BASE_REGS;
20476 return NO_REGS;
20479 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20480 return GENERAL_REGS;
20482 return rclass;
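/* For example, using the cases above: loading the V4SI constant
   { -1, -1, -1, -1 } prefers ALTIVEC_REGS on ISA 2.06, where only
   VSPLTISW can synthesize it, but keeps the wider VSX_REGS class on
   ISA 2.07, where XXLORC works in any VSX register.  */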
20485 /* Debug version of rs6000_preferred_reload_class. */
20486 static enum reg_class
20487 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20489 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20491 fprintf (stderr,
20492 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20493 "mode = %s, x:\n",
20494 reg_class_names[ret], reg_class_names[rclass],
20495 GET_MODE_NAME (GET_MODE (x)));
20496 debug_rtx (x);
20498 return ret;
20501 /* If we are copying between FP or AltiVec registers and anything else, we need
20502 a memory location. The exception is when we are targeting ppc64 and the
20503 direct move instructions between fpr and gpr are available. Also, under VSX, you
20504 can copy vector registers from the FP register set to the Altivec register
20505 set and vice versa. */
20507 static bool
20508 rs6000_secondary_memory_needed (enum reg_class from_class,
20509 enum reg_class to_class,
20510 machine_mode mode)
20512 enum rs6000_reg_type from_type, to_type;
20513 bool altivec_p = ((from_class == ALTIVEC_REGS)
20514 || (to_class == ALTIVEC_REGS));
20516 /* If a simple/direct move is available, we don't need secondary memory. */
20517 from_type = reg_class_to_reg_type[(int)from_class];
20518 to_type = reg_class_to_reg_type[(int)to_class];
20520 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20521 (secondary_reload_info *)0, altivec_p))
20522 return false;
20524 /* If we have a floating point or vector register class, we need to use
20525 memory to transfer the data. */
20526 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20527 return true;
20529 return false;
20532 /* Debug version of rs6000_secondary_memory_needed. */
20533 static bool
20534 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
20535 enum reg_class to_class,
20536 machine_mode mode)
20538 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
20540 fprintf (stderr,
20541 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20542 "to_class = %s, mode = %s\n",
20543 ret ? "true" : "false",
20544 reg_class_names[from_class],
20545 reg_class_names[to_class],
20546 GET_MODE_NAME (mode));
20548 return ret;
20551 /* Return the register class of a scratch register needed to copy IN into
20552 or out of a register in RCLASS in MODE. If it can be done directly,
20553 NO_REGS is returned. */
20555 static enum reg_class
20556 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20557 rtx in)
20559 int regno;
20561 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20562 #if TARGET_MACHO
20563 && MACHOPIC_INDIRECT
20564 #endif
20567 /* We cannot copy a symbolic operand directly into anything
20568 other than BASE_REGS for TARGET_ELF. So indicate that a
20569 register from BASE_REGS is needed as an intermediate
20570 register.
20572 On Darwin, pic addresses require a load from memory, which
20573 needs a base register. */
20574 if (rclass != BASE_REGS
20575 && (GET_CODE (in) == SYMBOL_REF
20576 || GET_CODE (in) == HIGH
20577 || GET_CODE (in) == LABEL_REF
20578 || GET_CODE (in) == CONST))
20579 return BASE_REGS;
20582 if (GET_CODE (in) == REG)
20584 regno = REGNO (in);
20585 if (regno >= FIRST_PSEUDO_REGISTER)
20587 regno = true_regnum (in);
20588 if (regno >= FIRST_PSEUDO_REGISTER)
20589 regno = -1;
20592 else if (GET_CODE (in) == SUBREG)
20594 regno = true_regnum (in);
20595 if (regno >= FIRST_PSEUDO_REGISTER)
20596 regno = -1;
20598 else
20599 regno = -1;
20601 /* If we have VSX register moves, prefer moving scalar values between
20602 Altivec registers and GPR by going via an FPR (and then via memory)
20603 instead of reloading the secondary memory address for Altivec moves. */
20604 if (TARGET_VSX
20605 && GET_MODE_SIZE (mode) < 16
20606 && !mode_supports_vmx_dform (mode)
20607 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20608 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20609 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20610 && (regno >= 0 && INT_REGNO_P (regno)))))
20611 return FLOAT_REGS;
20613 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20614 into anything. */
20615 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20616 || (regno >= 0 && INT_REGNO_P (regno)))
20617 return NO_REGS;
20619 /* Constants, memory, and VSX registers can go into VSX registers (both the
20620 traditional floating point and the altivec registers). */
20621 if (rclass == VSX_REGS
20622 && (regno == -1 || VSX_REGNO_P (regno)))
20623 return NO_REGS;
20625 /* Constants, memory, and FP registers can go into FP registers. */
20626 if ((regno == -1 || FP_REGNO_P (regno))
20627 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20628 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20630 /* Memory and AltiVec registers can go into AltiVec registers. */
20631 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20632 && rclass == ALTIVEC_REGS)
20633 return NO_REGS;
20635 /* We can copy among the CR registers. */
20636 if ((rclass == CR_REGS || rclass == CR0_REGS)
20637 && regno >= 0 && CR_REGNO_P (regno))
20638 return NO_REGS;
20640 /* Otherwise, we need GENERAL_REGS. */
20641 return GENERAL_REGS;
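/* For example: on an ELF target, copying (symbol_ref "x") into FLOAT_REGS
   returns BASE_REGS above, since the symbolic address must first be formed
   in a base register before the value can be loaded, while a copy between
   two GPRs falls through to NO_REGS.  */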
20644 /* Debug version of rs6000_secondary_reload_class. */
20645 static enum reg_class
20646 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20647 machine_mode mode, rtx in)
20649 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20650 fprintf (stderr,
20651 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20652 "mode = %s, input rtx:\n",
20653 reg_class_names[ret], reg_class_names[rclass],
20654 GET_MODE_NAME (mode));
20655 debug_rtx (in);
20657 return ret;
20660 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
20662 static bool
20663 rs6000_cannot_change_mode_class (machine_mode from,
20664 machine_mode to,
20665 enum reg_class rclass)
20667 unsigned from_size = GET_MODE_SIZE (from);
20668 unsigned to_size = GET_MODE_SIZE (to);
20670 if (from_size != to_size)
20672 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20674 if (reg_classes_intersect_p (xclass, rclass))
20676 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
20677 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
20678 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20679 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20681 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20682 single register under VSX because the scalar part of the register
20683 is in the upper 64-bits, and not the lower 64-bits. Types like
20684 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20685 IEEE floating point can't overlap, and neither can small
20686 values. */
20688 if (to_float128_vector_p && from_float128_vector_p)
20689 return false;
20691 else if (to_float128_vector_p || from_float128_vector_p)
20692 return true;
20694 /* TDmode in floating-mode registers must always go into a register
20695 pair with the most significant word in the even-numbered register
20696 to match ISA requirements. In little-endian mode, this does not
20697 match subreg numbering, so we cannot allow subregs. */
20698 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20699 return true;
20701 if (from_size < 8 || to_size < 8)
20702 return true;
20704 if (from_size == 8 && (8 * to_nregs) != to_size)
20705 return true;
20707 if (to_size == 8 && (8 * from_nregs) != from_size)
20708 return true;
20710 return false;
20712 else
20713 return false;
20716 /* Since the VSX register set includes traditional floating point registers
20717 and altivec registers, just check for the size being different instead of
20718 trying to check whether the modes are vector modes. Otherwise it won't
20719 allow say DF and DI to change classes. For types like TFmode and TDmode
20720 that take 2 64-bit registers, rather than a single 128-bit register, don't
20721 allow subregs of those types to other 128-bit types. */
20722 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20724 unsigned num_regs = (from_size + 15) / 16;
20725 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
20726 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
20727 return true;
20729 return (from_size != 8 && from_size != 16);
20732 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20733 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20734 return true;
20736 return false;
20739 /* Debug version of rs6000_cannot_change_mode_class. */
20740 static bool
20741 rs6000_debug_cannot_change_mode_class (machine_mode from,
20742 machine_mode to,
20743 enum reg_class rclass)
20745 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
20747 fprintf (stderr,
20748 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20749 "to = %s, rclass = %s\n",
20750 ret ? "true" : "false",
20751 GET_MODE_NAME (from), GET_MODE_NAME (to),
20752 reg_class_names[rclass]);
20754 return ret;
20757 /* Return a string to do a move operation of 128 bits of data. */
20759 const char *
20760 rs6000_output_move_128bit (rtx operands[])
20762 rtx dest = operands[0];
20763 rtx src = operands[1];
20764 machine_mode mode = GET_MODE (dest);
20765 int dest_regno;
20766 int src_regno;
20767 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20768 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20770 if (REG_P (dest))
20772 dest_regno = REGNO (dest);
20773 dest_gpr_p = INT_REGNO_P (dest_regno);
20774 dest_fp_p = FP_REGNO_P (dest_regno);
20775 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20776 dest_vsx_p = dest_fp_p | dest_vmx_p;
20778 else
20780 dest_regno = -1;
20781 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20784 if (REG_P (src))
20786 src_regno = REGNO (src);
20787 src_gpr_p = INT_REGNO_P (src_regno);
20788 src_fp_p = FP_REGNO_P (src_regno);
20789 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20790 src_vsx_p = src_fp_p | src_vmx_p;
20792 else
20794 src_regno = -1;
20795 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20798 /* Register moves. */
20799 if (dest_regno >= 0 && src_regno >= 0)
20801 if (dest_gpr_p)
20803 if (src_gpr_p)
20804 return "#";
20806 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20807 return (WORDS_BIG_ENDIAN
20808 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20809 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20811 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20812 return "#";
20815 else if (TARGET_VSX && dest_vsx_p)
20817 if (src_vsx_p)
20818 return "xxlor %x0,%x1,%x1";
20820 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20821 return (WORDS_BIG_ENDIAN
20822 ? "mtvsrdd %x0,%1,%L1"
20823 : "mtvsrdd %x0,%L1,%1");
20825 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20826 return "#";
20829 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20830 return "vor %0,%1,%1";
20832 else if (dest_fp_p && src_fp_p)
20833 return "#";
20836 /* Loads. */
20837 else if (dest_regno >= 0 && MEM_P (src))
20839 if (dest_gpr_p)
20841 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20842 return "lq %0,%1";
20843 else
20844 return "#";
20847 else if (TARGET_ALTIVEC && dest_vmx_p
20848 && altivec_indexed_or_indirect_operand (src, mode))
20849 return "lvx %0,%y1";
20851 else if (TARGET_VSX && dest_vsx_p)
20853 if (mode_supports_vsx_dform_quad (mode)
20854 && quad_address_p (XEXP (src, 0), mode, true))
20855 return "lxv %x0,%1";
20857 else if (TARGET_P9_VECTOR)
20858 return "lxvx %x0,%y1";
20860 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20861 return "lxvw4x %x0,%y1";
20863 else
20864 return "lxvd2x %x0,%y1";
20867 else if (TARGET_ALTIVEC && dest_vmx_p)
20868 return "lvx %0,%y1";
20870 else if (dest_fp_p)
20871 return "#";
20874 /* Stores. */
20875 else if (src_regno >= 0 && MEM_P (dest))
20877 if (src_gpr_p)
20879 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20880 return "stq %1,%0";
20881 else
20882 return "#";
20885 else if (TARGET_ALTIVEC && src_vmx_p
20886 && altivec_indexed_or_indirect_operand (src, mode))
20887 return "stvx %1,%y0";
20889 else if (TARGET_VSX && src_vsx_p)
20891 if (mode_supports_vsx_dform_quad (mode)
20892 && quad_address_p (XEXP (dest, 0), mode, true))
20893 return "stxv %x1,%0";
20895 else if (TARGET_P9_VECTOR)
20896 return "stxvx %x1,%y0";
20898 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20899 return "stxvw4x %x1,%y0";
20901 else
20902 return "stxvd2x %x1,%y0";
20905 else if (TARGET_ALTIVEC && src_vmx_p)
20906 return "stvx %1,%y0";
20908 else if (src_fp_p)
20909 return "#";
20912 /* Constants. */
20913 else if (dest_regno >= 0
20914 && (GET_CODE (src) == CONST_INT
20915 || GET_CODE (src) == CONST_WIDE_INT
20916 || GET_CODE (src) == CONST_DOUBLE
20917 || GET_CODE (src) == CONST_VECTOR))
20919 if (dest_gpr_p)
20920 return "#";
20922 else if ((dest_vmx_p && TARGET_ALTIVEC)
20923 || (dest_vsx_p && TARGET_VSX))
20924 return output_vec_const_move (operands);
20927 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
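/* For example: a 128-bit register-to-register move within the VSX set
   prints "xxlor %x0,%x1,%x1" above, while a GPR-to-GPR move prints "#" so
   that the post-reload splitter breaks it into individual word moves.  */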
20930 /* Validate a 128-bit move. */
20931 bool
20932 rs6000_move_128bit_ok_p (rtx operands[])
20934 machine_mode mode = GET_MODE (operands[0]);
20935 return (gpc_reg_operand (operands[0], mode)
20936 || gpc_reg_operand (operands[1], mode));
20939 /* Return true if a 128-bit move needs to be split. */
20940 bool
20941 rs6000_split_128bit_ok_p (rtx operands[])
20943 if (!reload_completed)
20944 return false;
20946 if (!gpr_or_gpr_p (operands[0], operands[1]))
20947 return false;
20949 if (quad_load_store_p (operands[0], operands[1]))
20950 return false;
20952 return true;
20956 /* Given a comparison operation, return the bit number in CCR to test. We
20957 know this is a valid comparison.
20959 SCC_P is 1 if this is for an scc. That means that %D will have been
20960 used instead of %C, so the bits will be in different places.
20962 Return -1 if OP isn't a valid comparison for some reason. */
20964 int
20965 ccr_bit (rtx op, int scc_p)
20967 enum rtx_code code = GET_CODE (op);
20968 machine_mode cc_mode;
20969 int cc_regnum;
20970 int base_bit;
20971 rtx reg;
20973 if (!COMPARISON_P (op))
20974 return -1;
20976 reg = XEXP (op, 0);
20978 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20980 cc_mode = GET_MODE (reg);
20981 cc_regnum = REGNO (reg);
20982 base_bit = 4 * (cc_regnum - CR0_REGNO);
20984 validate_condition_mode (code, cc_mode);
20986 /* When generating a sCOND operation, only positive conditions are
20987 allowed. */
20988 gcc_assert (!scc_p
20989 || code == EQ || code == GT || code == LT || code == UNORDERED
20990 || code == GTU || code == LTU);
20992 switch (code)
20994 case NE:
20995 return scc_p ? base_bit + 3 : base_bit + 2;
20996 case EQ:
20997 return base_bit + 2;
20998 case GT: case GTU: case UNLE:
20999 return base_bit + 1;
21000 case LT: case LTU: case UNGE:
21001 return base_bit;
21002 case ORDERED: case UNORDERED:
21003 return base_bit + 3;
21005 case GE: case GEU:
21006 /* If scc, we will have done a cror to put the bit in the
21007 unordered position. So test that bit. For integer, this is ! LT
21008 unless this is an scc insn. */
21009 return scc_p ? base_bit + 3 : base_bit;
21011 case LE: case LEU:
21012 return scc_p ? base_bit + 3 : base_bit + 1;
21014 default:
21015 gcc_unreachable ();
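/* Worked example: a GT comparison in field cr2 has base_bit = 4 * 2 = 8,
   so ccr_bit returns 9 (cr2's GT bit). For GE the result depends on
   SCC_P: a branch tests the LT bit (8) inverted, while an scc sequence
   will have cror'ed the result into the unordered position, so bit 11 is
   returned.  */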
21019 /* Return the GOT register. */
21021 rtx
21022 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
21024 /* The second flow pass currently (June 1999) can't update
21025 regs_ever_live without disturbing other parts of the compiler, so
21026 update it here to make the prolog/epilogue code happy. */
21027 if (!can_create_pseudo_p ()
21028 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
21029 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
21031 crtl->uses_pic_offset_table = 1;
21033 return pic_offset_table_rtx;
21036 static rs6000_stack_t stack_info;
21038 /* Function to init struct machine_function.
21039 This will be called, via a pointer variable,
21040 from push_function_context. */
21042 static struct machine_function *
21043 rs6000_init_machine_status (void)
21045 stack_info.reload_completed = 0;
21046 return ggc_cleared_alloc<machine_function> ();
21049 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21051 /* Write out a function code label. */
21053 void
21054 rs6000_output_function_entry (FILE *file, const char *fname)
21056 if (fname[0] != '.')
21058 switch (DEFAULT_ABI)
21060 default:
21061 gcc_unreachable ();
21063 case ABI_AIX:
21064 if (DOT_SYMBOLS)
21065 putc ('.', file);
21066 else
21067 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
21068 break;
21070 case ABI_ELFv2:
21071 case ABI_V4:
21072 case ABI_DARWIN:
21073 break;
21077 RS6000_OUTPUT_BASENAME (file, fname);
21080 /* Print an operand. Recognize special options, documented below. */
21082 #if TARGET_ELF
21083 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21084 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21085 #else
21086 #define SMALL_DATA_RELOC "sda21"
21087 #define SMALL_DATA_REG 0
21088 #endif
21090 void
21091 print_operand (FILE *file, rtx x, int code)
21093 int i;
21094 unsigned HOST_WIDE_INT uval;
21096 switch (code)
21098 /* %a is output_address. */
21100 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21101 output_operand. */
21103 case 'D':
21104 /* Like 'J' but get to the GT bit only. */
21105 gcc_assert (REG_P (x));
21107 /* Bit 1 is GT bit. */
21108 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21110 /* Add one for shift count in rlinm for scc. */
21111 fprintf (file, "%d", i + 1);
21112 return;
21114 case 'e':
21115 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21116 if (! INT_P (x))
21118 output_operand_lossage ("invalid %%e value");
21119 return;
21122 uval = INTVAL (x);
21123 if ((uval & 0xffff) == 0 && uval != 0)
21124 putc ('s', file);
21125 return;
21127 case 'E':
21128 /* X is a CR register. Print the number of the EQ bit of the CR */
21129 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21130 output_operand_lossage ("invalid %%E value");
21131 else
21132 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21133 return;
21135 case 'f':
21136 /* X is a CR register. Print the shift count needed to move it
21137 to the high-order four bits. */
21138 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21139 output_operand_lossage ("invalid %%f value");
21140 else
21141 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21142 return;
21144 case 'F':
21145 /* Similar, but print the count for the rotate in the opposite
21146 direction. */
21147 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21148 output_operand_lossage ("invalid %%F value");
21149 else
21150 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21151 return;
21153 case 'G':
21154 /* X is a constant integer. If it is negative, print "m",
21155 otherwise print "z". This is to make an aze or ame insn. */
21156 if (GET_CODE (x) != CONST_INT)
21157 output_operand_lossage ("invalid %%G value");
21158 else if (INTVAL (x) >= 0)
21159 putc ('z', file);
21160 else
21161 putc ('m', file);
21162 return;
21164 case 'h':
21165 /* If constant, output low-order five bits. Otherwise, write
21166 normally. */
21167 if (INT_P (x))
21168 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21169 else
21170 print_operand (file, x, 0);
21171 return;
21173 case 'H':
21174 /* If constant, output low-order six bits. Otherwise, write
21175 normally. */
21176 if (INT_P (x))
21177 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21178 else
21179 print_operand (file, x, 0);
21180 return;
21182 case 'I':
21183 /* Print `i' if this is a constant, else nothing. */
21184 if (INT_P (x))
21185 putc ('i', file);
21186 return;
21188 case 'j':
21189 /* Write the bit number in CCR for jump. */
21190 i = ccr_bit (x, 0);
21191 if (i == -1)
21192 output_operand_lossage ("invalid %%j code");
21193 else
21194 fprintf (file, "%d", i);
21195 return;
21197 case 'J':
21198 /* Similar, but add one for shift count in rlinm for scc and pass
21199 scc flag to `ccr_bit'. */
21200 i = ccr_bit (x, 1);
21201 if (i == -1)
21202 output_operand_lossage ("invalid %%J code");
21203 else
21204 /* If we want bit 31, write a shift count of zero, not 32. */
21205 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21206 return;
21208 case 'k':
21209 /* X must be a constant. Write the 1's complement of the
21210 constant. */
21211 if (! INT_P (x))
21212 output_operand_lossage ("invalid %%k value");
21213 else
21214 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21215 return;
21217 case 'K':
21218 /* X must be a symbolic constant on ELF. Write an
21219 expression suitable for an 'addi' that adds in the low 16
21220 bits of the MEM. */
21221 if (GET_CODE (x) == CONST)
21223 if (GET_CODE (XEXP (x, 0)) != PLUS
21224 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21225 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21226 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21227 output_operand_lossage ("invalid %%K value");
21229 print_operand_address (file, x);
21230 fputs ("@l", file);
21231 return;
21233 /* %l is output_asm_label. */
21235 case 'L':
21236 /* Write second word of DImode or DFmode reference. Works on register
21237 or non-indexed memory only. */
21238 if (REG_P (x))
21239 fputs (reg_names[REGNO (x) + 1], file);
21240 else if (MEM_P (x))
21242 machine_mode mode = GET_MODE (x);
21243 /* Handle possible auto-increment. Since it is pre-increment and
21244 we have already done it, we can just use an offset of word. */
21245 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21246 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21247 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21248 UNITS_PER_WORD));
21249 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21250 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21251 UNITS_PER_WORD));
21252 else
21253 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21254 UNITS_PER_WORD),
21255 0));
21257 if (small_data_operand (x, GET_MODE (x)))
21258 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21259 reg_names[SMALL_DATA_REG]);
21261 return;
21263 case 'N':
21264 /* Write the number of elements in the vector times 4. */
21265 if (GET_CODE (x) != PARALLEL)
21266 output_operand_lossage ("invalid %%N value");
21267 else
21268 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21269 return;
21271 case 'O':
21272 /* Similar, but subtract 1 first. */
21273 if (GET_CODE (x) != PARALLEL)
21274 output_operand_lossage ("invalid %%O value");
21275 else
21276 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21277 return;
21279 case 'p':
21280 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21281 if (! INT_P (x)
21282 || INTVAL (x) < 0
21283 || (i = exact_log2 (INTVAL (x))) < 0)
21284 output_operand_lossage ("invalid %%p value");
21285 else
21286 fprintf (file, "%d", i);
21287 return;
21289 case 'P':
21290 /* The operand must be an indirect memory reference. The result
21291 is the register name. */
21292 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21293 || REGNO (XEXP (x, 0)) >= 32)
21294 output_operand_lossage ("invalid %%P value");
21295 else
21296 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21297 return;
21299 case 'q':
21300 /* This outputs the logical code corresponding to a boolean
21301 expression. The expression may have one or both operands
21302 negated (if one, only the first one). For condition register
21303 logical operations, it will also treat the negated
21304 CR codes as NOTs, but not handle NOTs of them. */
21306 const char *const *t = 0;
21307 const char *s;
21308 enum rtx_code code = GET_CODE (x);
21309 static const char * const tbl[3][3] = {
21310 { "and", "andc", "nor" },
21311 { "or", "orc", "nand" },
21312 { "xor", "eqv", "xor" } };
21314 if (code == AND)
21315 t = tbl[0];
21316 else if (code == IOR)
21317 t = tbl[1];
21318 else if (code == XOR)
21319 t = tbl[2];
21320 else
21321 output_operand_lossage ("invalid %%q value");
21323 if (GET_CODE (XEXP (x, 0)) != NOT)
21324 s = t[0];
21325 else
21327 if (GET_CODE (XEXP (x, 1)) == NOT)
21328 s = t[2];
21329 else
21330 s = t[1];
21333 fputs (s, file);
21335 return;
21337 case 'Q':
21338 if (! TARGET_MFCRF)
21339 return;
21340 fputc (',', file);
21341 /* FALLTHRU */
21343 case 'R':
21344 /* X is a CR register. Print the mask for `mtcrf'. */
21345 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21346 output_operand_lossage ("invalid %%R value");
21347 else
21348 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21349 return;
21351 case 's':
21352 /* Low 5 bits of 32 - value */
21353 if (! INT_P (x))
21354 output_operand_lossage ("invalid %%s value");
21355 else
21356 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21357 return;
21359 case 't':
21360 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21361 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21363 /* Bit 3 is OV bit. */
21364 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21366 /* If we want bit 31, write a shift count of zero, not 32. */
21367 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21368 return;
21370 case 'T':
21371 /* Print the symbolic name of a branch target register. */
21372 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21373 && REGNO (x) != CTR_REGNO))
21374 output_operand_lossage ("invalid %%T value");
21375 else if (REGNO (x) == LR_REGNO)
21376 fputs ("lr", file);
21377 else
21378 fputs ("ctr", file);
21379 return;
21381 case 'u':
21382 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21383 for use in unsigned operand. */
21384 if (! INT_P (x))
21386 output_operand_lossage ("invalid %%u value");
21387 return;
21390 uval = INTVAL (x);
21391 if ((uval & 0xffff) == 0)
21392 uval >>= 16;
21394 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21395 return;
21397 case 'v':
21398 /* High-order 16 bits of constant for use in signed operand. */
21399 if (! INT_P (x))
21400 output_operand_lossage ("invalid %%v value");
21401 else
21402 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21403 (INTVAL (x) >> 16) & 0xffff);
21404 return;
21406 case 'U':
21407 /* Print `u' if this has an auto-increment or auto-decrement. */
21408 if (MEM_P (x)
21409 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21410 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21411 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21412 putc ('u', file);
21413 return;
21415 case 'V':
21416 /* Print the trap code for this operand. */
21417 switch (GET_CODE (x))
21419 case EQ:
21420 fputs ("eq", file); /* 4 */
21421 break;
21422 case NE:
21423 fputs ("ne", file); /* 24 */
21424 break;
21425 case LT:
21426 fputs ("lt", file); /* 16 */
21427 break;
21428 case LE:
21429 fputs ("le", file); /* 20 */
21430 break;
21431 case GT:
21432 fputs ("gt", file); /* 8 */
21433 break;
21434 case GE:
21435 fputs ("ge", file); /* 12 */
21436 break;
21437 case LTU:
21438 fputs ("llt", file); /* 2 */
21439 break;
21440 case LEU:
21441 fputs ("lle", file); /* 6 */
21442 break;
21443 case GTU:
21444 fputs ("lgt", file); /* 1 */
21445 break;
21446 case GEU:
21447 fputs ("lge", file); /* 5 */
21448 break;
21449 default:
21450 gcc_unreachable ();
21452 break;
21454 case 'w':
21455 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21456 normally. */
21457 if (INT_P (x))
21458 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21459 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21460 else
21461 print_operand (file, x, 0);
21462 return;
21464 case 'x':
21465 /* X is a FPR or Altivec register used in a VSX context. */
21466 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21467 output_operand_lossage ("invalid %%x value");
21468 else
21470 int reg = REGNO (x);
21471 int vsx_reg = (FP_REGNO_P (reg)
21472 ? reg - 32
21473 : reg - FIRST_ALTIVEC_REGNO + 32);
21475 #ifdef TARGET_REGNAMES
21476 if (TARGET_REGNAMES)
21477 fprintf (file, "%%vs%d", vsx_reg);
21478 else
21479 #endif
21480 fprintf (file, "%d", vsx_reg);
21482 return;
21484 case 'X':
21485 if (MEM_P (x)
21486 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21487 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21488 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21489 putc ('x', file);
21490 return;
21492 case 'Y':
21493 /* Like 'L', for third word of TImode/PTImode. */
21494 if (REG_P (x))
21495 fputs (reg_names[REGNO (x) + 2], file);
21496 else if (MEM_P (x))
21498 machine_mode mode = GET_MODE (x);
21499 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21500 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21501 output_address (mode, plus_constant (Pmode,
21502 XEXP (XEXP (x, 0), 0), 8));
21503 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21504 output_address (mode, plus_constant (Pmode,
21505 XEXP (XEXP (x, 0), 0), 8));
21506 else
21507 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21508 if (small_data_operand (x, GET_MODE (x)))
21509 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21510 reg_names[SMALL_DATA_REG]);
21512 return;
21514 case 'z':
21515 /* X is a SYMBOL_REF. Write out the name preceded by a
21516 period and without any trailing data in brackets. Used for function
21517 names. If we are configured for System V (or the embedded ABI) on
21518 the PowerPC, do not emit the period, since those systems do not use
21519 TOCs and the like. */
21520 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21522 /* For macho, check to see if we need a stub. */
21523 if (TARGET_MACHO)
21525 const char *name = XSTR (x, 0);
21526 #if TARGET_MACHO
21527 if (darwin_emit_branch_islands
21528 && MACHOPIC_INDIRECT
21529 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21530 name = machopic_indirection_name (x, /*stub_p=*/true);
21531 #endif
21532 assemble_name (file, name);
21534 else if (!DOT_SYMBOLS)
21535 assemble_name (file, XSTR (x, 0));
21536 else
21537 rs6000_output_function_entry (file, XSTR (x, 0));
21538 return;
21540 case 'Z':
21541 /* Like 'L', for last word of TImode/PTImode. */
21542 if (REG_P (x))
21543 fputs (reg_names[REGNO (x) + 3], file);
21544 else if (MEM_P (x))
21546 machine_mode mode = GET_MODE (x);
21547 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21548 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21549 output_address (mode, plus_constant (Pmode,
21550 XEXP (XEXP (x, 0), 0), 12));
21551 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21552 output_address (mode, plus_constant (Pmode,
21553 XEXP (XEXP (x, 0), 0), 12));
21554 else
21555 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21556 if (small_data_operand (x, GET_MODE (x)))
21557 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21558 reg_names[SMALL_DATA_REG]);
21560 return;
21562 /* Print AltiVec memory operand. */
21563 case 'y':
21565 rtx tmp;
21567 gcc_assert (MEM_P (x));
21569 tmp = XEXP (x, 0);
21571 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21572 && GET_CODE (tmp) == AND
21573 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21574 && INTVAL (XEXP (tmp, 1)) == -16)
21575 tmp = XEXP (tmp, 0);
21576 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21577 && GET_CODE (tmp) == PRE_MODIFY)
21578 tmp = XEXP (tmp, 1);
21579 if (REG_P (tmp))
21580 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21581 else
21583 if (GET_CODE (tmp) != PLUS
21584 || !REG_P (XEXP (tmp, 0))
21585 || !REG_P (XEXP (tmp, 1)))
21587 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21588 break;
21591 if (REGNO (XEXP (tmp, 0)) == 0)
21592 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21593 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21594 else
21595 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21596 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21598 break;
21601 case 0:
21602 if (REG_P (x))
21603 fprintf (file, "%s", reg_names[REGNO (x)]);
21604 else if (MEM_P (x))
21606 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21607 know the width from the mode. */
21608 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21609 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21610 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21611 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21612 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21613 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21614 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21615 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21616 else
21617 output_address (GET_MODE (x), XEXP (x, 0));
21619 else
21621 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21622 /* This hack along with a corresponding hack in
21623 rs6000_output_addr_const_extra arranges to output addends
21624 where the assembler expects to find them. eg.
21625 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21626 without this hack would be output as "x@toc+4". We
21627 want "x+4@toc". */
21628 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21629 else
21630 output_addr_const (file, x);
21632 return;
21634 case '&':
21635 if (const char *name = get_some_local_dynamic_name ())
21636 assemble_name (file, name);
21637 else
21638 output_operand_lossage ("'%%&' used without any "
21639 "local dynamic TLS references");
21640 return;
21642 default:
21643 output_operand_lossage ("invalid %%xn code");
21647 /* Print the address of an operand. */
21649 void
21650 print_operand_address (FILE *file, rtx x)
21652 if (REG_P (x))
21653 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21654 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21655 || GET_CODE (x) == LABEL_REF)
21657 output_addr_const (file, x);
21658 if (small_data_operand (x, GET_MODE (x)))
21659 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21660 reg_names[SMALL_DATA_REG]);
21661 else
21662 gcc_assert (!TARGET_TOC);
21664 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21665 && REG_P (XEXP (x, 1)))
21667 if (REGNO (XEXP (x, 0)) == 0)
21668 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21669 reg_names[ REGNO (XEXP (x, 0)) ]);
21670 else
21671 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21672 reg_names[ REGNO (XEXP (x, 1)) ]);
21674 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21675 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21676 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21677 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21678 #if TARGET_MACHO
21679 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21680 && CONSTANT_P (XEXP (x, 1)))
21682 fprintf (file, "lo16(");
21683 output_addr_const (file, XEXP (x, 1));
21684 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21686 #endif
21687 #if TARGET_ELF
21688 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21689 && CONSTANT_P (XEXP (x, 1)))
21691 output_addr_const (file, XEXP (x, 1));
21692 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21694 #endif
21695 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21697 /* This hack along with a corresponding hack in
21698 rs6000_output_addr_const_extra arranges to output addends
21699 where the assembler expects to find them. eg.
21700 (lo_sum (reg 9)
21701 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21702 without this hack would be output as "x@toc+8@l(9)". We
21703 want "x+8@toc@l(9)". */
21704 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21705 if (GET_CODE (x) == LO_SUM)
21706 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21707 else
21708 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21710 else
21711 gcc_unreachable ();
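/* Illustrative mapping for the routine above (an editorial sketch, not
   compiler code; assumes the default bare-number register names):

     (reg 3)                       ->  "0(3)"
     (plus (reg 3) (reg 4))        ->  "3,4"  (swapped when the base is r0,
                                              which reads as zero there)
     (plus (reg 3) (const_int 8))  ->  "8(3)"
     (lo_sum (reg 3) (sym "x"))    ->  "x@l(3)" on ELF, "lo16(x)(3)" on
                                       Darwin.  */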
21714 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21716 static bool
21717 rs6000_output_addr_const_extra (FILE *file, rtx x)
21719 if (GET_CODE (x) == UNSPEC)
21720 switch (XINT (x, 1))
21722 case UNSPEC_TOCREL:
21723 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21724 && REG_P (XVECEXP (x, 0, 1))
21725 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21726 output_addr_const (file, XVECEXP (x, 0, 0));
21727 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21729 if (INTVAL (tocrel_offset_oac) >= 0)
21730 fprintf (file, "+");
21731 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21733 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21735 putc ('-', file);
21736 assemble_name (file, toc_label_name);
21737 need_toc_init = 1;
21739 else if (TARGET_ELF)
21740 fputs ("@toc", file);
21741 return true;
21743 #if TARGET_MACHO
21744 case UNSPEC_MACHOPIC_OFFSET:
21745 output_addr_const (file, XVECEXP (x, 0, 0));
21746 putc ('-', file);
21747 machopic_output_function_base_name (file);
21748 return true;
21749 #endif
21751 return false;
21754 /* Target hook for assembling integer objects. The PowerPC version has
21755 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21756 is defined. It also needs to handle DI-mode objects on 64-bit
21757 targets. */
21759 static bool
21760 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21762 #ifdef RELOCATABLE_NEEDS_FIXUP
21763 /* Special handling for SI values. */
21764 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21766 static int recurse = 0;
21768 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21769 the .fixup section. Since the TOC section is already relocated, we
21770 don't need to mark it here. We used to skip the text section, but it
21771 should never be valid for relocated addresses to be placed in the text
21772 section. */
21773 if (DEFAULT_ABI == ABI_V4
21774 && (TARGET_RELOCATABLE || flag_pic > 1)
21775 && in_section != toc_section
21776 && !recurse
21777 && !CONST_SCALAR_INT_P (x)
21778 && CONSTANT_P (x))
21780 char buf[256];
21782 recurse = 1;
21783 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21784 fixuplabelno++;
21785 ASM_OUTPUT_LABEL (asm_out_file, buf);
21786 fprintf (asm_out_file, "\t.long\t(");
21787 output_addr_const (asm_out_file, x);
21788 fprintf (asm_out_file, ")@fixup\n");
21789 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21790 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21791 fprintf (asm_out_file, "\t.long\t");
21792 assemble_name (asm_out_file, buf);
21793 fprintf (asm_out_file, "\n\t.previous\n");
21794 recurse = 0;
21795 return true;
21797 /* Remove initial .'s to turn a -mcall-aixdesc function
21798 address into the address of the descriptor, not the function
21799 itself. */
21800 else if (GET_CODE (x) == SYMBOL_REF
21801 && XSTR (x, 0)[0] == '.'
21802 && DEFAULT_ABI == ABI_AIX)
21804 const char *name = XSTR (x, 0);
21805 while (*name == '.')
21806 name++;
21808 fprintf (asm_out_file, "\t.long\t%s\n", name);
21809 return true;
21812 #endif /* RELOCATABLE_NEEDS_FIXUP */
21813 return default_assemble_integer (x, size, aligned_p);
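/* A sketch of what the fixup path above writes for one relocatable
   address (label number and symbol name hypothetical):

       .LCP7:
               .long   (sym)@fixup
               .section ".fixup","aw"
               .align  2
               .long   .LCP7
               .previous

   The .fixup entry records the address of the word at .LCP7 so the
   -mrelocatable startup code can relocate it at load time.  */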
21816 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21817 /* Emit an assembler directive to set symbol visibility for DECL to
21818 VISIBILITY_TYPE. */
21820 static void
21821 rs6000_assemble_visibility (tree decl, int vis)
21823 if (TARGET_XCOFF)
21824 return;
21826 /* Functions need to have their entry point symbol visibility set as
21827 well as their descriptor symbol visibility. */
21828 if (DEFAULT_ABI == ABI_AIX
21829 && DOT_SYMBOLS
21830 && TREE_CODE (decl) == FUNCTION_DECL)
21832 static const char * const visibility_types[] = {
21833 NULL, "protected", "hidden", "internal"
21836 const char *name, *type;
21838 name = ((* targetm.strip_name_encoding)
21839 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21840 type = visibility_types[vis];
21842 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21843 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21845 else
21846 default_assemble_visibility (decl, vis);
21848 #endif
21850 enum rtx_code
21851 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21853 /* Reversal of FP compares takes care -- an ordered compare
21854 becomes an unordered compare and vice versa. */
21855 if (mode == CCFPmode
21856 && (!flag_finite_math_only
21857 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21858 || code == UNEQ || code == LTGT))
21859 return reverse_condition_maybe_unordered (code);
21860 else
21861 return reverse_condition (code);
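/* For example, reversing GE on a CCFPmode comparison yields UNLT, so
   exactly one of the original and reversed branches is taken even when
   an operand is a NaN; with -ffinite-math-only the plain reversal to
   LT suffices.  */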
21864 /* Generate a compare for CODE. Return a brand-new rtx that
21865 represents the result of the compare. */
21867 static rtx
21868 rs6000_generate_compare (rtx cmp, machine_mode mode)
21870 machine_mode comp_mode;
21871 rtx compare_result;
21872 enum rtx_code code = GET_CODE (cmp);
21873 rtx op0 = XEXP (cmp, 0);
21874 rtx op1 = XEXP (cmp, 1);
21876 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21877 comp_mode = CCmode;
21878 else if (FLOAT_MODE_P (mode))
21879 comp_mode = CCFPmode;
21880 else if (code == GTU || code == LTU
21881 || code == GEU || code == LEU)
21882 comp_mode = CCUNSmode;
21883 else if ((code == EQ || code == NE)
21884 && unsigned_reg_p (op0)
21885 && (unsigned_reg_p (op1)
21886 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21887 /* These are unsigned values, perhaps there will be a later
21888 ordering compare that can be shared with this one. */
21889 comp_mode = CCUNSmode;
21890 else
21891 comp_mode = CCmode;
21893 /* If we have an unsigned compare, make sure we don't have a signed value as
21894 an immediate. */
21895 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21896 && INTVAL (op1) < 0)
21898 op0 = copy_rtx_if_shared (op0);
21899 op1 = force_reg (GET_MODE (op0), op1);
21900 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21903 /* First, the compare. */
21904 compare_result = gen_reg_rtx (comp_mode);
21906 /* IEEE 128-bit support in VSX registers when we do not have hardware
21907 support. */
21908 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21910 rtx libfunc = NULL_RTX;
21911 bool check_nan = false;
21912 rtx dest;
21914 switch (code)
21916 case EQ:
21917 case NE:
21918 libfunc = optab_libfunc (eq_optab, mode);
21919 break;
21921 case GT:
21922 case GE:
21923 libfunc = optab_libfunc (ge_optab, mode);
21924 break;
21926 case LT:
21927 case LE:
21928 libfunc = optab_libfunc (le_optab, mode);
21929 break;
21931 case UNORDERED:
21932 case ORDERED:
21933 libfunc = optab_libfunc (unord_optab, mode);
21934 code = (code == UNORDERED) ? NE : EQ;
21935 break;
21937 case UNGE:
21938 case UNGT:
21939 check_nan = true;
21940 libfunc = optab_libfunc (ge_optab, mode);
21941 code = (code == UNGE) ? GE : GT;
21942 break;
21944 case UNLE:
21945 case UNLT:
21946 check_nan = true;
21947 libfunc = optab_libfunc (le_optab, mode);
21948 code = (code == UNLE) ? LE : LT;
21949 break;
21951 case UNEQ:
21952 case LTGT:
21953 check_nan = true;
21954 libfunc = optab_libfunc (eq_optab, mode);
21955 code = (code == UNEQ) ? EQ : NE;
21956 break;
21958 default:
21959 gcc_unreachable ();
21962 gcc_assert (libfunc);
21964 if (!check_nan)
21965 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21966 SImode, op0, mode, op1, mode);
21968 /* The library signals an exception for signalling NaNs, so we need to
21969 handle isgreater, etc. by first checking isordered. */
21970 else
21972 rtx ne_rtx, normal_dest, unord_dest;
21973 rtx unord_func = optab_libfunc (unord_optab, mode);
21974 rtx join_label = gen_label_rtx ();
21975 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21976 rtx unord_cmp = gen_reg_rtx (comp_mode);
21979 /* Test for either value being a NaN. */
21980 gcc_assert (unord_func);
21981 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21982 SImode, op0, mode, op1, mode);
21984 /* Set value (1) if either value is a NaN, and jump to the join
21985 label. */
21986 dest = gen_reg_rtx (SImode);
21987 emit_move_insn (dest, const1_rtx);
21988 emit_insn (gen_rtx_SET (unord_cmp,
21989 gen_rtx_COMPARE (comp_mode, unord_dest,
21990 const0_rtx)));
21992 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21993 emit_jump_insn (gen_rtx_SET (pc_rtx,
21994 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21995 join_ref,
21996 pc_rtx)));
21998 /* Do the normal comparison, knowing that the values are not
21999 NaNs. */
22000 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22001 SImode, op0, mode, op1, mode);
22003 emit_insn (gen_cstoresi4 (dest,
22004 gen_rtx_fmt_ee (code, SImode, normal_dest,
22005 const0_rtx),
22006 normal_dest, const0_rtx));
22008 /* Join NaN and non-NaN paths. Compare dest against 0. */
22009 emit_label (join_label);
22010 code = NE;
22013 emit_insn (gen_rtx_SET (compare_result,
22014 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22017 else
22019 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22020 CLOBBERs to match cmptf_internal2 pattern. */
22021 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22022 && FLOAT128_IBM_P (GET_MODE (op0))
22023 && TARGET_HARD_FLOAT)
22024 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22025 gen_rtvec (10,
22026 gen_rtx_SET (compare_result,
22027 gen_rtx_COMPARE (comp_mode, op0, op1)),
22028 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22029 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22030 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22031 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22032 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22033 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22034 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22035 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22036 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22037 else if (GET_CODE (op1) == UNSPEC
22038 && XINT (op1, 1) == UNSPEC_SP_TEST)
22040 rtx op1b = XVECEXP (op1, 0, 0);
22041 comp_mode = CCEQmode;
22042 compare_result = gen_reg_rtx (CCEQmode);
22043 if (TARGET_64BIT)
22044 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22045 else
22046 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22048 else
22049 emit_insn (gen_rtx_SET (compare_result,
22050 gen_rtx_COMPARE (comp_mode, op0, op1)));
22053 /* Some kinds of FP comparisons need an OR operation;
22054 under flag_finite_math_only we don't bother. */
22055 if (FLOAT_MODE_P (mode)
22056 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22057 && !flag_finite_math_only
22058 && (code == LE || code == GE
22059 || code == UNEQ || code == LTGT
22060 || code == UNGT || code == UNLT))
22062 enum rtx_code or1, or2;
22063 rtx or1_rtx, or2_rtx, compare2_rtx;
22064 rtx or_result = gen_reg_rtx (CCEQmode);
22066 switch (code)
22068 case LE: or1 = LT; or2 = EQ; break;
22069 case GE: or1 = GT; or2 = EQ; break;
22070 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22071 case LTGT: or1 = LT; or2 = GT; break;
22072 case UNGT: or1 = UNORDERED; or2 = GT; break;
22073 case UNLT: or1 = UNORDERED; or2 = LT; break;
22074 default: gcc_unreachable ();
22076 validate_condition_mode (or1, comp_mode);
22077 validate_condition_mode (or2, comp_mode);
22078 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22079 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22080 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22081 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22082 const_true_rtx);
22083 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22085 compare_result = or_result;
22086 code = EQ;
22089 validate_condition_mode (code, GET_MODE (compare_result));
22091 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
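/* An editorial sketch of the no-hardware __float128 expansion above
   for "a UNGT b", assuming the usual libgcc entry points __unordkf2
   and __gekf2:

     dest = 1;                      // NaN answer for UNGT is true
     if (__unordkf2 (a, b) != 0)    // either operand a NaN?
       goto join;
     dest = (__gekf2 (a, b) > 0);   // ordered: strictly greater
   join:
     ;                              // caller branches on dest != 0
*/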
22095 /* Return the diagnostic message string if the binary operation OP is
22096 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22098 static const char*
22099 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22100 const_tree type1,
22101 const_tree type2)
22103 machine_mode mode1 = TYPE_MODE (type1);
22104 machine_mode mode2 = TYPE_MODE (type2);
22106 /* For complex modes, use the inner type. */
22107 if (COMPLEX_MODE_P (mode1))
22108 mode1 = GET_MODE_INNER (mode1);
22110 if (COMPLEX_MODE_P (mode2))
22111 mode2 = GET_MODE_INNER (mode2);
22113 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22114 double to intermix unless -mfloat128-convert. */
22115 if (mode1 == mode2)
22116 return NULL;
22118 if (!TARGET_FLOAT128_CVT)
22120 if ((mode1 == KFmode && mode2 == IFmode)
22121 || (mode1 == IFmode && mode2 == KFmode))
22122 return N_("__float128 and __ibm128 cannot be used in the same "
22123 "expression");
22125 if (TARGET_IEEEQUAD
22126 && ((mode1 == IFmode && mode2 == TFmode)
22127 || (mode1 == TFmode && mode2 == IFmode)))
22128 return N_("__ibm128 and long double cannot be used in the same "
22129 "expression");
22131 if (!TARGET_IEEEQUAD
22132 && ((mode1 == KFmode && mode2 == TFmode)
22133 || (mode1 == TFmode && mode2 == KFmode)))
22134 return N_("__float128 and long double cannot be used in the same "
22135 "expression");
22138 return NULL;
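/* A user-level example (sketch) of code the checks above reject when
   -mfloat128-convert is not in effect:

     __float128 a;
     __ibm128 b;
     __float128 f (void)
     {
       return a + b;  // error: __float128 and __ibm128 cannot be
                      // used in the same expression
     }
*/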
22142 /* Expand floating point conversion to/from __float128 and __ibm128. */
22144 void
22145 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22147 machine_mode dest_mode = GET_MODE (dest);
22148 machine_mode src_mode = GET_MODE (src);
22149 convert_optab cvt = unknown_optab;
22150 bool do_move = false;
22151 rtx libfunc = NULL_RTX;
22152 rtx dest2;
22153 typedef rtx (*rtx_2func_t) (rtx, rtx);
22154 rtx_2func_t hw_convert = (rtx_2func_t)0;
22155 size_t kf_or_tf;
22157 struct hw_conv_t {
22158 rtx_2func_t from_df;
22159 rtx_2func_t from_sf;
22160 rtx_2func_t from_si_sign;
22161 rtx_2func_t from_si_uns;
22162 rtx_2func_t from_di_sign;
22163 rtx_2func_t from_di_uns;
22164 rtx_2func_t to_df;
22165 rtx_2func_t to_sf;
22166 rtx_2func_t to_si_sign;
22167 rtx_2func_t to_si_uns;
22168 rtx_2func_t to_di_sign;
22169 rtx_2func_t to_di_uns;
22170 } hw_conversions[2] = {
22171 /* conversions to/from KFmode */
22173 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22174 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22175 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22176 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22177 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22178 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22179 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22180 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22181 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22182 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22183 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22184 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22187 /* conversions to/from TFmode */
22189 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22190 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22191 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22192 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22193 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22194 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22195 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22196 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22197 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22198 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22199 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22200 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22204 if (dest_mode == src_mode)
22205 gcc_unreachable ();
22207 /* Eliminate memory operations. */
22208 if (MEM_P (src))
22209 src = force_reg (src_mode, src);
22211 if (MEM_P (dest))
22213 rtx tmp = gen_reg_rtx (dest_mode);
22214 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22215 rs6000_emit_move (dest, tmp, dest_mode);
22216 return;
22219 /* Convert to IEEE 128-bit floating point. */
22220 if (FLOAT128_IEEE_P (dest_mode))
22222 if (dest_mode == KFmode)
22223 kf_or_tf = 0;
22224 else if (dest_mode == TFmode)
22225 kf_or_tf = 1;
22226 else
22227 gcc_unreachable ();
22229 switch (src_mode)
22231 case E_DFmode:
22232 cvt = sext_optab;
22233 hw_convert = hw_conversions[kf_or_tf].from_df;
22234 break;
22236 case E_SFmode:
22237 cvt = sext_optab;
22238 hw_convert = hw_conversions[kf_or_tf].from_sf;
22239 break;
22241 case E_KFmode:
22242 case E_IFmode:
22243 case E_TFmode:
22244 if (FLOAT128_IBM_P (src_mode))
22245 cvt = sext_optab;
22246 else
22247 do_move = true;
22248 break;
22250 case E_SImode:
22251 if (unsigned_p)
22253 cvt = ufloat_optab;
22254 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22256 else
22258 cvt = sfloat_optab;
22259 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22261 break;
22263 case E_DImode:
22264 if (unsigned_p)
22266 cvt = ufloat_optab;
22267 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22269 else
22271 cvt = sfloat_optab;
22272 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22274 break;
22276 default:
22277 gcc_unreachable ();
22281 /* Convert from IEEE 128-bit floating point. */
22282 else if (FLOAT128_IEEE_P (src_mode))
22284 if (src_mode == KFmode)
22285 kf_or_tf = 0;
22286 else if (src_mode == TFmode)
22287 kf_or_tf = 1;
22288 else
22289 gcc_unreachable ();
22291 switch (dest_mode)
22293 case E_DFmode:
22294 cvt = trunc_optab;
22295 hw_convert = hw_conversions[kf_or_tf].to_df;
22296 break;
22298 case E_SFmode:
22299 cvt = trunc_optab;
22300 hw_convert = hw_conversions[kf_or_tf].to_sf;
22301 break;
22303 case E_KFmode:
22304 case E_IFmode:
22305 case E_TFmode:
22306 if (FLOAT128_IBM_P (dest_mode))
22307 cvt = trunc_optab;
22308 else
22309 do_move = true;
22310 break;
22312 case E_SImode:
22313 if (unsigned_p)
22315 cvt = ufix_optab;
22316 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22318 else
22320 cvt = sfix_optab;
22321 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22323 break;
22325 case E_DImode:
22326 if (unsigned_p)
22328 cvt = ufix_optab;
22329 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22331 else
22333 cvt = sfix_optab;
22334 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22336 break;
22338 default:
22339 gcc_unreachable ();
22343 /* Both IBM format. */
22344 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22345 do_move = true;
22347 else
22348 gcc_unreachable ();
22350 /* Handle conversion between TFmode/KFmode. */
22351 if (do_move)
22352 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22354 /* Handle conversion if we have hardware support. */
22355 else if (TARGET_FLOAT128_HW && hw_convert)
22356 emit_insn ((hw_convert) (dest, src));
22358 /* Call an external function to do the conversion. */
22359 else if (cvt != unknown_optab)
22361 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22362 gcc_assert (libfunc != NULL_RTX);
22364 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22365 src, src_mode);
22367 gcc_assert (dest2 != NULL_RTX);
22368 if (!rtx_equal_p (dest, dest2))
22369 emit_move_insn (dest, dest2);
22372 else
22373 gcc_unreachable ();
22375 return;
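/* Dispatch summary for the routine above (editorial): conversions
   between two IEEE formats (KF/TF) or two IBM formats are plain moves;
   KF/TF to or from SF/DF/SI/DI use the hw_conversions table under
   TARGET_FLOAT128_HW, and otherwise the libcall registered for the
   selected convert optab.  */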
22379 /* Emit the RTL for an sISEL pattern. */
22381 void
22382 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
22384 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
22387 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22388 can be used as that dest register. Return the dest register. */
22390 rtx
22391 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22393 if (op2 == const0_rtx)
22394 return op1;
22396 if (GET_CODE (scratch) == SCRATCH)
22397 scratch = gen_reg_rtx (mode);
22399 if (logical_operand (op2, mode))
22400 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22401 else
22402 emit_insn (gen_rtx_SET (scratch,
22403 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22405 return scratch;
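/* Sketch of the two shapes emitted above for "r3 == C": when C is a
   logical_operand the scratch is r3 ^ C (typically an xori/xoris);
   otherwise it is r3 + (-C) (typically an addi).  Either way the
   scratch is zero exactly when the operands are equal.  */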
22408 void
22409 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22411 rtx condition_rtx;
22412 machine_mode op_mode;
22413 enum rtx_code cond_code;
22414 rtx result = operands[0];
22416 condition_rtx = rs6000_generate_compare (operands[1], mode);
22417 cond_code = GET_CODE (condition_rtx);
22419 if (cond_code == NE
22420 || cond_code == GE || cond_code == LE
22421 || cond_code == GEU || cond_code == LEU
22422 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22424 rtx not_result = gen_reg_rtx (CCEQmode);
22425 rtx not_op, rev_cond_rtx;
22426 machine_mode cc_mode;
22428 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22430 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22431 SImode, XEXP (condition_rtx, 0), const0_rtx);
22432 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22433 emit_insn (gen_rtx_SET (not_result, not_op));
22434 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22437 op_mode = GET_MODE (XEXP (operands[1], 0));
22438 if (op_mode == VOIDmode)
22439 op_mode = GET_MODE (XEXP (operands[1], 1));
22441 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22443 PUT_MODE (condition_rtx, DImode);
22444 convert_move (result, condition_rtx, 0);
22446 else
22448 PUT_MODE (condition_rtx, SImode);
22449 emit_insn (gen_rtx_SET (result, condition_rtx));
22453 /* Emit a branch of kind CODE to location LOC. */
22455 void
22456 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22458 rtx condition_rtx, loc_ref;
22460 condition_rtx = rs6000_generate_compare (operands[0], mode);
22461 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22462 emit_jump_insn (gen_rtx_SET (pc_rtx,
22463 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22464 loc_ref, pc_rtx)));
22467 /* Return the string to output a conditional branch to LABEL, which is
22468 the operand template of the label, or NULL if the branch is really a
22469 conditional return.
22471 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22472 condition code register and its mode specifies what kind of
22473 comparison we made.
22475 REVERSED is nonzero if we should reverse the sense of the comparison.
22477 INSN is the insn. */
22479 char *
22480 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22482 static char string[64];
22483 enum rtx_code code = GET_CODE (op);
22484 rtx cc_reg = XEXP (op, 0);
22485 machine_mode mode = GET_MODE (cc_reg);
22486 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22487 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22488 int really_reversed = reversed ^ need_longbranch;
22489 char *s = string;
22490 const char *ccode;
22491 const char *pred;
22492 rtx note;
22494 validate_condition_mode (code, mode);
22496 /* Work out which way this really branches. We could use
22497 reverse_condition_maybe_unordered here always but this
22498 makes the resulting assembler clearer. */
22499 if (really_reversed)
22501 /* Reversal of FP compares takes care -- an ordered compare
22502 becomes an unordered compare and vice versa. */
22503 if (mode == CCFPmode)
22504 code = reverse_condition_maybe_unordered (code);
22505 else
22506 code = reverse_condition (code);
22509 switch (code)
22511 /* Not all of these are actually distinct opcodes, but
22512 we distinguish them for clarity of the resulting assembler. */
22513 case NE: case LTGT:
22514 ccode = "ne"; break;
22515 case EQ: case UNEQ:
22516 ccode = "eq"; break;
22517 case GE: case GEU:
22518 ccode = "ge"; break;
22519 case GT: case GTU: case UNGT:
22520 ccode = "gt"; break;
22521 case LE: case LEU:
22522 ccode = "le"; break;
22523 case LT: case LTU: case UNLT:
22524 ccode = "lt"; break;
22525 case UNORDERED: ccode = "un"; break;
22526 case ORDERED: ccode = "nu"; break;
22527 case UNGE: ccode = "nl"; break;
22528 case UNLE: ccode = "ng"; break;
22529 default:
22530 gcc_unreachable ();
22533 /* Maybe we have a guess as to how likely the branch is. */
22534 pred = "";
22535 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22536 if (note != NULL_RTX)
22538 /* PROB is the difference from 50%. */
22539 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22540 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22542 /* Only hint for highly probable/improbable branches on newer cpus when
22543 we have real profile data, as static prediction overrides processor
22544 dynamic prediction. For older cpus we may as well always hint, but
22545 assume not taken for branches that are very close to 50% as a
22546 mispredicted taken branch is more expensive than a
22547 mispredicted not-taken branch. */
22548 if (rs6000_always_hint
22549 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22550 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22551 && br_prob_note_reliable_p (note)))
22553 if (abs (prob) > REG_BR_PROB_BASE / 20
22554 && ((prob > 0) ^ need_longbranch))
22555 pred = "+";
22556 else
22557 pred = "-";
22561 if (label == NULL)
22562 s += sprintf (s, "b%slr%s ", ccode, pred);
22563 else
22564 s += sprintf (s, "b%s%s ", ccode, pred);
22566 /* We need to escape any '%' characters in the reg_names string.
22567 Assume they'd only be the first character.... */
22568 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22569 *s++ = '%';
22570 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22572 if (label != NULL)
22574 /* If the branch distance was too far, we may have to use an
22575 unconditional branch to go the distance. */
22576 if (need_longbranch)
22577 s += sprintf (s, ",$+8\n\tb %s", label);
22578 else
22579 s += sprintf (s, ",%s", label);
22582 return string;
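/* Example results (sketch; bare-number CR names and a hypothetical
   label assumed): a probable EQ branch on cr0 prints as "beq+ 0,.L23";
   an out-of-range branch inverts the sense around an unconditional
   branch, "bne 0,$+8\n\tb .L23"; and a NULL label produces a
   conditional return such as "beqlr 0".  */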
22585 /* Return insn for VSX or Altivec comparisons. */
22587 static rtx
22588 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22590 rtx mask;
22591 machine_mode mode = GET_MODE (op0);
22593 switch (code)
22595 default:
22596 break;
22598 case GE:
22599 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22600 return NULL_RTX;
22601 /* FALLTHRU */
22603 case EQ:
22604 case GT:
22605 case GTU:
22606 case ORDERED:
22607 case UNORDERED:
22608 case UNEQ:
22609 case LTGT:
22610 mask = gen_reg_rtx (mode);
22611 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22612 return mask;
22615 return NULL_RTX;
22618 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22619 DMODE is expected destination mode. This is a recursive function. */
22621 static rtx
22622 rs6000_emit_vector_compare (enum rtx_code rcode,
22623 rtx op0, rtx op1,
22624 machine_mode dmode)
22626 rtx mask;
22627 bool swap_operands = false;
22628 bool try_again = false;
22630 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22631 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22633 /* See if the comparison works as is. */
22634 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22635 if (mask)
22636 return mask;
22638 switch (rcode)
22640 case LT:
22641 rcode = GT;
22642 swap_operands = true;
22643 try_again = true;
22644 break;
22645 case LTU:
22646 rcode = GTU;
22647 swap_operands = true;
22648 try_again = true;
22649 break;
22650 case NE:
22651 case UNLE:
22652 case UNLT:
22653 case UNGE:
22654 case UNGT:
22655 /* Invert condition and try again.
22656 e.g., A != B becomes ~(A==B). */
22658 enum rtx_code rev_code;
22659 enum insn_code nor_code;
22660 rtx mask2;
22662 rev_code = reverse_condition_maybe_unordered (rcode);
22663 if (rev_code == UNKNOWN)
22664 return NULL_RTX;
22666 nor_code = optab_handler (one_cmpl_optab, dmode);
22667 if (nor_code == CODE_FOR_nothing)
22668 return NULL_RTX;
22670 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22671 if (!mask2)
22672 return NULL_RTX;
22674 mask = gen_reg_rtx (dmode);
22675 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22676 return mask;
22678 break;
22679 case GE:
22680 case GEU:
22681 case LE:
22682 case LEU:
22683 /* Try GT/GTU/LT/LTU OR EQ */
22685 rtx c_rtx, eq_rtx;
22686 enum insn_code ior_code;
22687 enum rtx_code new_code;
22689 switch (rcode)
22691 case GE:
22692 new_code = GT;
22693 break;
22695 case GEU:
22696 new_code = GTU;
22697 break;
22699 case LE:
22700 new_code = LT;
22701 break;
22703 case LEU:
22704 new_code = LTU;
22705 break;
22707 default:
22708 gcc_unreachable ();
22711 ior_code = optab_handler (ior_optab, dmode);
22712 if (ior_code == CODE_FOR_nothing)
22713 return NULL_RTX;
22715 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22716 if (!c_rtx)
22717 return NULL_RTX;
22719 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22720 if (!eq_rtx)
22721 return NULL_RTX;
22723 mask = gen_reg_rtx (dmode);
22724 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22725 return mask;
22727 break;
22728 default:
22729 return NULL_RTX;
22732 if (try_again)
22734 if (swap_operands)
22735 std::swap (op0, op1);
22737 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22738 if (mask)
22739 return mask;
22742 /* You only get two chances. */
22743 return NULL_RTX;
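/* Examples of the rewrites performed above (sketch): "a < b" retries
   as "b > a" with the operands swapped; integer "a >= b" is built as
   (a > b) | (a == b) via ior_optab; and "a != b" becomes ~(a == b)
   via one_cmpl_optab.  */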
22746 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22747 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22748 operands for the relation operation COND. */
22750 int
22751 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22752 rtx cond, rtx cc_op0, rtx cc_op1)
22754 machine_mode dest_mode = GET_MODE (dest);
22755 machine_mode mask_mode = GET_MODE (cc_op0);
22756 enum rtx_code rcode = GET_CODE (cond);
22757 machine_mode cc_mode = CCmode;
22758 rtx mask;
22759 rtx cond2;
22760 bool invert_move = false;
22762 if (VECTOR_UNIT_NONE_P (dest_mode))
22763 return 0;
22765 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22766 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22768 switch (rcode)
22770 /* Swap operands if we can, and fall back to doing the operation as
22771 specified, and doing a NOR to invert the test. */
22772 case NE:
22773 case UNLE:
22774 case UNLT:
22775 case UNGE:
22776 case UNGT:
22777 /* Invert condition and try again.
22778 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22779 invert_move = true;
22780 rcode = reverse_condition_maybe_unordered (rcode);
22781 if (rcode == UNKNOWN)
22782 return 0;
22783 break;
22785 case GE:
22786 case LE:
22787 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22789 /* Invert condition to avoid compound test. */
22790 invert_move = true;
22791 rcode = reverse_condition (rcode);
22793 break;
22795 case GTU:
22796 case GEU:
22797 case LTU:
22798 case LEU:
22799 /* Mark unsigned tests with CCUNSmode. */
22800 cc_mode = CCUNSmode;
22802 /* Invert condition to avoid compound test if necessary. */
22803 if (rcode == GEU || rcode == LEU)
22805 invert_move = true;
22806 rcode = reverse_condition (rcode);
22808 break;
22810 default:
22811 break;
22814 /* Get the vector mask for the given relational operations. */
22815 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22817 if (!mask)
22818 return 0;
22820 if (invert_move)
22821 std::swap (op_true, op_false);
22823 /* Optimize vec1 == vec2, knowing the mask generates -1/0. */
22824 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22825 && (GET_CODE (op_true) == CONST_VECTOR
22826 || GET_CODE (op_false) == CONST_VECTOR))
22828 rtx constant_0 = CONST0_RTX (dest_mode);
22829 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22831 if (op_true == constant_m1 && op_false == constant_0)
22833 emit_move_insn (dest, mask);
22834 return 1;
22837 else if (op_true == constant_0 && op_false == constant_m1)
22839 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22840 return 1;
22843 /* If we can't use the vector comparison directly, perhaps we can use
22844 the mask for the true or false fields, instead of loading up a
22845 constant. */
22846 if (op_true == constant_m1)
22847 op_true = mask;
22849 if (op_false == constant_0)
22850 op_false = mask;
22853 if (!REG_P (op_true) && !SUBREG_P (op_true))
22854 op_true = force_reg (dest_mode, op_true);
22856 if (!REG_P (op_false) && !SUBREG_P (op_false))
22857 op_false = force_reg (dest_mode, op_false);
22859 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22860 CONST0_RTX (dest_mode));
22861 emit_insn (gen_rtx_SET (dest,
22862 gen_rtx_IF_THEN_ELSE (dest_mode,
22863 cond2,
22864 op_true,
22865 op_false)));
22866 return 1;
22869 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22870 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
22871 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22872 hardware has no such operation. */
22874 static int
22875 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22877 enum rtx_code code = GET_CODE (op);
22878 rtx op0 = XEXP (op, 0);
22879 rtx op1 = XEXP (op, 1);
22880 machine_mode compare_mode = GET_MODE (op0);
22881 machine_mode result_mode = GET_MODE (dest);
22882 bool max_p = false;
22884 if (result_mode != compare_mode)
22885 return 0;
22887 if (code == GE || code == GT)
22888 max_p = true;
22889 else if (code == LE || code == LT)
22890 max_p = false;
22891 else
22892 return 0;
22894 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22897 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22898 max_p = !max_p;
22900 else
22901 return 0;
22903 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22904 return 1;
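/* E.g. "d = (a >= b) ? a : b" in DFmode is emitted as SMAX, and the
   operand-swapped "d = (a >= b) ? b : a" as SMIN; on ISA 3.0 these are
   expected to become the xsmaxcdp/xsmincdp instructions (an editorial
   note, not asserted by this code).  */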
22907 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22908 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22909 operands of the last comparison is nonzero/true, FALSE_COND if it is
22910 zero/false. Return 0 if the hardware has no such operation. */
22912 static int
22913 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22915 enum rtx_code code = GET_CODE (op);
22916 rtx op0 = XEXP (op, 0);
22917 rtx op1 = XEXP (op, 1);
22918 machine_mode result_mode = GET_MODE (dest);
22919 rtx compare_rtx;
22920 rtx cmove_rtx;
22921 rtx clobber_rtx;
22923 if (!can_create_pseudo_p ())
22924 return 0;
22926 switch (code)
22928 case EQ:
22929 case GE:
22930 case GT:
22931 break;
22933 case NE:
22934 case LT:
22935 case LE:
22936 code = swap_condition (code);
22937 std::swap (op0, op1);
22938 break;
22940 default:
22941 return 0;
22944 /* Generate: [(parallel [(set (dest)
22945 (if_then_else (op (cmp1) (cmp2))
22946 (true)
22947 (false)))
22948 (clobber (scratch))])]. */
22950 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22951 cmove_rtx = gen_rtx_SET (dest,
22952 gen_rtx_IF_THEN_ELSE (result_mode,
22953 compare_rtx,
22954 true_cond,
22955 false_cond));
22957 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22958 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22959 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22961 return 1;
22964 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
22965 operands of the last comparison is nonzero/true, FALSE_COND if it
22966 is zero/false. Return 0 if the hardware has no such operation. */
22968 int
22969 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22971 enum rtx_code code = GET_CODE (op);
22972 rtx op0 = XEXP (op, 0);
22973 rtx op1 = XEXP (op, 1);
22974 machine_mode compare_mode = GET_MODE (op0);
22975 machine_mode result_mode = GET_MODE (dest);
22976 rtx temp;
22977 bool is_against_zero;
22979 /* These modes should always match. */
22980 if (GET_MODE (op1) != compare_mode
22981 /* In the isel case however, we can use a compare immediate, so
22982 op1 may be a small constant. */
22983 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22984 return 0;
22985 if (GET_MODE (true_cond) != result_mode)
22986 return 0;
22987 if (GET_MODE (false_cond) != result_mode)
22988 return 0;
22990 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22991 if (TARGET_P9_MINMAX
22992 && (compare_mode == SFmode || compare_mode == DFmode)
22993 && (result_mode == SFmode || result_mode == DFmode))
22995 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22996 return 1;
22998 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22999 return 1;
23002 /* Don't allow using floating point comparisons for integer results for
23003 now. */
23004 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23005 return 0;
23007 /* First, work out if the hardware can do this at all, or
23008 if it's too slow.... */
23009 if (!FLOAT_MODE_P (compare_mode))
23011 if (TARGET_ISEL)
23012 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23013 return 0;
23016 is_against_zero = op1 == CONST0_RTX (compare_mode);
23018 /* A floating-point subtract might overflow, underflow, or produce
23019 an inexact result, thus changing the floating-point flags, so it
23020 can't be generated if we care about that. It's safe if one side
23021 of the construct is zero, since then no subtract will be
23022 generated. */
23023 if (SCALAR_FLOAT_MODE_P (compare_mode)
23024 && flag_trapping_math && ! is_against_zero)
23025 return 0;
23027 /* Eliminate half of the comparisons by switching operands, this
23028 makes the remaining code simpler. */
23029 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23030 || code == LTGT || code == LT || code == UNLE)
23032 code = reverse_condition_maybe_unordered (code);
23033 temp = true_cond;
23034 true_cond = false_cond;
23035 false_cond = temp;
23038 /* UNEQ and LTGT take four instructions for a comparison with zero;
23039 it'll probably be faster to use a branch here too. */
23040 if (code == UNEQ && HONOR_NANS (compare_mode))
23041 return 0;
23043 /* We're going to try to implement comparisons by performing
23044 a subtract, then comparing against zero. Unfortunately,
23045 Inf - Inf is NaN which is not zero, and so if we don't
23046 know that the operand is finite and the comparison
23047 would treat EQ different to UNORDERED, we can't do it. */
23048 if (HONOR_INFINITIES (compare_mode)
23049 && code != GT && code != UNGE
23050 && (GET_CODE (op1) != CONST_DOUBLE
23051 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23052 /* Constructs of the form (a OP b ? a : b) are safe. */
23053 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23054 || (! rtx_equal_p (op0, true_cond)
23055 && ! rtx_equal_p (op1, true_cond))))
23056 return 0;
23058 /* At this point we know we can use fsel. */
23060 /* Reduce the comparison to a comparison against zero. */
23061 if (! is_against_zero)
23063 temp = gen_reg_rtx (compare_mode);
23064 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23065 op0 = temp;
23066 op1 = CONST0_RTX (compare_mode);
23069 /* If we don't care about NaNs we can reduce some of the comparisons
23070 down to faster ones. */
23071 if (! HONOR_NANS (compare_mode))
23072 switch (code)
23074 case GT:
23075 code = LE;
23076 temp = true_cond;
23077 true_cond = false_cond;
23078 false_cond = temp;
23079 break;
23080 case UNGE:
23081 code = GE;
23082 break;
23083 case UNEQ:
23084 code = EQ;
23085 break;
23086 default:
23087 break;
23090 /* Now, reduce everything down to a GE. */
23091 switch (code)
23093 case GE:
23094 break;
23096 case LE:
23097 temp = gen_reg_rtx (compare_mode);
23098 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23099 op0 = temp;
23100 break;
23102 case ORDERED:
23103 temp = gen_reg_rtx (compare_mode);
23104 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23105 op0 = temp;
23106 break;
23108 case EQ:
23109 temp = gen_reg_rtx (compare_mode);
23110 emit_insn (gen_rtx_SET (temp,
23111 gen_rtx_NEG (compare_mode,
23112 gen_rtx_ABS (compare_mode, op0))));
23113 op0 = temp;
23114 break;
23116 case UNGE:
23117 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23118 temp = gen_reg_rtx (result_mode);
23119 emit_insn (gen_rtx_SET (temp,
23120 gen_rtx_IF_THEN_ELSE (result_mode,
23121 gen_rtx_GE (VOIDmode,
23122 op0, op1),
23123 true_cond, false_cond)));
23124 false_cond = true_cond;
23125 true_cond = temp;
23127 temp = gen_reg_rtx (compare_mode);
23128 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23129 op0 = temp;
23130 break;
23132 case GT:
23133 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23134 temp = gen_reg_rtx (result_mode);
23135 emit_insn (gen_rtx_SET (temp,
23136 gen_rtx_IF_THEN_ELSE (result_mode,
23137 gen_rtx_GE (VOIDmode,
23138 op0, op1),
23139 true_cond, false_cond)));
23140 true_cond = false_cond;
23141 false_cond = temp;
23143 temp = gen_reg_rtx (compare_mode);
23144 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23145 op0 = temp;
23146 break;
23148 default:
23149 gcc_unreachable ();
23152 emit_insn (gen_rtx_SET (dest,
23153 gen_rtx_IF_THEN_ELSE (result_mode,
23154 gen_rtx_GE (VOIDmode,
23155 op0, op1),
23156 true_cond, false_cond)));
23157 return 1;
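/* One worked reduction from the code above (sketch): for
   "x = (a <= b) ? c : d", once the subtract is known safe the
   expansion is t = a - b; t = -t; followed by the final
   "x = (t >= 0) ? c : d" move, since a <= b iff -(a - b) >= 0.  */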
23160 /* Same as above, but for ints (isel). */
23162 static int
23163 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23165 rtx condition_rtx, cr;
23166 machine_mode mode = GET_MODE (dest);
23167 enum rtx_code cond_code;
23168 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23169 bool signedp;
23171 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23172 return 0;
23174 /* We still have to do the compare, because isel doesn't do a
23175 compare; it just looks at the CRx bits set by a previous compare
23176 instruction. */
23177 condition_rtx = rs6000_generate_compare (op, mode);
23178 cond_code = GET_CODE (condition_rtx);
23179 cr = XEXP (condition_rtx, 0);
23180 signedp = GET_MODE (cr) == CCmode;
23182 isel_func = (mode == SImode
23183 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23184 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23186 switch (cond_code)
23188 case LT: case GT: case LTU: case GTU: case EQ:
23189 /* isel handles these directly. */
23190 break;
23192 default:
23193 /* We need to swap the sense of the comparison. */
23195 std::swap (false_cond, true_cond);
23196 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23198 break;
23201 false_cond = force_reg (mode, false_cond);
23202 if (true_cond != const0_rtx)
23203 true_cond = force_reg (mode, true_cond);
23205 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23207 return 1;
23210 const char *
23211 output_isel (rtx *operands)
23213 enum rtx_code code;
23215 code = GET_CODE (operands[1]);
23217 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
23219 gcc_assert (GET_CODE (operands[2]) == REG
23220 && GET_CODE (operands[3]) == REG);
23221 PUT_CODE (operands[1], reverse_condition (code));
23222 return "isel %0,%3,%2,%j1";
23225 return "isel %0,%2,%3,%j1";
23228 void
23229 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23231 machine_mode mode = GET_MODE (op0);
23232 enum rtx_code c;
23233 rtx target;
23235 /* VSX/altivec have direct min/max insns. */
23236 if ((code == SMAX || code == SMIN)
23237 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23238 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23240 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23241 return;
23244 if (code == SMAX || code == SMIN)
23245 c = GE;
23246 else
23247 c = GEU;
23249 if (code == SMAX || code == UMAX)
23250 target = emit_conditional_move (dest, c, op0, op1, mode,
23251 op0, op1, mode, 0);
23252 else
23253 target = emit_conditional_move (dest, c, op0, op1, mode,
23254 op1, op0, mode, 0);
23255 gcc_assert (target);
23256 if (target != dest)
23257 emit_move_insn (dest, target);
23260 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23261 the value to come from memory or to already be loaded into a GPR. */
23263 void
23264 rs6000_split_signbit (rtx dest, rtx src)
23266 machine_mode d_mode = GET_MODE (dest);
23267 machine_mode s_mode = GET_MODE (src);
23268 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23269 rtx shift_reg = dest_di;
23271 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
23273 if (MEM_P (src))
23275 rtx mem = (WORDS_BIG_ENDIAN
23276 ? adjust_address (src, DImode, 0)
23277 : adjust_address (src, DImode, 8));
23278 emit_insn (gen_rtx_SET (dest_di, mem));
23281 else
23283 unsigned int r = reg_or_subregno (src);
23285 if (INT_REGNO_P (r))
23286 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
23288 else
23290 /* Generate the special mfvsrd instruction to get it in a GPR. */
23291 gcc_assert (VSX_REGNO_P (r));
23292 if (s_mode == KFmode)
23293 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23294 else
23295 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23299 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23300 return;
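/* Example (sketch, hypothetical registers): on little-endian the sign
   of a __float128 held in GPRs r10:r11 lives in the high bit of r11
   (the r + 1 case above), so the result reduces to a logical right
   shift by 63, i.e. "srdi dest,r11,63".  */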
23303 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23304 COND is true. Mark the jump as unlikely to be taken. */
23306 static void
23307 emit_unlikely_jump (rtx cond, rtx label)
23309 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23310 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23311 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23314 /* A subroutine of the atomic operation splitters. Emit a load-locked
23315 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23316 the zero_extend operation. */
23318 static void
23319 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23321 rtx (*fn) (rtx, rtx) = NULL;
23323 switch (mode)
23325 case E_QImode:
23326 fn = gen_load_lockedqi;
23327 break;
23328 case E_HImode:
23329 fn = gen_load_lockedhi;
23330 break;
23331 case E_SImode:
23332 if (GET_MODE (mem) == QImode)
23333 fn = gen_load_lockedqi_si;
23334 else if (GET_MODE (mem) == HImode)
23335 fn = gen_load_lockedhi_si;
23336 else
23337 fn = gen_load_lockedsi;
23338 break;
23339 case E_DImode:
23340 fn = gen_load_lockeddi;
23341 break;
23342 case E_TImode:
23343 fn = gen_load_lockedti;
23344 break;
23345 default:
23346 gcc_unreachable ();
23348 emit_insn (fn (reg, mem));
23351 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23352 instruction in MODE. */
23354 static void
23355 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23357 rtx (*fn) (rtx, rtx, rtx) = NULL;
23359 switch (mode)
23361 case E_QImode:
23362 fn = gen_store_conditionalqi;
23363 break;
23364 case E_HImode:
23365 fn = gen_store_conditionalhi;
23366 break;
23367 case E_SImode:
23368 fn = gen_store_conditionalsi;
23369 break;
23370 case E_DImode:
23371 fn = gen_store_conditionaldi;
23372 break;
23373 case E_TImode:
23374 fn = gen_store_conditionalti;
23375 break;
23376 default:
23377 gcc_unreachable ();
23380 /* Emit sync before stwcx. to address PPC405 Erratum. */
23381 if (PPC405_ERRATUM77)
23382 emit_insn (gen_hwsync ());
23384 emit_insn (fn (res, mem, val));
23387 /* Expand barriers before and after a load_locked/store_cond sequence. */
23389 static rtx
23390 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23392 rtx addr = XEXP (mem, 0);
23394 if (!legitimate_indirect_address_p (addr, reload_completed)
23395 && !legitimate_indexed_address_p (addr, reload_completed))
23397 addr = force_reg (Pmode, addr);
23398 mem = replace_equiv_address_nv (mem, addr);
23401 switch (model)
23403 case MEMMODEL_RELAXED:
23404 case MEMMODEL_CONSUME:
23405 case MEMMODEL_ACQUIRE:
23406 break;
23407 case MEMMODEL_RELEASE:
23408 case MEMMODEL_ACQ_REL:
23409 emit_insn (gen_lwsync ());
23410 break;
23411 case MEMMODEL_SEQ_CST:
23412 emit_insn (gen_hwsync ());
23413 break;
23414 default:
23415 gcc_unreachable ();
23417 return mem;
23420 static void
23421 rs6000_post_atomic_barrier (enum memmodel model)
23423 switch (model)
23425 case MEMMODEL_RELAXED:
23426 case MEMMODEL_CONSUME:
23427 case MEMMODEL_RELEASE:
23428 break;
23429 case MEMMODEL_ACQUIRE:
23430 case MEMMODEL_ACQ_REL:
23431 case MEMMODEL_SEQ_CST:
23432 emit_insn (gen_isync ());
23433 break;
23434 default:
23435 gcc_unreachable ();
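/* Barrier selection of the two helpers above, summarized (editorial):

     model               before sequence   after sequence
     relaxed / consume   (none)            (none)
     acquire             (none)            isync
     release             lwsync            (none)
     acq_rel             lwsync            isync
     seq_cst             hwsync            isync
*/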
23439 /* A subroutine of the various atomic expanders. For sub-word operations,
23440 we must adjust things to operate on SImode. Given the original MEM,
23441 return a new aligned memory. Also build and return the quantities by
23442 which to shift and mask. */
23444 static rtx
23445 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23447 rtx addr, align, shift, mask, mem;
23448 HOST_WIDE_INT shift_mask;
23449 machine_mode mode = GET_MODE (orig_mem);
23451 /* For smaller modes, we have to implement this via SImode. */
23452 shift_mask = (mode == QImode ? 0x18 : 0x10);
23454 addr = XEXP (orig_mem, 0);
23455 addr = force_reg (GET_MODE (addr), addr);
23457 /* Aligned memory containing subword. Generate a new memory. We
23458 do not want any of the existing MEM_ATTR data, as we're now
23459 accessing memory outside the original object. */
23460 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23461 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23462 mem = gen_rtx_MEM (SImode, align);
23463 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23464 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23465 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23467 /* Shift amount for subword relative to aligned word. */
23468 shift = gen_reg_rtx (SImode);
23469 addr = gen_lowpart (SImode, addr);
23470 rtx tmp = gen_reg_rtx (SImode);
23471 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23472 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23473 if (BYTES_BIG_ENDIAN)
23474 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23475 shift, 1, OPTAB_LIB_WIDEN);
23476 *pshift = shift;
23478 /* Mask for insertion. */
23479 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23480 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23481 *pmask = mask;
23483 return mem;
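/* Worked example of the arithmetic above (sketch): a HImode access at
   address 0x1002 uses the aligned word at 0x1000; the bit offset is
   (0x1002 << 3) & 0x10 = 16, which big-endian XORs with 0x10 down to
   0 while little-endian keeps 16; the insertion mask is then
   0xffff << shift.  */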
23486 /* A subroutine of the various atomic expanders. For sub-word operands,
23487 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23489 static rtx
23490 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23492 rtx x;
23494 x = gen_reg_rtx (SImode);
23495 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23496 gen_rtx_NOT (SImode, mask),
23497 oldval)));
23499 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23501 return x;
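/* I.e. the merged word is (oldval & ~mask) | newval, NEWVAL having
   already been shifted into position by the caller.  */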
23504 /* A subroutine of the various atomic expanders. For sub-word operands,
23505 extract WIDE to NARROW via SHIFT. */
23507 static void
23508 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23510 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23511 wide, 1, OPTAB_LIB_WIDEN);
23512 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23515 /* Expand an atomic compare and swap operation. */
23517 void
23518 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23520 rtx boolval, retval, mem, oldval, newval, cond;
23521 rtx label1, label2, x, mask, shift;
23522 machine_mode mode, orig_mode;
23523 enum memmodel mod_s, mod_f;
23524 bool is_weak;
23526 boolval = operands[0];
23527 retval = operands[1];
23528 mem = operands[2];
23529 oldval = operands[3];
23530 newval = operands[4];
23531 is_weak = (INTVAL (operands[5]) != 0);
23532 mod_s = memmodel_base (INTVAL (operands[6]));
23533 mod_f = memmodel_base (INTVAL (operands[7]));
23534 orig_mode = mode = GET_MODE (mem);
23536 mask = shift = NULL_RTX;
23537 if (mode == QImode || mode == HImode)
23539 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23540 lwarx and use shift/mask operations. With power8, we need to do the
23541 comparison in SImode, but the store is still done in QI/HImode. */
23542 oldval = convert_modes (SImode, mode, oldval, 1);
23544 if (!TARGET_SYNC_HI_QI)
23546 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23548 /* Shift and mask OLDVAL into position within the word. */
23549 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23550 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23552 /* Shift and mask NEWVAL into position within the word. */
23553 newval = convert_modes (SImode, mode, newval, 1);
23554 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23555 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23558 /* Prepare to adjust the return value. */
23559 retval = gen_reg_rtx (SImode);
23560 mode = SImode;
23562 else if (reg_overlap_mentioned_p (retval, oldval))
23563 oldval = copy_to_reg (oldval);
23565 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23566 oldval = copy_to_mode_reg (mode, oldval);
23568 if (reg_overlap_mentioned_p (retval, newval))
23569 newval = copy_to_reg (newval);
23571 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23573 label1 = NULL_RTX;
23574 if (!is_weak)
23576 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23577 emit_label (XEXP (label1, 0));
23579 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23581 emit_load_locked (mode, retval, mem);
23583 x = retval;
23584 if (mask)
23585 x = expand_simple_binop (SImode, AND, retval, mask,
23586 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23588 cond = gen_reg_rtx (CCmode);
23589 /* If we have TImode, synthesize a comparison. */
23590 if (mode != TImode)
23591 x = gen_rtx_COMPARE (CCmode, x, oldval);
23592 else
23594 rtx xor1_result = gen_reg_rtx (DImode);
23595 rtx xor2_result = gen_reg_rtx (DImode);
23596 rtx or_result = gen_reg_rtx (DImode);
23597 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23598 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23599 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23600 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23602 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23603 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23604 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23605 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23608 emit_insn (gen_rtx_SET (cond, x));
23610 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23611 emit_unlikely_jump (x, label2);
23613 x = newval;
23614 if (mask)
23615 x = rs6000_mask_atomic_subword (retval, newval, mask);
23617 emit_store_conditional (orig_mode, cond, mem, x);
23619 if (!is_weak)
23621 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23622 emit_unlikely_jump (x, label1);
23625 if (!is_mm_relaxed (mod_f))
23626 emit_label (XEXP (label2, 0));
23628 rs6000_post_atomic_barrier (mod_s);
23630 if (is_mm_relaxed (mod_f))
23631 emit_label (XEXP (label2, 0));
23633 if (shift)
23634 rs6000_finish_atomic_subword (operands[1], retval, shift);
23635 else if (mode != GET_MODE (operands[1]))
23636 convert_move (operands[1], retval, 1);
23638 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23639 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23640 emit_insn (gen_rtx_SET (boolval, x));
23643 /* Expand an atomic exchange operation. */
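/* Illustrative sketch only: for a word-size exchange this reduces to
   a lwarx/stwcx. loop without the compare-and-branch of the
   compare-and-swap case:

   1:	lwarx  r9,0,rMEM
	stwcx. rVAL,0,rMEM
	bne-   cr0,1b

   plus whatever barriers the memory model requires.  */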
23645 void
23646 rs6000_expand_atomic_exchange (rtx operands[])
23648 rtx retval, mem, val, cond;
23649 machine_mode mode;
23650 enum memmodel model;
23651 rtx label, x, mask, shift;
23653 retval = operands[0];
23654 mem = operands[1];
23655 val = operands[2];
23656 model = memmodel_base (INTVAL (operands[3]));
23657 mode = GET_MODE (mem);
23659 mask = shift = NULL_RTX;
23660 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23662 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23664 /* Shift and mask VAL into position within the word. */
23665 val = convert_modes (SImode, mode, val, 1);
23666 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23667 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23669 /* Prepare to adjust the return value. */
23670 retval = gen_reg_rtx (SImode);
23671 mode = SImode;
23674 mem = rs6000_pre_atomic_barrier (mem, model);
23676 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23677 emit_label (XEXP (label, 0));
23679 emit_load_locked (mode, retval, mem);
23681 x = val;
23682 if (mask)
23683 x = rs6000_mask_atomic_subword (retval, val, mask);
23685 cond = gen_reg_rtx (CCmode);
23686 emit_store_conditional (mode, cond, mem, x);
23688 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23689 emit_unlikely_jump (x, label);
23691 rs6000_post_atomic_barrier (model);
23693 if (shift)
23694 rs6000_finish_atomic_subword (operands[0], retval, shift);
23697 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23698 to perform. MEM is the memory on which to operate. VAL is the second
23699 operand of the binary operator. BEFORE and AFTER are optional locations to
23700 return the value of MEM either before or after the operation. MODEL_RTX
23701 is a CONST_INT containing the memory model to use. */
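/* Illustrative sketch only: for CODE == PLUS on a word, the expansion
   below is essentially an atomic fetch-and-add loop:

   1:	lwarx  r9,0,rMEM	# BEFORE
	add    r10,r9,rVAL	# AFTER
	stwcx. r10,0,rMEM
	bne-   cr0,1b

   Note that CODE == NOT is used for the fetch-and-nand patterns: the
   AND/NOT pair in the loop body computes ~(BEFORE & VAL).  */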
23703 void
23704 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23705 rtx orig_before, rtx orig_after, rtx model_rtx)
23707 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23708 machine_mode mode = GET_MODE (mem);
23709 machine_mode store_mode = mode;
23710 rtx label, x, cond, mask, shift;
23711 rtx before = orig_before, after = orig_after;
23713 mask = shift = NULL_RTX;
23714 /* On power8, we want to use SImode for the operation. On previous systems,
23715 do the operation on a full word and shift/mask to get the proper byte or
23716 halfword. */
23717 if (mode == QImode || mode == HImode)
23719 if (TARGET_SYNC_HI_QI)
23721 val = convert_modes (SImode, mode, val, 1);
23723 /* Prepare to adjust the return value. */
23724 before = gen_reg_rtx (SImode);
23725 if (after)
23726 after = gen_reg_rtx (SImode);
23727 mode = SImode;
23729 else
23731 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23733 /* Shift and mask VAL into position within the word. */
23734 val = convert_modes (SImode, mode, val, 1);
23735 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23736 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23738 switch (code)
23740 case IOR:
23741 case XOR:
23742 /* We've already zero-extended VAL. That is sufficient to
23743 make certain that it does not affect other bits. */
23744 mask = NULL;
23745 break;
23747 case AND:
23748 /* If we make certain that all of the other bits in VAL are
23749 set, that will be sufficient to not affect other bits. */
23750 x = gen_rtx_NOT (SImode, mask);
23751 x = gen_rtx_IOR (SImode, x, val);
23752 emit_insn (gen_rtx_SET (val, x));
23753 mask = NULL;
23754 break;
23756 case NOT:
23757 case PLUS:
23758 case MINUS:
23759 /* These will all affect bits outside the field and need
23760 adjustment via MASK within the loop. */
23761 break;
23763 default:
23764 gcc_unreachable ();
23767 /* Prepare to adjust the return value. */
23768 before = gen_reg_rtx (SImode);
23769 if (after)
23770 after = gen_reg_rtx (SImode);
23771 store_mode = mode = SImode;
23775 mem = rs6000_pre_atomic_barrier (mem, model);
23777 label = gen_label_rtx ();
23778 emit_label (label);
23779 label = gen_rtx_LABEL_REF (VOIDmode, label);
23781 if (before == NULL_RTX)
23782 before = gen_reg_rtx (mode);
23784 emit_load_locked (mode, before, mem);
23786 if (code == NOT)
23788 x = expand_simple_binop (mode, AND, before, val,
23789 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23790 after = expand_simple_unop (mode, NOT, x, after, 1);
23792 else
23794 after = expand_simple_binop (mode, code, before, val,
23795 after, 1, OPTAB_LIB_WIDEN);
23798 x = after;
23799 if (mask)
23801 x = expand_simple_binop (SImode, AND, after, mask,
23802 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23803 x = rs6000_mask_atomic_subword (before, x, mask);
23805 else if (store_mode != mode)
23806 x = convert_modes (store_mode, mode, x, 1);
23808 cond = gen_reg_rtx (CCmode);
23809 emit_store_conditional (store_mode, cond, mem, x);
23811 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23812 emit_unlikely_jump (x, label);
23814 rs6000_post_atomic_barrier (model);
23816 if (shift)
23818 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23819 then do the calculations in a SImode register. */
23820 if (orig_before)
23821 rs6000_finish_atomic_subword (orig_before, before, shift);
23822 if (orig_after)
23823 rs6000_finish_atomic_subword (orig_after, after, shift);
23825 else if (store_mode != mode)
23827 /* QImode/HImode on machines with lbarx/lharx where we do the native
23828 operation and then do the calculations in a SImode register. */
23829 if (orig_before)
23830 convert_move (orig_before, before, 1);
23831 if (orig_after)
23832 convert_move (orig_after, after, 1);
23834 else if (orig_after && after != orig_after)
23835 emit_move_insn (orig_after, after);
23838 /* Emit instructions to move SRC to DST. Called by splitters for
23839 multi-register moves. It will emit at most one instruction for
23840 each register that is accessed; that is, it won't emit li/lis pairs
23841 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23842 register. */
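/* For example (illustrative only), a TImode register-to-register move
   in 32-bit mode is split into four SImode moves; when the source and
   destination register ranges overlap, the moves are emitted in
   reverse order so that no input word is clobbered before it has been
   read.  */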
23844 void
23845 rs6000_split_multireg_move (rtx dst, rtx src)
23847 /* The register number of the first register being moved. */
23848 int reg;
23849 /* The mode that is to be moved. */
23850 machine_mode mode;
23851 /* The mode that the move is being done in, and its size. */
23852 machine_mode reg_mode;
23853 int reg_mode_size;
23854 /* The number of registers that will be moved. */
23855 int nregs;
23857 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23858 mode = GET_MODE (dst);
23859 nregs = hard_regno_nregs[reg][mode];
23860 if (FP_REGNO_P (reg))
23861 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23862 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23863 else if (ALTIVEC_REGNO_P (reg))
23864 reg_mode = V16QImode;
23865 else
23866 reg_mode = word_mode;
23867 reg_mode_size = GET_MODE_SIZE (reg_mode);
23869 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23871 /* TDmode residing in FP registers is special, since the ISA requires that
23872 the lower-numbered word of a register pair is always the most significant
23873 word, even in little-endian mode. This does not match the usual subreg
23874 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23875 the appropriate constituent registers "by hand" in little-endian mode.
23877 Note we do not need to check for destructive overlap here since TDmode
23878 can only reside in even/odd register pairs. */
23879 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23881 rtx p_src, p_dst;
23882 int i;
23884 for (i = 0; i < nregs; i++)
23886 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23887 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23888 else
23889 p_src = simplify_gen_subreg (reg_mode, src, mode,
23890 i * reg_mode_size);
23892 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23893 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23894 else
23895 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23896 i * reg_mode_size);
23898 emit_insn (gen_rtx_SET (p_dst, p_src));
23901 return;
23904 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23906 /* Move register range backwards, if we might have destructive
23907 overlap. */
23908 int i;
23909 for (i = nregs - 1; i >= 0; i--)
23910 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23911 i * reg_mode_size),
23912 simplify_gen_subreg (reg_mode, src, mode,
23913 i * reg_mode_size)));
23915 else
23917 int i;
23918 int j = -1;
23919 bool used_update = false;
23920 rtx restore_basereg = NULL_RTX;
23922 if (MEM_P (src) && INT_REGNO_P (reg))
23924 rtx breg;
23926 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23927 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23929 rtx delta_rtx;
23930 breg = XEXP (XEXP (src, 0), 0);
23931 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23932 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23933 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23934 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23935 src = replace_equiv_address (src, breg);
23937 else if (! rs6000_offsettable_memref_p (src, reg_mode))
23939 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23941 rtx basereg = XEXP (XEXP (src, 0), 0);
23942 if (TARGET_UPDATE)
23944 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23945 emit_insn (gen_rtx_SET (ndst,
23946 gen_rtx_MEM (reg_mode,
23947 XEXP (src, 0))));
23948 used_update = true;
23950 else
23951 emit_insn (gen_rtx_SET (basereg,
23952 XEXP (XEXP (src, 0), 1)));
23953 src = replace_equiv_address (src, basereg);
23955 else
23957 rtx basereg = gen_rtx_REG (Pmode, reg);
23958 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23959 src = replace_equiv_address (src, basereg);
23963 breg = XEXP (src, 0);
23964 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23965 breg = XEXP (breg, 0);
23967 /* If the base register we are using to address memory is
23968 also a destination reg, then change that register last. */
23969 if (REG_P (breg)
23970 && REGNO (breg) >= REGNO (dst)
23971 && REGNO (breg) < REGNO (dst) + nregs)
23972 j = REGNO (breg) - REGNO (dst);
23974 else if (MEM_P (dst) && INT_REGNO_P (reg))
23976 rtx breg;
23978 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23979 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23981 rtx delta_rtx;
23982 breg = XEXP (XEXP (dst, 0), 0);
23983 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23984 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23985 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23987 /* We have to update the breg before doing the store.
23988 Use store with update, if available. */
23990 if (TARGET_UPDATE)
23992 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23993 emit_insn (TARGET_32BIT
23994 ? (TARGET_POWERPC64
23995 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23996 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23997 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23998 used_update = true;
24000 else
24001 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24002 dst = replace_equiv_address (dst, breg);
24004 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
24005 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
24007 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
24009 rtx basereg = XEXP (XEXP (dst, 0), 0);
24010 if (TARGET_UPDATE)
24012 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24013 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24014 XEXP (dst, 0)),
24015 nsrc));
24016 used_update = true;
24018 else
24019 emit_insn (gen_rtx_SET (basereg,
24020 XEXP (XEXP (dst, 0), 1)));
24021 dst = replace_equiv_address (dst, basereg);
24023 else
24025 rtx basereg = XEXP (XEXP (dst, 0), 0);
24026 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24027 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24028 && REG_P (basereg)
24029 && REG_P (offsetreg)
24030 && REGNO (basereg) != REGNO (offsetreg));
24031 if (REGNO (basereg) == 0)
24033 rtx tmp = offsetreg;
24034 offsetreg = basereg;
24035 basereg = tmp;
24037 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24038 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24039 dst = replace_equiv_address (dst, basereg);
24042 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24043 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
24046 for (i = 0; i < nregs; i++)
24048 /* Calculate index to next subword. */
24049 ++j;
24050 if (j == nregs)
24051 j = 0;
24053 /* If compiler already emitted move of first word by
24054 store with update, no need to do anything. */
24055 if (j == 0 && used_update)
24056 continue;
24058 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24059 j * reg_mode_size),
24060 simplify_gen_subreg (reg_mode, src, mode,
24061 j * reg_mode_size)));
24063 if (restore_basereg != NULL_RTX)
24064 emit_insn (restore_basereg);
24069 /* This page contains routines that are used to determine what the
24070 function prologue and epilogue code will do and write them out. */
24072 /* Determine whether the REG is really used. */
24074 static bool
24075 save_reg_p (int reg)
24077 /* We need to mark the PIC offset register live for the same conditions
24078 as it is set up; otherwise it won't be saved before we clobber it. */
24080 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24082 /* When calling eh_return, we must return true for all the cases
24083 where conditional_register_usage marks the PIC offset reg
24084 call used. */
24085 if (TARGET_TOC && TARGET_MINIMAL_TOC
24086 && (crtl->calls_eh_return
24087 || df_regs_ever_live_p (reg)
24088 || !constant_pool_empty_p ()))
24089 return true;
24091 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24092 && flag_pic)
24093 return true;
24096 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24099 /* Return the first fixed-point register that is required to be
24100 saved. 32 if none. */
24102 int
24103 first_reg_to_save (void)
24105 int first_reg;
24107 /* Find lowest numbered live register. */
24108 for (first_reg = 13; first_reg <= 31; first_reg++)
24109 if (save_reg_p (first_reg))
24110 break;
24112 #if TARGET_MACHO
24113 if (flag_pic
24114 && crtl->uses_pic_offset_table
24115 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24116 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24117 #endif
24119 return first_reg;
24122 /* Similar, for FP regs. */
24124 int
24125 first_fp_reg_to_save (void)
24127 int first_reg;
24129 /* Find lowest numbered live register. */
24130 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24131 if (save_reg_p (first_reg))
24132 break;
24134 return first_reg;
24137 /* Similar, for AltiVec regs. */
24139 static int
24140 first_altivec_reg_to_save (void)
24142 int i;
24144 /* Stack frame remains as is unless we are in AltiVec ABI. */
24145 if (! TARGET_ALTIVEC_ABI)
24146 return LAST_ALTIVEC_REGNO + 1;
24148 /* On Darwin, the unwind routines are compiled without
24149 TARGET_ALTIVEC, and use save_world to save/restore the
24150 altivec registers when necessary. */
24151 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24152 && ! TARGET_ALTIVEC)
24153 return FIRST_ALTIVEC_REGNO + 20;
24155 /* Find lowest numbered live register. */
24156 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24157 if (save_reg_p (i))
24158 break;
24160 return i;
24163 /* Return a 32-bit mask of the AltiVec registers we need to set in
24164 VRSAVE. Bit n of the return value is 1 if Vn is live, with bits
24165 numbered from the MSB (i.e. the MSB of the 32-bit word is bit 0). */
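/* For example (illustrative only): if V20 and V31 are live, the mask
   is (0x80000000 >> 20) | (0x80000000 >> 31) = 0x00000801.  */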
24167 static unsigned int
24168 compute_vrsave_mask (void)
24170 unsigned int i, mask = 0;
24172 /* On Darwin, the unwind routines are compiled without
24173 TARGET_ALTIVEC, and use save_world to save/restore the
24174 call-saved altivec registers when necessary. */
24175 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24176 && ! TARGET_ALTIVEC)
24177 mask |= 0xFFF;
24179 /* First, find out if we use _any_ altivec registers. */
24180 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24181 if (df_regs_ever_live_p (i))
24182 mask |= ALTIVEC_REG_BIT (i);
24184 if (mask == 0)
24185 return mask;
24187 /* Next, remove the argument registers from the set. These must
24188 be in the VRSAVE mask set by the caller, so we don't need to add
24189 them in again. More importantly, the mask we compute here is
24190 used to generate CLOBBERs in the set_vrsave insn, and we do not
24191 wish the argument registers to die. */
24192 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24193 mask &= ~ALTIVEC_REG_BIT (i);
24195 /* Similarly, remove the return value from the set. */
24197 bool yes = false;
24198 diddle_return_value (is_altivec_return_reg, &yes);
24199 if (yes)
24200 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24203 return mask;
24206 /* For a very restricted set of circumstances, we can cut down the
24207 size of prologues/epilogues by calling our own save/restore-the-world
24208 routines. */
24210 static void
24211 compute_save_world_info (rs6000_stack_t *info)
24213 info->world_save_p = 1;
24214 info->world_save_p
24215 = (WORLD_SAVE_P (info)
24216 && DEFAULT_ABI == ABI_DARWIN
24217 && !cfun->has_nonlocal_label
24218 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24219 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24220 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24221 && info->cr_save_p);
24223 /* This will not work in conjunction with sibcalls. Make sure there
24224 are none. (This check is expensive, but seldom executed.) */
24225 if (WORLD_SAVE_P (info))
24227 rtx_insn *insn;
24228 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24229 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24231 info->world_save_p = 0;
24232 break;
24236 if (WORLD_SAVE_P (info))
24238 /* Even if we're not touching VRsave, make sure there's room on the
24239 stack for it, if it looks like we're calling SAVE_WORLD, which
24240 will attempt to save it. */
24241 info->vrsave_size = 4;
24243 /* If we are going to save the world, we need to save the link register too. */
24244 info->lr_save_p = 1;
24246 /* "Save" the VRsave register too if we're saving the world. */
24247 if (info->vrsave_mask == 0)
24248 info->vrsave_mask = compute_vrsave_mask ();
24250 /* Because the Darwin register save/restore routines only handle
24251 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24252 check. */
24253 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24254 && (info->first_altivec_reg_save
24255 >= FIRST_SAVED_ALTIVEC_REGNO));
24258 return;
24262 static void
24263 is_altivec_return_reg (rtx reg, void *xyes)
24265 bool *yes = (bool *) xyes;
24266 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24267 *yes = true;
24271 /* Return whether REG is a global user reg or has been specified by
24272 -ffixed-REG. We should not restore these, and so cannot use
24273 lmw or out-of-line restore functions if there are any. We also
24274 can't save them (well, emit frame notes for them), because frame
24275 unwinding during exception handling will restore saved registers. */
24277 static bool
24278 fixed_reg_p (int reg)
24280 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24281 backend sets it, overriding anything the user might have given. */
24282 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24283 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24284 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24285 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24286 return false;
24288 return fixed_regs[reg];
24291 /* Determine the strategy for saving/restoring registers. */
24293 enum {
24294 SAVE_MULTIPLE = 0x1,
24295 SAVE_INLINE_GPRS = 0x2,
24296 SAVE_INLINE_FPRS = 0x4,
24297 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24298 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24299 SAVE_INLINE_VRS = 0x20,
24300 REST_MULTIPLE = 0x100,
24301 REST_INLINE_GPRS = 0x200,
24302 REST_INLINE_FPRS = 0x400,
24303 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24304 REST_INLINE_VRS = 0x1000
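/* For example (illustrative only), a 32-bit function whose strategy
   includes SAVE_INLINE_GPRS | SAVE_MULTIPLE saves its GPRs with a
   single stmw in the prologue, while a strategy without
   SAVE_INLINE_FPRS makes the prologue call one of the out-of-line
   register save routines (e.g. the _savefpr_N routines in libgcc on
   some targets) instead.  */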
24307 static int
24308 rs6000_savres_strategy (rs6000_stack_t *info,
24309 bool using_static_chain_p)
24311 int strategy = 0;
24313 /* Select between in-line and out-of-line save and restore of regs.
24314 First, all the obvious cases where we don't use out-of-line. */
24315 if (crtl->calls_eh_return
24316 || cfun->machine->ra_need_lr)
24317 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24318 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24319 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24321 if (info->first_gp_reg_save == 32)
24322 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24324 if (info->first_fp_reg_save == 64
24325 /* The out-of-line FP routines use double-precision stores;
24326 we can't use those routines if we don't have such stores. */
24327 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24328 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24330 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24331 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24333 /* Define cutoff for using out-of-line functions to save registers. */
24334 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24336 if (!optimize_size)
24338 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24339 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24340 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24342 else
24344 /* Prefer out-of-line restore if it will exit. */
24345 if (info->first_fp_reg_save > 61)
24346 strategy |= SAVE_INLINE_FPRS;
24347 if (info->first_gp_reg_save > 29)
24349 if (info->first_fp_reg_save == 64)
24350 strategy |= SAVE_INLINE_GPRS;
24351 else
24352 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24354 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24355 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24358 else if (DEFAULT_ABI == ABI_DARWIN)
24360 if (info->first_fp_reg_save > 60)
24361 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24362 if (info->first_gp_reg_save > 29)
24363 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24364 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24366 else
24368 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24369 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24370 || info->first_fp_reg_save > 61)
24371 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24372 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24373 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24376 /* Don't bother to try to save things out-of-line if r11 is occupied
24377 by the static chain. It would require too much fiddling and the
24378 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24379 pointer on Darwin, and AIX uses r1 or r12. */
24380 if (using_static_chain_p
24381 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24382 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24383 | SAVE_INLINE_GPRS
24384 | SAVE_INLINE_VRS);
24386 /* Don't ever restore fixed regs. That means we can't use the
24387 out-of-line register restore functions if a fixed reg is in the
24388 range of regs restored. */
24389 if (!(strategy & REST_INLINE_FPRS))
24390 for (int i = info->first_fp_reg_save; i < 64; i++)
24391 if (fixed_regs[i])
24393 strategy |= REST_INLINE_FPRS;
24394 break;
24397 /* We can only use the out-of-line routines to restore fprs if we've
24398 saved all the registers from first_fp_reg_save in the prologue.
24399 Otherwise, we risk loading garbage. Of course, if we have saved
24400 out-of-line then we know we haven't skipped any fprs. */
24401 if ((strategy & SAVE_INLINE_FPRS)
24402 && !(strategy & REST_INLINE_FPRS))
24403 for (int i = info->first_fp_reg_save; i < 64; i++)
24404 if (!save_reg_p (i))
24406 strategy |= REST_INLINE_FPRS;
24407 break;
24410 /* Similarly, for altivec regs. */
24411 if (!(strategy & REST_INLINE_VRS))
24412 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24413 if (fixed_regs[i])
24415 strategy |= REST_INLINE_VRS;
24416 break;
24419 if ((strategy & SAVE_INLINE_VRS)
24420 && !(strategy & REST_INLINE_VRS))
24421 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24422 if (!save_reg_p (i))
24424 strategy |= REST_INLINE_VRS;
24425 break;
24428 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24429 saved is an out-of-line save or restore. Set up the value for
24430 the next test (excluding out-of-line gprs). */
24431 bool lr_save_p = (info->lr_save_p
24432 || !(strategy & SAVE_INLINE_FPRS)
24433 || !(strategy & SAVE_INLINE_VRS)
24434 || !(strategy & REST_INLINE_FPRS)
24435 || !(strategy & REST_INLINE_VRS));
24437 if (TARGET_MULTIPLE
24438 && !TARGET_POWERPC64
24439 && info->first_gp_reg_save < 31
24440 && !(flag_shrink_wrap
24441 && flag_shrink_wrap_separate
24442 && optimize_function_for_speed_p (cfun)))
24444 int count = 0;
24445 for (int i = info->first_gp_reg_save; i < 32; i++)
24446 if (save_reg_p (i))
24447 count++;
24449 if (count <= 1)
24450 /* Don't use store multiple if only one reg needs to be
24451 saved. This can occur for example when the ABI_V4 pic reg
24452 (r30) needs to be saved to make calls, but r31 is not
24453 used. */
24454 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24455 else
24457 /* Prefer store multiple for saves over out-of-line
24458 routines, since the store-multiple instruction will
24459 always be smaller. */
24460 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24462 /* The situation is more complicated with load multiple.
24463 We'd prefer to use the out-of-line routines for restores,
24464 since the "exit" out-of-line routines can handle the
24465 restore of LR and the frame teardown. However it doesn't
24466 make sense to use the out-of-line routine if that is the
24467 only reason we'd need to save LR, and we can't use the
24468 "exit" out-of-line gpr restore if we have saved some
24469 fprs. In those cases it is advantageous to use load
24470 multiple when available. */
24471 if (info->first_fp_reg_save != 64 || !lr_save_p)
24472 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24476 /* Using the "exit" out-of-line routine does not improve code size
24477 if using it would require lr to be saved and if only saving one
24478 or two gprs. */
24479 else if (!lr_save_p && info->first_gp_reg_save > 29)
24480 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24482 /* Don't ever restore fixed regs. */
24483 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24484 for (int i = info->first_gp_reg_save; i < 32; i++)
24485 if (fixed_reg_p (i))
24487 strategy |= REST_INLINE_GPRS;
24488 strategy &= ~REST_MULTIPLE;
24489 break;
24492 /* We can only use load multiple or the out-of-line routines to
24493 restore gprs if we've saved all the registers from
24494 first_gp_reg_save. Otherwise, we risk loading garbage.
24495 Of course, if we have saved out-of-line or used stmw then we know
24496 we haven't skipped any gprs. */
24497 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24498 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24499 for (int i = info->first_gp_reg_save; i < 32; i++)
24500 if (!save_reg_p (i))
24502 strategy |= REST_INLINE_GPRS;
24503 strategy &= ~REST_MULTIPLE;
24504 break;
24507 if (TARGET_ELF && TARGET_64BIT)
24509 if (!(strategy & SAVE_INLINE_FPRS))
24510 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24511 else if (!(strategy & SAVE_INLINE_GPRS)
24512 && info->first_fp_reg_save == 64)
24513 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24515 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24516 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24518 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24519 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24521 return strategy;
24524 /* Calculate the stack information for the current function. This is
24525 complicated by having two separate calling sequences, the AIX calling
24526 sequence and the V.4 calling sequence.
24528 AIX (and Darwin/Mac OS X) stack frames look like:
24529 32-bit 64-bit
24530 SP----> +---------------------------------------+
24531 | back chain to caller | 0 0
24532 +---------------------------------------+
24533 | saved CR | 4 8 (8-11)
24534 +---------------------------------------+
24535 | saved LR | 8 16
24536 +---------------------------------------+
24537 | reserved for compilers | 12 24
24538 +---------------------------------------+
24539 | reserved for binders | 16 32
24540 +---------------------------------------+
24541 | saved TOC pointer | 20 40
24542 +---------------------------------------+
24543 | Parameter save area (+padding*) (P) | 24 48
24544 +---------------------------------------+
24545 | Alloca space (A) | 24+P etc.
24546 +---------------------------------------+
24547 | Local variable space (L) | 24+P+A
24548 +---------------------------------------+
24549 | Float/int conversion temporary (X) | 24+P+A+L
24550 +---------------------------------------+
24551 | Save area for AltiVec registers (W) | 24+P+A+L+X
24552 +---------------------------------------+
24553 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24554 +---------------------------------------+
24555 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24556 +---------------------------------------+
24557 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24558 +---------------------------------------+
24559 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24560 +---------------------------------------+
24561 old SP->| back chain to caller's caller |
24562 +---------------------------------------+
24564 * If the alloca area is present, the parameter save area is
24565 padded so that the former starts 16-byte aligned.
24567 The required alignment for AIX configurations is two words (i.e., 8
24568 or 16 bytes).
24570 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24572 SP----> +---------------------------------------+
24573 | Back chain to caller | 0
24574 +---------------------------------------+
24575 | Save area for CR | 8
24576 +---------------------------------------+
24577 | Saved LR | 16
24578 +---------------------------------------+
24579 | Saved TOC pointer | 24
24580 +---------------------------------------+
24581 | Parameter save area (+padding*) (P) | 32
24582 +---------------------------------------+
24583 | Alloca space (A) | 32+P
24584 +---------------------------------------+
24585 | Local variable space (L) | 32+P+A
24586 +---------------------------------------+
24587 | Save area for AltiVec registers (W) | 32+P+A+L
24588 +---------------------------------------+
24589 | AltiVec alignment padding (Y) | 32+P+A+L+W
24590 +---------------------------------------+
24591 | Save area for GP registers (G) | 32+P+A+L+W+Y
24592 +---------------------------------------+
24593 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24594 +---------------------------------------+
24595 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24596 +---------------------------------------+
24598 * If the alloca area is present, the parameter save area is
24599 padded so that the former starts 16-byte aligned.
24601 V.4 stack frames look like:
24603 SP----> +---------------------------------------+
24604 | back chain to caller | 0
24605 +---------------------------------------+
24606 | caller's saved LR | 4
24607 +---------------------------------------+
24608 | Parameter save area (+padding*) (P) | 8
24609 +---------------------------------------+
24610 | Alloca space (A) | 8+P
24611 +---------------------------------------+
24612 | Varargs save area (V) | 8+P+A
24613 +---------------------------------------+
24614 | Local variable space (L) | 8+P+A+V
24615 +---------------------------------------+
24616 | Float/int conversion temporary (X) | 8+P+A+V+L
24617 +---------------------------------------+
24618 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24619 +---------------------------------------+
24620 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24621 +---------------------------------------+
24622 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24623 +---------------------------------------+
24624 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24625 +---------------------------------------+
24626 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24627 +---------------------------------------+
24628 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24629 +---------------------------------------+
24630 old SP->| back chain to caller's caller |
24631 +---------------------------------------+
24633 * If the alloca area is present and the required alignment is
24634 16 bytes, the parameter save area is padded so that the
24635 alloca area starts 16-byte aligned.
24637 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24638 given. (But note below and in sysv4.h that we require only 8 and
24639 may round up the size of our stack frame anyway. The historical
24640 reason is early versions of powerpc-linux which didn't properly
24641 align the stack at program startup. A happy side-effect is that
24642 -mno-eabi libraries can be used with -meabi programs.)
24644 The EABI configuration defaults to the V.4 layout. However,
24645 the stack alignment requirements may differ. If -mno-eabi is not
24646 given, the required stack alignment is 8 bytes; if -mno-eabi is
24647 given, the required alignment is 16 bytes. (But see V.4 comment
24648 above.) */
24650 #ifndef ABI_STACK_BOUNDARY
24651 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24652 #endif
24654 static rs6000_stack_t *
24655 rs6000_stack_info (void)
24657 /* We should never be called for thunks; we are not set up for that. */
24658 gcc_assert (!cfun->is_thunk);
24660 rs6000_stack_t *info = &stack_info;
24661 int reg_size = TARGET_32BIT ? 4 : 8;
24662 int ehrd_size;
24663 int ehcr_size;
24664 int save_align;
24665 int first_gp;
24666 HOST_WIDE_INT non_fixed_size;
24667 bool using_static_chain_p;
24669 if (reload_completed && info->reload_completed)
24670 return info;
24672 memset (info, 0, sizeof (*info));
24673 info->reload_completed = reload_completed;
24675 /* Select which calling sequence. */
24676 info->abi = DEFAULT_ABI;
24678 /* Calculate which registers need to be saved & save area size. */
24679 info->first_gp_reg_save = first_reg_to_save ();
24680 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24681 even if it currently looks like we won't. Reload may need it to
24682 get at a constant; if so, it will have already created a constant
24683 pool entry for it. */
24684 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24685 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24686 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24687 && crtl->uses_const_pool
24688 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24689 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24690 else
24691 first_gp = info->first_gp_reg_save;
24693 info->gp_size = reg_size * (32 - first_gp);
24695 info->first_fp_reg_save = first_fp_reg_to_save ();
24696 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24698 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24699 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24700 - info->first_altivec_reg_save);
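/* For example (illustrative only): on a 32-bit target that saves
   r30-r31 and f31 but no AltiVec registers, reg_size = 4,
   first_gp_reg_save = 30 and first_fp_reg_save = 63, giving
   gp_size = 4 * (32 - 30) = 8 and fp_size = 8 * (64 - 63) = 8,
   with altivec_size = 0.  */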
24702 /* Does this function call anything? */
24703 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24705 /* Determine if we need to save the condition code registers. */
24706 if (save_reg_p (CR2_REGNO)
24707 || save_reg_p (CR3_REGNO)
24708 || save_reg_p (CR4_REGNO))
24710 info->cr_save_p = 1;
24711 if (DEFAULT_ABI == ABI_V4)
24712 info->cr_size = reg_size;
24715 /* If the current function calls __builtin_eh_return, then we need
24716 to allocate stack space for registers that will hold data for
24717 the exception handler. */
24718 if (crtl->calls_eh_return)
24720 unsigned int i;
24721 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24722 continue;
24724 ehrd_size = i * UNITS_PER_WORD;
24726 else
24727 ehrd_size = 0;
24729 /* In the ELFv2 ABI, we also need to allocate space for separate
24730 CR field save areas if the function calls __builtin_eh_return. */
24731 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24733 /* This hard-codes that we have three call-saved CR fields. */
24734 ehcr_size = 3 * reg_size;
24735 /* We do *not* use the regular CR save mechanism. */
24736 info->cr_save_p = 0;
24738 else
24739 ehcr_size = 0;
24741 /* Determine various sizes. */
24742 info->reg_size = reg_size;
24743 info->fixed_size = RS6000_SAVE_AREA;
24744 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24745 if (cfun->calls_alloca)
24746 info->parm_size =
24747 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24748 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24749 else
24750 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24751 TARGET_ALTIVEC ? 16 : 8);
24752 if (FRAME_GROWS_DOWNWARD)
24753 info->vars_size
24754 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24755 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24756 - (info->fixed_size + info->vars_size + info->parm_size);
24758 if (TARGET_ALTIVEC_ABI)
24759 info->vrsave_mask = compute_vrsave_mask ();
24761 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24762 info->vrsave_size = 4;
24764 compute_save_world_info (info);
24766 /* Calculate the offsets. */
24767 switch (DEFAULT_ABI)
24769 case ABI_NONE:
24770 default:
24771 gcc_unreachable ();
24773 case ABI_AIX:
24774 case ABI_ELFv2:
24775 case ABI_DARWIN:
24776 info->fp_save_offset = -info->fp_size;
24777 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24779 if (TARGET_ALTIVEC_ABI)
24781 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24783 /* Align stack so vector save area is on a quadword boundary.
24784 The padding goes above the vectors. */
24785 if (info->altivec_size != 0)
24786 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24788 info->altivec_save_offset = info->vrsave_save_offset
24789 - info->altivec_padding_size
24790 - info->altivec_size;
24791 gcc_assert (info->altivec_size == 0
24792 || info->altivec_save_offset % 16 == 0);
24794 /* Adjust for AltiVec case. */
24795 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24797 else
24798 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24800 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24801 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24802 info->lr_save_offset = 2*reg_size;
24803 break;
24805 case ABI_V4:
24806 info->fp_save_offset = -info->fp_size;
24807 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24808 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24810 if (TARGET_ALTIVEC_ABI)
24812 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24814 /* Align stack so vector save area is on a quadword boundary. */
24815 if (info->altivec_size != 0)
24816 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24818 info->altivec_save_offset = info->vrsave_save_offset
24819 - info->altivec_padding_size
24820 - info->altivec_size;
24822 /* Adjust for AltiVec case. */
24823 info->ehrd_offset = info->altivec_save_offset;
24825 else
24826 info->ehrd_offset = info->cr_save_offset;
24828 info->ehrd_offset -= ehrd_size;
24829 info->lr_save_offset = reg_size;
24832 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24833 info->save_size = RS6000_ALIGN (info->fp_size
24834 + info->gp_size
24835 + info->altivec_size
24836 + info->altivec_padding_size
24837 + ehrd_size
24838 + ehcr_size
24839 + info->cr_size
24840 + info->vrsave_size,
24841 save_align);
24843 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24845 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24846 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24848 /* Determine if we need to save the link register. */
24849 if (info->calls_p
24850 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24851 && crtl->profile
24852 && !TARGET_PROFILE_KERNEL)
24853 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24854 #ifdef TARGET_RELOCATABLE
24855 || (DEFAULT_ABI == ABI_V4
24856 && (TARGET_RELOCATABLE || flag_pic > 1)
24857 && !constant_pool_empty_p ())
24858 #endif
24859 || rs6000_ra_ever_killed ())
24860 info->lr_save_p = 1;
24862 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24863 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24864 && call_used_regs[STATIC_CHAIN_REGNUM]);
24865 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24867 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24868 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24869 || !(info->savres_strategy & SAVE_INLINE_VRS)
24870 || !(info->savres_strategy & REST_INLINE_GPRS)
24871 || !(info->savres_strategy & REST_INLINE_FPRS)
24872 || !(info->savres_strategy & REST_INLINE_VRS))
24873 info->lr_save_p = 1;
24875 if (info->lr_save_p)
24876 df_set_regs_ever_live (LR_REGNO, true);
24878 /* Determine if we need to allocate any stack frame:
24880 For AIX we need to push the stack if a frame pointer is needed
24881 (because the stack might be dynamically adjusted), if we are
24882 debugging, if we make calls, or if the sum of fp_save, gp_save,
24883 and local variables are more than the space needed to save all
24884 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24885 + 18*8 = 288 (GPR13 reserved).
24887 For V.4 we don't have the stack cushion that AIX uses, but assume
24888 that the debugger can handle stackless frames. */
24890 if (info->calls_p)
24891 info->push_p = 1;
24893 else if (DEFAULT_ABI == ABI_V4)
24894 info->push_p = non_fixed_size != 0;
24896 else if (frame_pointer_needed)
24897 info->push_p = 1;
24899 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24900 info->push_p = 1;
24902 else
24903 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24905 return info;
24908 static void
24909 debug_stack_info (rs6000_stack_t *info)
24911 const char *abi_string;
24913 if (! info)
24914 info = rs6000_stack_info ();
24916 fprintf (stderr, "\nStack information for function %s:\n",
24917 ((current_function_decl && DECL_NAME (current_function_decl))
24918 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24919 : "<unknown>"));
24921 switch (info->abi)
24923 default: abi_string = "Unknown"; break;
24924 case ABI_NONE: abi_string = "NONE"; break;
24925 case ABI_AIX: abi_string = "AIX"; break;
24926 case ABI_ELFv2: abi_string = "ELFv2"; break;
24927 case ABI_DARWIN: abi_string = "Darwin"; break;
24928 case ABI_V4: abi_string = "V.4"; break;
24931 fprintf (stderr, "\tABI = %5s\n", abi_string);
24933 if (TARGET_ALTIVEC_ABI)
24934 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24936 if (info->first_gp_reg_save != 32)
24937 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24939 if (info->first_fp_reg_save != 64)
24940 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24942 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24943 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24944 info->first_altivec_reg_save);
24946 if (info->lr_save_p)
24947 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24949 if (info->cr_save_p)
24950 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24952 if (info->vrsave_mask)
24953 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24955 if (info->push_p)
24956 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24958 if (info->calls_p)
24959 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24961 if (info->gp_size)
24962 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24964 if (info->fp_size)
24965 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24967 if (info->altivec_size)
24968 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24969 info->altivec_save_offset);
24971 if (info->vrsave_size)
24972 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24973 info->vrsave_save_offset);
24975 if (info->lr_save_p)
24976 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24978 if (info->cr_save_p)
24979 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24981 if (info->varargs_save_offset)
24982 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24984 if (info->total_size)
24985 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24986 info->total_size);
24988 if (info->vars_size)
24989 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24990 info->vars_size);
24992 if (info->parm_size)
24993 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24995 if (info->fixed_size)
24996 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24998 if (info->gp_size)
24999 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25001 if (info->fp_size)
25002 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25004 if (info->altivec_size)
25005 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25007 if (info->vrsave_size)
25008 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25010 if (info->altivec_padding_size)
25011 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25012 info->altivec_padding_size);
25014 if (info->cr_size)
25015 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25017 if (info->save_size)
25018 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25020 if (info->reg_size != 4)
25021 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25023 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25025 fprintf (stderr, "\n");
25028 rtx
25029 rs6000_return_addr (int count, rtx frame)
25031 /* Currently we don't optimize very well between prologue and body
25032 code, and for PIC code the generated code can be quite bad, so
25033 don't try to be too clever here. */
25034 if (count != 0
25035 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25037 cfun->machine->ra_needs_full_frame = 1;
25039 return
25040 gen_rtx_MEM
25041 (Pmode,
25042 memory_address
25043 (Pmode,
25044 plus_constant (Pmode,
25045 copy_to_reg
25046 (gen_rtx_MEM (Pmode,
25047 memory_address (Pmode, frame))),
25048 RETURN_ADDRESS_OFFSET)));
25051 cfun->machine->ra_need_lr = 1;
25052 return get_hard_reg_initial_val (Pmode, LR_REGNO);
25055 /* Say whether a function is a candidate for sibcall handling or not. */
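/* For instance (illustrative only), a local tail call such as

	static int callee (int x) { return x + 1; }
	int caller (int x) { return callee (x); }

   can become a sibcall under the AIX/ELFv2 ABIs because the callee
   shares the caller's TOC, whereas a tail call to an external function
   cannot, since its TOC pointer may differ.  */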
25057 static bool
25058 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25060 tree fntype;
25062 if (decl)
25063 fntype = TREE_TYPE (decl);
25064 else
25065 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25067 /* We can't do it if the called function has more vector parameters
25068 than the current function; there's nowhere to put the VRsave code. */
25069 if (TARGET_ALTIVEC_ABI
25070 && TARGET_ALTIVEC_VRSAVE
25071 && !(decl && decl == current_function_decl))
25073 function_args_iterator args_iter;
25074 tree type;
25075 int nvreg = 0;
25077 /* Functions with vector parameters are required to have a
25078 prototype, so the argument type info must be available
25079 here. */
25080 FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
25081 if (TREE_CODE (type) == VECTOR_TYPE
25082 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25083 nvreg++;
25085 FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
25086 if (TREE_CODE (type) == VECTOR_TYPE
25087 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25088 nvreg--;
25090 if (nvreg > 0)
25091 return false;
25094 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25095 functions, because the callee may have a different TOC pointer from
25096 the caller's, and there's no way to ensure we restore the TOC when
25097 we return. With the secure-plt SYSV ABI we can't make non-local
25098 calls when -fpic/PIC because the plt call stubs use r30. */
25099 if (DEFAULT_ABI == ABI_DARWIN
25100 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25101 && decl
25102 && !DECL_EXTERNAL (decl)
25103 && !DECL_WEAK (decl)
25104 && (*targetm.binds_local_p) (decl))
25105 || (DEFAULT_ABI == ABI_V4
25106 && (!TARGET_SECURE_PLT
25107 || !flag_pic
25108 || (decl
25109 && (*targetm.binds_local_p) (decl)))))
25111 tree attr_list = TYPE_ATTRIBUTES (fntype);
25113 if (!lookup_attribute ("longcall", attr_list)
25114 || lookup_attribute ("shortcall", attr_list))
25115 return true;
25118 return false;
25121 static int
25122 rs6000_ra_ever_killed (void)
25124 rtx_insn *top;
25125 rtx reg;
25126 rtx_insn *insn;
25128 if (cfun->is_thunk)
25129 return 0;
25131 if (cfun->machine->lr_save_state)
25132 return cfun->machine->lr_save_state - 1;
25134 /* regs_ever_live has LR marked as used if any sibcalls are present,
25135 but this should not force saving and restoring in the
25136 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25137 clobbers LR, so that is inappropriate. */
25139 /* Also, the prologue can generate a store into LR that
25140 doesn't really count, like this:
25142 move LR->R0
25143 bcl to set PIC register
25144 move LR->R31
25145 move R0->LR
25147 When we're called from the epilogue, we need to avoid counting
25148 this as a store. */
25150 push_topmost_sequence ();
25151 top = get_insns ();
25152 pop_topmost_sequence ();
25153 reg = gen_rtx_REG (Pmode, LR_REGNO);
25155 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25157 if (INSN_P (insn))
25159 if (CALL_P (insn))
25161 if (!SIBLING_CALL_P (insn))
25162 return 1;
25164 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25165 return 1;
25166 else if (set_of (reg, insn) != NULL_RTX
25167 && !prologue_epilogue_contains (insn))
25168 return 1;
25171 return 0;
25174 /* Emit instructions needed to load the TOC register.
25175 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25176 a constant pool; or for SVR4 -fpic. */
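/* Illustrative sketch only: for 32-bit SVR4 secure-plt PIC code the
   sequence generated below is essentially

	bcl  20,31,1f
   1:	mflr r30
	addis r30,r30,(.LCTOC1-1b)@ha
	addi  r30,r30,(.LCTOC1-1b)@l

   i.e. a PC-relative computation of the GOT/TOC base into r30.  */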
25178 void
25179 rs6000_emit_load_toc_table (int fromprolog)
25181 rtx dest;
25182 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25184 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25186 char buf[30];
25187 rtx lab, tmp1, tmp2, got;
25189 lab = gen_label_rtx ();
25190 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25191 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25192 if (flag_pic == 2)
25194 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25195 need_toc_init = 1;
25197 else
25198 got = rs6000_got_sym ();
25199 tmp1 = tmp2 = dest;
25200 if (!fromprolog)
25202 tmp1 = gen_reg_rtx (Pmode);
25203 tmp2 = gen_reg_rtx (Pmode);
25205 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25206 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25207 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25208 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25210 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25212 emit_insn (gen_load_toc_v4_pic_si ());
25213 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25215 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25217 char buf[30];
25218 rtx temp0 = (fromprolog
25219 ? gen_rtx_REG (Pmode, 0)
25220 : gen_reg_rtx (Pmode));
25222 if (fromprolog)
25224 rtx symF, symL;
25226 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25227 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25229 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25230 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25232 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25233 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25234 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25236 else
25238 rtx tocsym, lab;
25240 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25241 need_toc_init = 1;
25242 lab = gen_label_rtx ();
25243 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25244 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25245 if (TARGET_LINK_STACK)
25246 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25247 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25249 emit_insn (gen_addsi3 (dest, temp0, dest));
25251 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25253 /* This is for AIX code running in non-PIC ELF32. */
25254 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25256 need_toc_init = 1;
25257 emit_insn (gen_elf_high (dest, realsym));
25258 emit_insn (gen_elf_low (dest, dest, realsym));
25260 else
25262 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25264 if (TARGET_32BIT)
25265 emit_insn (gen_load_toc_aix_si (dest));
25266 else
25267 emit_insn (gen_load_toc_aix_di (dest));
25271 /* Emit instructions to restore the link register after determining where
25272 its value has been stored. */
25274 void
25275 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25277 rs6000_stack_t *info = rs6000_stack_info ();
25278 rtx operands[2];
25280 operands[0] = source;
25281 operands[1] = scratch;
25283 if (info->lr_save_p)
25285 rtx frame_rtx = stack_pointer_rtx;
25286 HOST_WIDE_INT sp_offset = 0;
25287 rtx tmp;
25289 if (frame_pointer_needed
25290 || cfun->calls_alloca
25291 || info->total_size > 32767)
25293 tmp = gen_frame_mem (Pmode, frame_rtx);
25294 emit_move_insn (operands[1], tmp);
25295 frame_rtx = operands[1];
25297 else if (info->push_p)
25298 sp_offset = info->total_size;
25300 tmp = plus_constant (Pmode, frame_rtx,
25301 info->lr_save_offset + sp_offset);
25302 tmp = gen_frame_mem (Pmode, tmp);
25303 emit_move_insn (tmp, operands[0]);
25305 else
25306 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25308 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25309 state of lr_save_p so any change from here on would be a bug. In
25310 particular, stop rs6000_ra_ever_killed from considering the SET
25311 of lr we may have added just above. */
25312 cfun->machine->lr_save_state = info->lr_save_p + 1;
25315 static GTY(()) alias_set_type set = -1;
25317 alias_set_type
25318 get_TOC_alias_set (void)
25320 if (set == -1)
25321 set = new_alias_set ();
25322 return set;
25325 /* This returns nonzero if the current function uses the TOC. This is
25326 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25327 is generated by the ABI_V4 load_toc_* patterns. */
25328 #if TARGET_ELF
25329 static int
25330 uses_TOC (void)
25332 rtx_insn *insn;
25334 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25335 if (INSN_P (insn))
25337 rtx pat = PATTERN (insn);
25338 int i;
25340 if (GET_CODE (pat) == PARALLEL)
25341 for (i = 0; i < XVECLEN (pat, 0); i++)
25343 rtx sub = XVECEXP (pat, 0, i);
25344 if (GET_CODE (sub) == USE)
25346 sub = XEXP (sub, 0);
25347 if (GET_CODE (sub) == UNSPEC
25348 && XINT (sub, 1) == UNSPEC_TOC)
25349 return 1;
25353 return 0;
25355 #endif
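/* Create a reference to the TOC entry for SYMBOL.  As an illustrative
   sketch: with -mcmodel=small this returns a bare UNSPEC_TOCREL usable
   directly in a D-form access such as

	ld r9,sym@toc(r2)

   while larger code models split it into a HIGH/LO_SUM pair:

	addis r9,r2,sym@toc@ha
	ld    r9,sym@toc@l(r9)

   LARGETOC_REG, if non-null, receives the HIGH part once register
   allocation is done.  */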
25357 rtx
25358 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25360 rtx tocrel, tocreg, hi;
25362 if (TARGET_DEBUG_ADDR)
25364 if (GET_CODE (symbol) == SYMBOL_REF)
25365 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25366 XSTR (symbol, 0));
25367 else
25369 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25370 GET_RTX_NAME (GET_CODE (symbol)));
25371 debug_rtx (symbol);
25375 if (!can_create_pseudo_p ())
25376 df_set_regs_ever_live (TOC_REGISTER, true);
25378 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25379 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25380 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25381 return tocrel;
25383 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25384 if (largetoc_reg != NULL)
25386 emit_move_insn (largetoc_reg, hi);
25387 hi = largetoc_reg;
25389 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25392 /* Issue assembly directives that create a reference to the given DWARF
25393 FRAME_TABLE_LABEL from the current function section. */
25394 void
25395 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25397 fprintf (asm_out_file, "\t.ref %s\n",
25398 (* targetm.strip_name_encoding) (frame_table_label));
25401 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25402 and the change to the stack pointer. */
25404 static void
25405 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25407 rtvec p;
25408 int i;
25409 rtx regs[3];
25411 i = 0;
25412 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25413 if (hard_frame_needed)
25414 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25415 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25416 || (hard_frame_needed
25417 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25418 regs[i++] = fp;
25420 p = rtvec_alloc (i);
25421 while (--i >= 0)
25423 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25424 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25427 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
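/* Editor's illustration: with a hard frame pointer in use, the tie
   emitted above is roughly
     (parallel [(set (mem:BLK (reg 1)) (const_int 0))
		(set (mem:BLK (reg 31)) (const_int 0))])
   The dummy BLKmode stores conflict with every frame access, so the
   scheduler cannot move register saves or restores across the stack
   pointer update.  */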
25430 /* Emit the code for allocating stack space, as insns. If COPY_REG
25431 is non-null, leave a copy of the old stack pointer in it, offset by
25432 COPY_OFF. The generated code may use hard register 0 as a temporary. */
25434 static rtx_insn *
25435 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25437 rtx_insn *insn;
25438 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25439 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25440 rtx todec = gen_int_mode (-size, Pmode);
25441 rtx par, set, mem;
25443 if (INTVAL (todec) != -size)
25445 warning (0, "stack frame too large");
25446 emit_insn (gen_trap ());
25447 return 0;
25450 if (crtl->limit_stack)
25452 if (REG_P (stack_limit_rtx)
25453 && REGNO (stack_limit_rtx) > 1
25454 && REGNO (stack_limit_rtx) <= 31)
25456 rtx_insn *insn
25457 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25458 gcc_assert (insn);
25459 emit_insn (insn);
25460 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25462 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25463 && TARGET_32BIT
25464 && DEFAULT_ABI == ABI_V4
25465 && !flag_pic)
25467 rtx toload = gen_rtx_CONST (VOIDmode,
25468 gen_rtx_PLUS (Pmode,
25469 stack_limit_rtx,
25470 GEN_INT (size)));
25472 emit_insn (gen_elf_high (tmp_reg, toload));
25473 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25474 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25475 const0_rtx));
25477 else
25478 warning (0, "stack limit expression is not supported");
25481 if (copy_reg)
25483 if (copy_off != 0)
25484 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25485 else
25486 emit_move_insn (copy_reg, stack_reg);
25489 if (size > 32767)
25491 /* Need a note here so that try_split doesn't get confused. */
25492 if (get_last_insn () == NULL_RTX)
25493 emit_note (NOTE_INSN_DELETED);
25494 insn = emit_move_insn (tmp_reg, todec);
25495 try_split (PATTERN (insn), insn, 0);
25496 todec = tmp_reg;
25499 insn = emit_insn (TARGET_32BIT
25500 ? gen_movsi_update_stack (stack_reg, stack_reg,
25501 todec, stack_reg)
25502 : gen_movdi_di_update_stack (stack_reg, stack_reg,
25503 todec, stack_reg));
25504 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25505 it now and set the alias set/attributes. The above gen_*_update
25506 calls will generate a PARALLEL with the MEM set being the first
25507 operation. */
25508 par = PATTERN (insn);
25509 gcc_assert (GET_CODE (par) == PARALLEL);
25510 set = XVECEXP (par, 0, 0);
25511 gcc_assert (GET_CODE (set) == SET);
25512 mem = SET_DEST (set);
25513 gcc_assert (MEM_P (mem));
25514 MEM_NOTRAP_P (mem) = 1;
25515 set_mem_alias_set (mem, get_frame_alias_set ());
25517 RTX_FRAME_RELATED_P (insn) = 1;
25518 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25519 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
25520 GEN_INT (-size))));
25521 return insn;
25524 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25526 #if PROBE_INTERVAL > 32768
25527 #error Cannot use indexed addressing mode for stack probing
25528 #endif
25530 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25531 inclusive. These are offsets from the current stack pointer. */
25533 static void
25534 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25536 /* See if we have a constant small number of probes to generate. If so,
25537 that's the easy case. */
25538 if (first + size <= 32768)
25540 HOST_WIDE_INT i;
25542 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25543 it exceeds SIZE. If only one probe is needed, this will not
25544 generate any code. Then probe at FIRST + SIZE. */
25545 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25546 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25547 -(first + i)));
25549 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25550 -(first + size)));
25553 /* Otherwise, do the same as above, but in a loop. Note that we must be
25554 extra careful with variables wrapping around because we might be at
25555 the very top (or the very bottom) of the address space and we have
25556 to be able to handle this case properly; in particular, we use an
25557 equality test for the loop condition. */
25558 else
25560 HOST_WIDE_INT rounded_size;
25561 rtx r12 = gen_rtx_REG (Pmode, 12);
25562 rtx r0 = gen_rtx_REG (Pmode, 0);
25564 /* Sanity check for the addressing mode we're going to use. */
25565 gcc_assert (first <= 32768);
25567 /* Step 1: round SIZE to the previous multiple of the interval. */
25569 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25572 /* Step 2: compute initial and final value of the loop counter. */
25574 /* TEST_ADDR = SP + FIRST. */
25575 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25576 -first)));
25578 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25579 if (rounded_size > 32768)
25581 emit_move_insn (r0, GEN_INT (-rounded_size));
25582 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25584 else
25585 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25586 -rounded_size)));
25589 /* Step 3: the loop
25591 do {
25593 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25594 probe at TEST_ADDR
25595 }
25596 while (TEST_ADDR != LAST_ADDR)
25598 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25599 until it is equal to ROUNDED_SIZE. */
25601 if (TARGET_64BIT)
25602 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
25603 else
25604 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
25607 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25608 that SIZE is equal to ROUNDED_SIZE. */
25610 if (size != rounded_size)
25611 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
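/* Editor's worked example (assuming the default PROBE_INTERVAL of
   4096): for FIRST == 16 and SIZE == 40000, rounded_size == 36864,
   so the loop probes at sp-4112, sp-8208, ..., sp-36880, and because
   SIZE != ROUNDED_SIZE a final probe is emitted at sp-40016.  */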
25615 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25616 absolute addresses. */
25618 const char *
25619 output_probe_stack_range (rtx reg1, rtx reg2)
25621 static int labelno = 0;
25622 char loop_lab[32];
25623 rtx xops[2];
25625 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25627 /* Loop. */
25628 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25630 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25631 xops[0] = reg1;
25632 xops[1] = GEN_INT (-PROBE_INTERVAL);
25633 output_asm_insn ("addi %0,%0,%1", xops);
25635 /* Probe at TEST_ADDR. */
25636 xops[1] = gen_rtx_REG (Pmode, 0);
25637 output_asm_insn ("stw %1,0(%0)", xops);
25639 /* Test if TEST_ADDR == LAST_ADDR. */
25640 xops[1] = reg2;
25641 if (TARGET_64BIT)
25642 output_asm_insn ("cmpd 0,%0,%1", xops);
25643 else
25644 output_asm_insn ("cmpw 0,%0,%1", xops);
25646 /* Branch. */
25647 fputs ("\tbne 0,", asm_out_file);
25648 assemble_name_raw (asm_out_file, loop_lab);
25649 fputc ('\n', asm_out_file);
25651 return "";
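/* Editor's illustration of the emitted loop (32-bit, assuming
   PROBE_INTERVAL == 4096 and, as in the caller above, reg1 == r12
   and reg2 == r0):
     .LPSRL0:
	     addi 12,12,-4096
	     stw 0,0(12)
	     cmpw 0,12,0
	     bne 0,.LPSRL0  */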
25654 /* This function is called when rs6000_frame_related is processing
25655 SETs within a PARALLEL, and returns whether the REGNO save ought to
25656 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25657 for out-of-line register save functions, store multiple, and the
25658 Darwin world_save. They may contain registers that don't really
25659 need saving. */
25661 static bool
25662 interesting_frame_related_regno (unsigned int regno)
25664 /* Apparent saves of r0 are actually saves of LR. It doesn't make
25665 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25666 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25667 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25668 as frame related. */
25669 if (regno == 0)
25670 return true;
25671 /* If we see CR2 then we are here on a Darwin world save. Saves of
25672 CR2 signify the whole CR is being saved. This is a long-standing
25673 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25674 that CR needs to be saved. */
25675 if (regno == CR2_REGNO)
25676 return true;
25677 /* Omit frame info for any user-defined global regs. If frame info
25678 is supplied for them, frame unwinding will restore a user reg.
25679 Also omit frame info for any reg we don't need to save, as that
25680 bloats frame info and can cause problems with shrink wrapping.
25681 Since global regs won't be seen as needing to be saved, both of
25682 these conditions are covered by save_reg_p. */
25683 return save_reg_p (regno);
25686 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25687 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25688 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25689 deduce these equivalences by itself so it wasn't necessary to hold
25690 its hand so much. Don't be tempted to always supply d2_f_d_e with
25691 the actual cfa register, i.e. r31 when we are using a hard frame
25692 pointer. That fails when saving regs off r1, and sched moves the
25693 r31 setup past the reg saves. */
25695 static rtx_insn *
25696 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25697 rtx reg2, rtx repl2)
25699 rtx repl;
25701 if (REGNO (reg) == STACK_POINTER_REGNUM)
25703 gcc_checking_assert (val == 0);
25704 repl = NULL_RTX;
25706 else
25707 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25708 GEN_INT (val));
25710 rtx pat = PATTERN (insn);
25711 if (!repl && !reg2)
25713 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25714 if (GET_CODE (pat) == PARALLEL)
25715 for (int i = 0; i < XVECLEN (pat, 0); i++)
25716 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25718 rtx set = XVECEXP (pat, 0, i);
25720 if (!REG_P (SET_SRC (set))
25721 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25722 RTX_FRAME_RELATED_P (set) = 1;
25724 RTX_FRAME_RELATED_P (insn) = 1;
25725 return insn;
25728 /* We expect that 'pat' is either a SET or a PARALLEL containing
25729 SETs (and possibly other stuff). In a PARALLEL, each SET that
25730 saves a register worth noting (see interesting_frame_related_regno) must be marked RTX_FRAME_RELATED_P.
25731 Call simplify_replace_rtx on the SETs rather than the whole insn
25732 so as to leave the other stuff alone (for example USE of r12). */
25734 set_used_flags (pat);
25735 if (GET_CODE (pat) == SET)
25737 if (repl)
25738 pat = simplify_replace_rtx (pat, reg, repl);
25739 if (reg2)
25740 pat = simplify_replace_rtx (pat, reg2, repl2);
25742 else if (GET_CODE (pat) == PARALLEL)
25744 pat = shallow_copy_rtx (pat);
25745 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25747 for (int i = 0; i < XVECLEN (pat, 0); i++)
25748 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25750 rtx set = XVECEXP (pat, 0, i);
25752 if (repl)
25753 set = simplify_replace_rtx (set, reg, repl);
25754 if (reg2)
25755 set = simplify_replace_rtx (set, reg2, repl2);
25756 XVECEXP (pat, 0, i) = set;
25758 if (!REG_P (SET_SRC (set))
25759 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25760 RTX_FRAME_RELATED_P (set) = 1;
25763 else
25764 gcc_unreachable ();
25766 RTX_FRAME_RELATED_P (insn) = 1;
25767 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25769 return insn;
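/* Editor's example: if INSN stores r31 at offset 8 from r11, and
   REG/VAL say that r11 == r1 + 112, the REG_FRAME_RELATED_EXPR note
   attached above reads
     (set (mem (plus (reg 1) (const_int 120))) (reg 31))
   so the unwinder sees the save at a fixed offset from the stack
   pointer even though the insn itself addressed via r11.  */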
25772 /* Returns an insn that has a vrsave set operation with the
25773 appropriate CLOBBERs. */
25775 static rtx
25776 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25778 int nclobs, i;
25779 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25780 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25782 clobs[0]
25783 = gen_rtx_SET (vrsave,
25784 gen_rtx_UNSPEC_VOLATILE (SImode,
25785 gen_rtvec (2, reg, vrsave),
25786 UNSPECV_SET_VRSAVE));
25788 nclobs = 1;
25790 /* We need to clobber the registers in the mask so the scheduler
25791 does not move sets to VRSAVE before sets of AltiVec registers.
25793 However, if the function receives nonlocal gotos, reload will set
25794 all call saved registers live. We will end up with:
25796 (set (reg 999) (mem))
25797 (parallel [ (set (reg vrsave) (unspec blah))
25798 (clobber (reg 999))])
25800 The clobber will cause the store into reg 999 to be dead, and
25801 flow will attempt to delete an epilogue insn. In this case, we
25802 need an unspec use/set of the register. */
25804 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25805 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25807 if (!epiloguep || call_used_regs [i])
25808 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25809 gen_rtx_REG (V4SImode, i));
25810 else
25812 rtx reg = gen_rtx_REG (V4SImode, i);
25814 clobs[nclobs++]
25815 = gen_rtx_SET (reg,
25816 gen_rtx_UNSPEC (V4SImode,
25817 gen_rtvec (1, reg), 27));
25821 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25823 for (i = 0; i < nclobs; ++i)
25824 XVECEXP (insn, 0, i) = clobs[i];
25826 return insn;
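/* Editor's illustration: for a prologue (EPILOGUEP == 0) whose mask
   covers only v20, the PARALLEL built above is roughly
     (parallel [(set (reg:SI vrsave)
		     (unspec_volatile:SI [(reg) (reg:SI vrsave)]
					 UNSPECV_SET_VRSAVE))
		(clobber (reg:V4SI v20))])  */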
25829 static rtx
25830 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25832 rtx addr, mem;
25834 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25835 mem = gen_frame_mem (GET_MODE (reg), addr);
25836 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25839 static rtx
25840 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25842 return gen_frame_set (reg, frame_reg, offset, false);
25845 static rtx
25846 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25848 return gen_frame_set (reg, frame_reg, offset, true);
25851 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25852 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25854 static rtx_insn *
25855 emit_frame_save (rtx frame_reg, machine_mode mode,
25856 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25858 rtx reg;
25860 /* Reject modes that would need register-indexed addressing, which this function cannot emit: AltiVec vector modes are allowed only when VSX can handle them. */
25861 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25862 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25864 reg = gen_rtx_REG (mode, regno);
25865 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25866 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25867 NULL_RTX, NULL_RTX);
25870 /* Emit an offset memory reference suitable for a frame store, while
25871 converting to a valid addressing mode. */
25873 static rtx
25874 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25876 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25879 #ifndef TARGET_FIX_AND_CONTINUE
25880 #define TARGET_FIX_AND_CONTINUE 0
25881 #endif
25883 /* The first saved register is really GPR 13 (or 14 on 64-bit), FPR 14, or VR 20; we need the smallest of these. */
25884 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25885 #define LAST_SAVRES_REGISTER 31
25886 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25888 enum {
25889 SAVRES_LR = 0x1,
25890 SAVRES_SAVE = 0x2,
25891 SAVRES_REG = 0x0c,
25892 SAVRES_GPR = 0,
25893 SAVRES_FPR = 4,
25894 SAVRES_VR = 8
25895 };
25897 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25899 /* Temporary holding space for an out-of-line register save/restore
25900 routine name. */
25901 static char savres_routine_name[30];
25903 /* Return the name for an out-of-line register save/restore routine.
25904 REGNO is the first register handled; SEL selects the register class, save vs. restore, and LR handling. */
25906 static char *
25907 rs6000_savres_routine_name (int regno, int sel)
25909 const char *prefix = "";
25910 const char *suffix = "";
25912 /* Different targets are supposed to define
25913 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25914 routine name could be defined with:
25916 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25918 This is a nice idea in theory, but in reality things are
25919 complicated in several ways:
25921 - ELF targets have save/restore routines for GPRs.
25923 - PPC64 ELF targets have routines for save/restore of GPRs that
25924 differ in what they do with the link register, so having a set
25925 prefix doesn't work. (We only use one of the save routines at
25926 the moment, though.)
25928 - PPC32 elf targets have "exit" versions of the restore routines
25929 that restore the link register and can save some extra space.
25930 These require an extra suffix. (There are also "tail" versions
25931 of the restore routines and "GOT" versions of the save routines,
25932 but we don't generate those at present. Same problems apply,
25933 though.)
25935 We deal with all this by synthesizing our own prefix/suffix and
25936 using that for the simple sprintf call shown above. */
25937 if (DEFAULT_ABI == ABI_V4)
25939 if (TARGET_64BIT)
25940 goto aix_names;
25942 if ((sel & SAVRES_REG) == SAVRES_GPR)
25943 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25944 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25945 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25946 else if ((sel & SAVRES_REG) == SAVRES_VR)
25947 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25948 else
25949 abort ();
25951 if ((sel & SAVRES_LR))
25952 suffix = "_x";
25954 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25956 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25957 /* No out-of-line save/restore routines for GPRs on AIX. */
25958 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25959 #endif
25961 aix_names:
25962 if ((sel & SAVRES_REG) == SAVRES_GPR)
25963 prefix = ((sel & SAVRES_SAVE)
25964 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25965 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25966 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25968 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25969 if ((sel & SAVRES_LR))
25970 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25971 else
25972 #endif
25974 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25975 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25978 else if ((sel & SAVRES_REG) == SAVRES_VR)
25979 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25980 else
25981 abort ();
25984 if (DEFAULT_ABI == ABI_DARWIN)
25986 /* The Darwin approach is (slightly) different, in order to be
25987 compatible with code generated by the system toolchain. There is a
25988 single symbol for the start of save sequence, and the code here
25989 embeds an offset into that code on the basis of the first register
25990 to be saved. */
25991 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25992 if ((sel & SAVRES_REG) == SAVRES_GPR)
25993 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25994 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25995 (regno - 13) * 4, prefix, regno);
25996 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25997 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25998 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25999 else if ((sel & SAVRES_REG) == SAVRES_VR)
26000 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26001 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26002 else
26003 abort ();
26005 else
26006 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26008 return savres_routine_name;
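/* Editor's examples of the synthesized names: on 32-bit SVR4,
   SAVRES_GPR|SAVRES_LR (a restore, since SAVRES_SAVE is clear) with
   REGNO 29 yields "_restgpr_29_x"; on AIX/ELFv2,
   SAVRES_SAVE|SAVRES_GPR|SAVRES_LR with REGNO 14 yields
   "_savegpr0_14".  */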
26011 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore
26012 routine. SEL is as for rs6000_savres_routine_name; the first register comes from INFO. */
26014 static rtx
26015 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26017 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26018 ? info->first_gp_reg_save
26019 : (sel & SAVRES_REG) == SAVRES_FPR
26020 ? info->first_fp_reg_save - 32
26021 : (sel & SAVRES_REG) == SAVRES_VR
26022 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26023 : -1);
26024 rtx sym;
26025 int select = sel;
26027 /* Don't generate bogus routine names. */
26028 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26029 && regno <= LAST_SAVRES_REGISTER
26030 && select >= 0 && select <= 12);
26032 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26034 if (sym == NULL)
26036 char *name;
26038 name = rs6000_savres_routine_name (regno, sel);
26040 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26041 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26042 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26045 return sym;
26048 /* Emit a sequence of insns, including a stack tie if needed, for
26049 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26050 reset the stack pointer, but move the base of the frame into
26051 reg UPDT_REGNO for use by out-of-line register restore routines. */
26053 static rtx
26054 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26055 unsigned updt_regno)
26057 /* If there is nothing to do, don't do anything. */
26058 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26059 return NULL_RTX;
26061 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26063 /* This blockage is needed so that sched doesn't decide to move
26064 the sp change before the register restores. */
26065 if (DEFAULT_ABI == ABI_V4)
26066 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26067 GEN_INT (frame_off)));
26069 /* If we are restoring registers out-of-line, we will be using the
26070 "exit" variants of the restore routines, which will reset the
26071 stack for us. But we do need to make updt_reg point to the
26072 right place for those routines. */
26073 if (frame_off != 0)
26074 return emit_insn (gen_add3_insn (updt_reg_rtx,
26075 frame_reg_rtx, GEN_INT (frame_off)));
26076 else
26077 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26079 return NULL_RTX;
26082 /* Return the register number used as a pointer by out-of-line
26083 save/restore functions. */
26085 static inline unsigned
26086 ptr_regno_for_savres (int sel)
26088 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26089 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26090 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
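/* Editor's summary of the mapping above: AIX/ELFv2 use r1 for the
   FPR routines and for any routine that also handles LR, and r12
   otherwise; Darwin uses r1 for its FPR routines; everything else
   (including all V.4 routines) uses r11.  */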
26093 /* Construct a parallel rtx describing the effect of a call to an
26094 out-of-line register save/restore routine, and emit the insn
26095 or jump_insn as appropriate. */
26097 static rtx_insn *
26098 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26099 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26100 machine_mode reg_mode, int sel)
26102 int i;
26103 int offset, start_reg, end_reg, n_regs, use_reg;
26104 int reg_size = GET_MODE_SIZE (reg_mode);
26105 rtx sym;
26106 rtvec p;
26107 rtx par;
26108 rtx_insn *insn;
26110 offset = 0;
26111 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26112 ? info->first_gp_reg_save
26113 : (sel & SAVRES_REG) == SAVRES_FPR
26114 ? info->first_fp_reg_save
26115 : (sel & SAVRES_REG) == SAVRES_VR
26116 ? info->first_altivec_reg_save
26117 : -1);
26118 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26119 ? 32
26120 : (sel & SAVRES_REG) == SAVRES_FPR
26121 ? 64
26122 : (sel & SAVRES_REG) == SAVRES_VR
26123 ? LAST_ALTIVEC_REGNO + 1
26124 : -1);
26125 n_regs = end_reg - start_reg;
26126 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26127 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26128 + n_regs);
26130 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26131 RTVEC_ELT (p, offset++) = ret_rtx;
26133 RTVEC_ELT (p, offset++)
26134 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26136 sym = rs6000_savres_routine_sym (info, sel);
26137 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26139 use_reg = ptr_regno_for_savres (sel);
26140 if ((sel & SAVRES_REG) == SAVRES_VR)
26142 /* Vector regs are saved/restored using [reg+reg] addressing. */
26143 RTVEC_ELT (p, offset++)
26144 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26145 RTVEC_ELT (p, offset++)
26146 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26148 else
26149 RTVEC_ELT (p, offset++)
26150 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26152 for (i = 0; i < end_reg - start_reg; i++)
26153 RTVEC_ELT (p, i + offset)
26154 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26155 frame_reg_rtx, save_area_offset + reg_size * i,
26156 (sel & SAVRES_SAVE) != 0);
26158 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26159 RTVEC_ELT (p, i + offset)
26160 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26162 par = gen_rtx_PARALLEL (VOIDmode, p);
26164 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26166 insn = emit_jump_insn (par);
26167 JUMP_LABEL (insn) = ret_rtx;
26169 else
26170 insn = emit_insn (par);
26171 return insn;
26174 /* Emit prologue code to store CR fields that need to be saved into REG. This
26175 function should only be called when moving the non-volatile CRs to REG;
26176 it is not a general-purpose routine to move the entire set of CRs to REG.
26177 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26178 volatile CRs. */
26180 static void
26181 rs6000_emit_prologue_move_from_cr (rtx reg)
26183 /* Only the ELFv2 ABI allows storing only selected fields. */
26184 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26186 int i, cr_reg[8], count = 0;
26188 /* Collect CR fields that must be saved. */
26189 for (i = 0; i < 8; i++)
26190 if (save_reg_p (CR0_REGNO + i))
26191 cr_reg[count++] = i;
26193 /* If it's just a single one, use mfcrf. */
26194 if (count == 1)
26196 rtvec p = rtvec_alloc (1);
26197 rtvec r = rtvec_alloc (2);
26198 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26199 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26200 RTVEC_ELT (p, 0)
26201 = gen_rtx_SET (reg,
26202 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26204 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26205 return;
26208 /* ??? It might be better to handle count == 2 / 3 cases here
26209 as well, using logical operations to combine the values. */
26212 emit_insn (gen_prologue_movesi_from_cr (reg));
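/* Editor's worked example: if only CR2 must be saved, cr_reg[0] == 2
   and the mask above is 1 << (7 - 2) == 0x20, matching the
   mfcrf/mfocrf field-mask encoding in which CR0 is the most
   significant field.  */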
26215 /* Return whether the split-stack arg pointer (r12) is used. */
26217 static bool
26218 split_stack_arg_pointer_used_p (void)
26220 /* If the pseudo holding the arg pointer is no longer a pseudo,
26221 then the arg pointer is used. */
26222 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26223 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26224 || (REGNO (cfun->machine->split_stack_arg_pointer)
26225 < FIRST_PSEUDO_REGISTER)))
26226 return true;
26228 /* Unfortunately we also need to do some code scanning, since
26229 r12 may have been substituted for the pseudo. */
26230 rtx_insn *insn;
26231 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26232 FOR_BB_INSNS (bb, insn)
26233 if (NONDEBUG_INSN_P (insn))
26235 /* A call destroys r12. */
26236 if (CALL_P (insn))
26237 return false;
26239 df_ref use;
26240 FOR_EACH_INSN_USE (use, insn)
26242 rtx x = DF_REF_REG (use);
26243 if (REG_P (x) && REGNO (x) == 12)
26244 return true;
26246 df_ref def;
26247 FOR_EACH_INSN_DEF (def, insn)
26249 rtx x = DF_REF_REG (def);
26250 if (REG_P (x) && REGNO (x) == 12)
26251 return false;
26254 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26257 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26259 static bool
26260 rs6000_global_entry_point_needed_p (void)
26262 /* Only needed for the ELFv2 ABI. */
26263 if (DEFAULT_ABI != ABI_ELFv2)
26264 return false;
26266 /* With -msingle-pic-base, we assume the whole program shares the same
26267 TOC, so no global entry point prologues are needed anywhere. */
26268 if (TARGET_SINGLE_PIC_BASE)
26269 return false;
26271 /* Ensure we have a global entry point for thunks. ??? We could
26272 avoid that if the target routine doesn't need a global entry point,
26273 but we do not know whether this is the case at this point. */
26274 if (cfun->is_thunk)
26275 return true;
26277 /* For regular functions, rs6000_emit_prologue sets this flag if the
26278 routine ever uses the TOC pointer. */
26279 return cfun->machine->r2_setup_needed;
26282 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26283 static sbitmap
26284 rs6000_get_separate_components (void)
26286 rs6000_stack_t *info = rs6000_stack_info ();
26288 if (WORLD_SAVE_P (info))
26289 return NULL;
26291 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26292 && !(info->savres_strategy & REST_MULTIPLE));
26294 /* Component 0 is the save/restore of LR (done via GPR0).
26295 Components 13..31 are the save/restore of GPR13..GPR31.
26296 Components 46..63 are the save/restore of FPR14..FPR31. */
26298 cfun->machine->n_components = 64;
26300 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26301 bitmap_clear (components);
26303 int reg_size = TARGET_32BIT ? 4 : 8;
26304 int fp_reg_size = 8;
26306 /* The GPRs we need saved to the frame. */
26307 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26308 && (info->savres_strategy & REST_INLINE_GPRS))
26310 int offset = info->gp_save_offset;
26311 if (info->push_p)
26312 offset += info->total_size;
26314 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26316 if (IN_RANGE (offset, -0x8000, 0x7fff)
26317 && save_reg_p (regno))
26318 bitmap_set_bit (components, regno);
26320 offset += reg_size;
26324 /* Don't mess with the hard frame pointer. */
26325 if (frame_pointer_needed)
26326 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26328 /* Don't mess with the fixed TOC register. */
26329 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26330 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26331 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26332 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26334 /* The FPRs we need saved to the frame. */
26335 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26336 && (info->savres_strategy & REST_INLINE_FPRS))
26338 int offset = info->fp_save_offset;
26339 if (info->push_p)
26340 offset += info->total_size;
26342 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26344 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26345 bitmap_set_bit (components, regno);
26347 offset += fp_reg_size;
26351 /* Optimize LR save and restore if we can. This is component 0. Any
26352 out-of-line register save/restore routines need LR. */
26353 if (info->lr_save_p
26354 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26355 && (info->savres_strategy & SAVE_INLINE_GPRS)
26356 && (info->savres_strategy & REST_INLINE_GPRS)
26357 && (info->savres_strategy & SAVE_INLINE_FPRS)
26358 && (info->savres_strategy & REST_INLINE_FPRS)
26359 && (info->savres_strategy & SAVE_INLINE_VRS)
26360 && (info->savres_strategy & REST_INLINE_VRS))
26362 int offset = info->lr_save_offset;
26363 if (info->push_p)
26364 offset += info->total_size;
26365 if (IN_RANGE (offset, -0x8000, 0x7fff))
26366 bitmap_set_bit (components, 0);
26369 return components;
26372 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26373 static sbitmap
26374 rs6000_components_for_bb (basic_block bb)
26376 rs6000_stack_t *info = rs6000_stack_info ();
26378 bitmap in = DF_LIVE_IN (bb);
26379 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26380 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26382 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26383 bitmap_clear (components);
26385 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26387 /* GPRs. */
26388 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26389 if (bitmap_bit_p (in, regno)
26390 || bitmap_bit_p (gen, regno)
26391 || bitmap_bit_p (kill, regno))
26392 bitmap_set_bit (components, regno);
26394 /* FPRs. */
26395 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26396 if (bitmap_bit_p (in, regno)
26397 || bitmap_bit_p (gen, regno)
26398 || bitmap_bit_p (kill, regno))
26399 bitmap_set_bit (components, regno);
26401 /* The link register. */
26402 if (bitmap_bit_p (in, LR_REGNO)
26403 || bitmap_bit_p (gen, LR_REGNO)
26404 || bitmap_bit_p (kill, LR_REGNO))
26405 bitmap_set_bit (components, 0);
26407 return components;
26410 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26411 static void
26412 rs6000_disqualify_components (sbitmap components, edge e,
26413 sbitmap edge_components, bool /*is_prologue*/)
26415 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26416 live where we want to place that code. */
26417 if (bitmap_bit_p (edge_components, 0)
26418 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26420 if (dump_file)
26421 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26422 "on entry to bb %d\n", e->dest->index);
26423 bitmap_clear_bit (components, 0);
26427 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26428 static void
26429 rs6000_emit_prologue_components (sbitmap components)
26431 rs6000_stack_t *info = rs6000_stack_info ();
26432 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26433 ? HARD_FRAME_POINTER_REGNUM
26434 : STACK_POINTER_REGNUM);
26436 machine_mode reg_mode = Pmode;
26437 int reg_size = TARGET_32BIT ? 4 : 8;
26438 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26439 ? DFmode : SFmode;
26440 int fp_reg_size = 8;
26442 /* Prologue for LR. */
26443 if (bitmap_bit_p (components, 0))
26445 rtx reg = gen_rtx_REG (reg_mode, 0);
26446 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26447 RTX_FRAME_RELATED_P (insn) = 1;
26448 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26450 int offset = info->lr_save_offset;
26451 if (info->push_p)
26452 offset += info->total_size;
26454 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26455 RTX_FRAME_RELATED_P (insn) = 1;
26456 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26457 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26458 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26461 /* Prologue for the GPRs. */
26462 int offset = info->gp_save_offset;
26463 if (info->push_p)
26464 offset += info->total_size;
26466 for (int i = info->first_gp_reg_save; i < 32; i++)
26468 if (bitmap_bit_p (components, i))
26470 rtx reg = gen_rtx_REG (reg_mode, i);
26471 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26472 RTX_FRAME_RELATED_P (insn) = 1;
26473 rtx set = copy_rtx (single_set (insn));
26474 add_reg_note (insn, REG_CFA_OFFSET, set);
26477 offset += reg_size;
26480 /* Prologue for the FPRs. */
26481 offset = info->fp_save_offset;
26482 if (info->push_p)
26483 offset += info->total_size;
26485 for (int i = info->first_fp_reg_save; i < 64; i++)
26487 if (bitmap_bit_p (components, i))
26489 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26490 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26491 RTX_FRAME_RELATED_P (insn) = 1;
26492 rtx set = copy_rtx (single_set (insn));
26493 add_reg_note (insn, REG_CFA_OFFSET, set);
26496 offset += fp_reg_size;
26500 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26501 static void
26502 rs6000_emit_epilogue_components (sbitmap components)
26504 rs6000_stack_t *info = rs6000_stack_info ();
26505 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26506 ? HARD_FRAME_POINTER_REGNUM
26507 : STACK_POINTER_REGNUM);
26509 machine_mode reg_mode = Pmode;
26510 int reg_size = TARGET_32BIT ? 4 : 8;
26512 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26513 ? DFmode : SFmode;
26514 int fp_reg_size = 8;
26516 /* Epilogue for the FPRs. */
26517 int offset = info->fp_save_offset;
26518 if (info->push_p)
26519 offset += info->total_size;
26521 for (int i = info->first_fp_reg_save; i < 64; i++)
26523 if (bitmap_bit_p (components, i))
26525 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26526 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26527 RTX_FRAME_RELATED_P (insn) = 1;
26528 add_reg_note (insn, REG_CFA_RESTORE, reg);
26531 offset += fp_reg_size;
26534 /* Epilogue for the GPRs. */
26535 offset = info->gp_save_offset;
26536 if (info->push_p)
26537 offset += info->total_size;
26539 for (int i = info->first_gp_reg_save; i < 32; i++)
26541 if (bitmap_bit_p (components, i))
26543 rtx reg = gen_rtx_REG (reg_mode, i);
26544 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26545 RTX_FRAME_RELATED_P (insn) = 1;
26546 add_reg_note (insn, REG_CFA_RESTORE, reg);
26549 offset += reg_size;
26552 /* Epilogue for LR. */
26553 if (bitmap_bit_p (components, 0))
26555 int offset = info->lr_save_offset;
26556 if (info->push_p)
26557 offset += info->total_size;
26559 rtx reg = gen_rtx_REG (reg_mode, 0);
26560 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26562 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26563 insn = emit_move_insn (lr, reg);
26564 RTX_FRAME_RELATED_P (insn) = 1;
26565 add_reg_note (insn, REG_CFA_RESTORE, lr);
26569 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26570 static void
26571 rs6000_set_handled_components (sbitmap components)
26573 rs6000_stack_t *info = rs6000_stack_info ();
26575 for (int i = info->first_gp_reg_save; i < 32; i++)
26576 if (bitmap_bit_p (components, i))
26577 cfun->machine->gpr_is_wrapped_separately[i] = true;
26579 for (int i = info->first_fp_reg_save; i < 64; i++)
26580 if (bitmap_bit_p (components, i))
26581 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26583 if (bitmap_bit_p (components, 0))
26584 cfun->machine->lr_is_wrapped_separately = true;
26587 /* VRSAVE is a bit vector representing which AltiVec registers
26588 are used. The OS uses this to determine which vector
26589 registers to save on a context switch. We need to save
26590 VRSAVE on the stack frame, add whatever AltiVec registers we
26591 used in this function, and do the corresponding magic in the
26592 epilogue. */
26593 static void
26594 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26595 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26597 /* Get VRSAVE into a GPR. */
26598 rtx reg = gen_rtx_REG (SImode, save_regno);
26599 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26600 if (TARGET_MACHO)
26601 emit_insn (gen_get_vrsave_internal (reg));
26602 else
26603 emit_insn (gen_rtx_SET (reg, vrsave));
26605 /* Save VRSAVE. */
26606 int offset = info->vrsave_save_offset + frame_off;
26607 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26609 /* Include the registers in the mask. */
26610 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26612 emit_insn (generate_set_vrsave (reg, info, 0));
26615 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26616 called, it left the arg pointer to the old stack in r29. Otherwise, the
26617 arg pointer is the top of the current frame. */
26618 static void
26619 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26620 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26622 cfun->machine->split_stack_argp_used = true;
26624 if (sp_adjust)
26626 rtx r12 = gen_rtx_REG (Pmode, 12);
26627 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26628 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26629 emit_insn_before (set_r12, sp_adjust);
26631 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26633 rtx r12 = gen_rtx_REG (Pmode, 12);
26634 if (frame_off == 0)
26635 emit_move_insn (r12, frame_reg_rtx);
26636 else
26637 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26640 if (info->push_p)
26642 rtx r12 = gen_rtx_REG (Pmode, 12);
26643 rtx r29 = gen_rtx_REG (Pmode, 29);
26644 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26645 rtx not_more = gen_label_rtx ();
26646 rtx jump;
26648 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26649 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26650 gen_rtx_LABEL_REF (VOIDmode, not_more),
26651 pc_rtx);
26652 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26653 JUMP_LABEL (jump) = not_more;
26654 LABEL_NUSES (not_more) += 1;
26655 emit_move_insn (r12, r29);
26656 emit_label (not_more);
26660 /* Emit function prologue as insns. */
26662 void
26663 rs6000_emit_prologue (void)
26665 rs6000_stack_t *info = rs6000_stack_info ();
26666 machine_mode reg_mode = Pmode;
26667 int reg_size = TARGET_32BIT ? 4 : 8;
26668 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26669 ? DFmode : SFmode;
26670 int fp_reg_size = 8;
26671 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26672 rtx frame_reg_rtx = sp_reg_rtx;
26673 unsigned int cr_save_regno;
26674 rtx cr_save_rtx = NULL_RTX;
26675 rtx_insn *insn;
26676 int strategy;
26677 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26678 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26679 && call_used_regs[STATIC_CHAIN_REGNUM]);
26680 int using_split_stack = (flag_split_stack
26681 && (lookup_attribute ("no_split_stack",
26682 DECL_ATTRIBUTES (cfun->decl))
26683 == NULL));
26685 /* Offset to top of frame for frame_reg and sp respectively. */
26686 HOST_WIDE_INT frame_off = 0;
26687 HOST_WIDE_INT sp_off = 0;
26688 /* sp_adjust is the stack adjusting instruction, tracked so that the
26689 insn setting up the split-stack arg pointer can be emitted just
26690 prior to it, when r12 is not used here for other purposes. */
26691 rtx_insn *sp_adjust = 0;
26693 #if CHECKING_P
26694 /* Track and check usage of r0, r11, r12. */
26695 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26696 #define START_USE(R) do \
26698 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26699 reg_inuse |= 1 << (R); \
26700 } while (0)
26701 #define END_USE(R) do \
26703 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26704 reg_inuse &= ~(1 << (R)); \
26705 } while (0)
26706 #define NOT_INUSE(R) do \
26708 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26709 } while (0)
26710 #else
26711 #define START_USE(R) do {} while (0)
26712 #define END_USE(R) do {} while (0)
26713 #define NOT_INUSE(R) do {} while (0)
26714 #endif
26716 if (DEFAULT_ABI == ABI_ELFv2
26717 && !TARGET_SINGLE_PIC_BASE)
26719 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26721 /* With -mminimal-toc we may generate an extra use of r2 below. */
26722 if (TARGET_TOC && TARGET_MINIMAL_TOC
26723 && !constant_pool_empty_p ())
26724 cfun->machine->r2_setup_needed = true;
26728 if (flag_stack_usage_info)
26729 current_function_static_stack_size = info->total_size;
26731 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26733 HOST_WIDE_INT size = info->total_size;
26735 if (crtl->is_leaf && !cfun->calls_alloca)
26737 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
26738 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
26739 size - STACK_CHECK_PROTECT);
26741 else if (size > 0)
26742 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
26745 if (TARGET_FIX_AND_CONTINUE)
26747 /* gdb on darwin arranges to forward a function from the old
26748 address by modifying the first 5 instructions of the function
26749 to branch to the overriding function. This is necessary to
26750 permit function pointers that point to the old function to
26751 actually forward to the new function. */
26752 emit_insn (gen_nop ());
26753 emit_insn (gen_nop ());
26754 emit_insn (gen_nop ());
26755 emit_insn (gen_nop ());
26756 emit_insn (gen_nop ());
26759 /* Handle world saves specially here. */
26760 if (WORLD_SAVE_P (info))
26762 int i, j, sz;
26763 rtx treg;
26764 rtvec p;
26765 rtx reg0;
26767 /* save_world expects lr in r0. */
26768 reg0 = gen_rtx_REG (Pmode, 0);
26769 if (info->lr_save_p)
26771 insn = emit_move_insn (reg0,
26772 gen_rtx_REG (Pmode, LR_REGNO));
26773 RTX_FRAME_RELATED_P (insn) = 1;
26776 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26777 assumptions about the offsets of various bits of the stack
26778 frame. */
26779 gcc_assert (info->gp_save_offset == -220
26780 && info->fp_save_offset == -144
26781 && info->lr_save_offset == 8
26782 && info->cr_save_offset == 4
26783 && info->push_p
26784 && info->lr_save_p
26785 && (!crtl->calls_eh_return
26786 || info->ehrd_offset == -432)
26787 && info->vrsave_save_offset == -224
26788 && info->altivec_save_offset == -416);
26790 treg = gen_rtx_REG (SImode, 11);
26791 emit_move_insn (treg, GEN_INT (-info->total_size));
26793 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26794 in R11. It also clobbers R12, so beware! */
25796 /* Preserve CR2 for save_world prologues. */
26797 sz = 5;
26798 sz += 32 - info->first_gp_reg_save;
26799 sz += 64 - info->first_fp_reg_save;
26800 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26801 p = rtvec_alloc (sz);
26802 j = 0;
26803 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26804 gen_rtx_REG (SImode,
26805 LR_REGNO));
26806 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26807 gen_rtx_SYMBOL_REF (Pmode,
26808 "*save_world"));
26809 /* We do floats first so that the instruction pattern matches
26810 properly. */
26811 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26812 RTVEC_ELT (p, j++)
26813 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26814 ? DFmode : SFmode,
26815 info->first_fp_reg_save + i),
26816 frame_reg_rtx,
26817 info->fp_save_offset + frame_off + 8 * i);
26818 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26819 RTVEC_ELT (p, j++)
26820 = gen_frame_store (gen_rtx_REG (V4SImode,
26821 info->first_altivec_reg_save + i),
26822 frame_reg_rtx,
26823 info->altivec_save_offset + frame_off + 16 * i);
26824 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26825 RTVEC_ELT (p, j++)
26826 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26827 frame_reg_rtx,
26828 info->gp_save_offset + frame_off + reg_size * i);
26830 /* CR register traditionally saved as CR2. */
26831 RTVEC_ELT (p, j++)
26832 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26833 frame_reg_rtx, info->cr_save_offset + frame_off);
26834 /* Explain the use of R0. */
26835 if (info->lr_save_p)
26836 RTVEC_ELT (p, j++)
26837 = gen_frame_store (reg0,
26838 frame_reg_rtx, info->lr_save_offset + frame_off);
26839 /* Explain what happens to the stack pointer. */
26841 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26842 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26845 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26846 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26847 treg, GEN_INT (-info->total_size));
26848 sp_off = frame_off = info->total_size;
26851 strategy = info->savres_strategy;
26853 /* For V.4, update stack before we do any saving and set back pointer. */
26854 if (! WORLD_SAVE_P (info)
26855 && info->push_p
26856 && (DEFAULT_ABI == ABI_V4
26857 || crtl->calls_eh_return))
26859 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26860 || !(strategy & SAVE_INLINE_GPRS)
26861 || !(strategy & SAVE_INLINE_VRS));
26862 int ptr_regno = -1;
26863 rtx ptr_reg = NULL_RTX;
26864 int ptr_off = 0;
26866 if (info->total_size < 32767)
26867 frame_off = info->total_size;
26868 else if (need_r11)
26869 ptr_regno = 11;
26870 else if (info->cr_save_p
26871 || info->lr_save_p
26872 || info->first_fp_reg_save < 64
26873 || info->first_gp_reg_save < 32
26874 || info->altivec_size != 0
26875 || info->vrsave_size != 0
26876 || crtl->calls_eh_return)
26877 ptr_regno = 12;
26878 else
26880 /* The prologue won't be saving any regs so there is no need
26881 to set up a frame register to access any frame save area.
26882 We also won't be using frame_off anywhere below, but set
26883 the correct value anyway to protect against future
26884 changes to this function. */
26885 frame_off = info->total_size;
26887 if (ptr_regno != -1)
26889 /* Set up the frame offset to that needed by the first
26890 out-of-line save function. */
26891 START_USE (ptr_regno);
26892 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26893 frame_reg_rtx = ptr_reg;
26894 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26895 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26896 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26897 ptr_off = info->gp_save_offset + info->gp_size;
26898 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26899 ptr_off = info->altivec_save_offset + info->altivec_size;
26900 frame_off = -ptr_off;
26902 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26903 ptr_reg, ptr_off);
26904 if (REGNO (frame_reg_rtx) == 12)
26905 sp_adjust = 0;
26906 sp_off = info->total_size;
26907 if (frame_reg_rtx != sp_reg_rtx)
26908 rs6000_emit_stack_tie (frame_reg_rtx, false);
26911 /* If we use the link register, get it into r0. */
26912 if (!WORLD_SAVE_P (info) && info->lr_save_p
26913 && !cfun->machine->lr_is_wrapped_separately)
26915 rtx addr, reg, mem;
26917 reg = gen_rtx_REG (Pmode, 0);
26918 START_USE (0);
26919 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26920 RTX_FRAME_RELATED_P (insn) = 1;
26922 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26923 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26925 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26926 GEN_INT (info->lr_save_offset + frame_off));
26927 mem = gen_rtx_MEM (Pmode, addr);
26928 /* This should not be of rs6000_sr_alias_set, because of
26929 __builtin_return_address. */
26931 insn = emit_move_insn (mem, reg);
26932 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26933 NULL_RTX, NULL_RTX);
26934 END_USE (0);
26938 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26939 r12 will be needed by the out-of-line gpr save below. */
26940 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26941 && !(strategy & (SAVE_INLINE_GPRS
26942 | SAVE_NOINLINE_GPRS_SAVES_LR))
26943 ? 11 : 12);
26944 if (!WORLD_SAVE_P (info)
26945 && info->cr_save_p
26946 && REGNO (frame_reg_rtx) != cr_save_regno
26947 && !(using_static_chain_p && cr_save_regno == 11)
26948 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26950 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26951 START_USE (cr_save_regno);
26952 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26955 /* Do any required saving of FPRs. Either save them inline here
26956 or call an out-of-line save routine, as the strategy dictates. */
26957 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26959 int offset = info->fp_save_offset + frame_off;
26960 for (int i = info->first_fp_reg_save; i < 64; i++)
26962 if (save_reg_p (i)
26963 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26964 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26965 sp_off - frame_off);
26967 offset += fp_reg_size;
26970 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26972 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26973 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26974 unsigned ptr_regno = ptr_regno_for_savres (sel);
26975 rtx ptr_reg = frame_reg_rtx;
26977 if (REGNO (frame_reg_rtx) == ptr_regno)
26978 gcc_checking_assert (frame_off == 0);
26979 else
26981 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26982 NOT_INUSE (ptr_regno);
26983 emit_insn (gen_add3_insn (ptr_reg,
26984 frame_reg_rtx, GEN_INT (frame_off)));
26986 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26987 info->fp_save_offset,
26988 info->lr_save_offset,
26989 DFmode, sel);
26990 rs6000_frame_related (insn, ptr_reg, sp_off,
26991 NULL_RTX, NULL_RTX);
26992 if (lr)
26993 END_USE (0);
26996 /* Save GPRs. This is done as a PARALLEL if we are using
26997 the store-multiple instructions. */
26998 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27000 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27001 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27002 unsigned ptr_regno = ptr_regno_for_savres (sel);
27003 rtx ptr_reg = frame_reg_rtx;
27004 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27005 int end_save = info->gp_save_offset + info->gp_size;
27006 int ptr_off;
27008 if (ptr_regno == 12)
27009 sp_adjust = 0;
27010 if (!ptr_set_up)
27011 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27013 /* Need to adjust r11 (r12) if we saved any FPRs. */
27014 if (end_save + frame_off != 0)
27016 rtx offset = GEN_INT (end_save + frame_off);
27018 if (ptr_set_up)
27019 frame_off = -end_save;
27020 else
27021 NOT_INUSE (ptr_regno);
27022 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27024 else if (!ptr_set_up)
27026 NOT_INUSE (ptr_regno);
27027 emit_move_insn (ptr_reg, frame_reg_rtx);
27029 ptr_off = -end_save;
27030 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27031 info->gp_save_offset + ptr_off,
27032 info->lr_save_offset + ptr_off,
27033 reg_mode, sel);
27034 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27035 NULL_RTX, NULL_RTX);
27036 if (lr)
27037 END_USE (0);
27039 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27041 rtvec p;
27042 int i;
27043 p = rtvec_alloc (32 - info->first_gp_reg_save);
27044 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27045 RTVEC_ELT (p, i)
27046 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27047 frame_reg_rtx,
27048 info->gp_save_offset + frame_off + reg_size * i);
27049 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27050 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27051 NULL_RTX, NULL_RTX);
27053 else if (!WORLD_SAVE_P (info))
27055 int offset = info->gp_save_offset + frame_off;
27056 for (int i = info->first_gp_reg_save; i < 32; i++)
27058 if (save_reg_p (i)
27059 && !cfun->machine->gpr_is_wrapped_separately[i])
27060 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27061 sp_off - frame_off);
27063 offset += reg_size;
27067 if (crtl->calls_eh_return)
27069 unsigned int i;
27070 rtvec p;
27072 for (i = 0; ; ++i)
27074 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27075 if (regno == INVALID_REGNUM)
27076 break;
27079 p = rtvec_alloc (i);
27081 for (i = 0; ; ++i)
27083 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27084 if (regno == INVALID_REGNUM)
27085 break;
27087 rtx set
27088 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27089 sp_reg_rtx,
27090 info->ehrd_offset + sp_off + reg_size * (int) i);
27091 RTVEC_ELT (p, i) = set;
27092 RTX_FRAME_RELATED_P (set) = 1;
27095 insn = emit_insn (gen_blockage ());
27096 RTX_FRAME_RELATED_P (insn) = 1;
27097 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27100 /* In the AIX ABI we need to make sure r2 is really saved. */
27101 if (TARGET_AIX && crtl->calls_eh_return)
27103 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27104 rtx join_insn, note;
27105 rtx_insn *save_insn;
27106 long toc_restore_insn;
27108 tmp_reg = gen_rtx_REG (Pmode, 11);
27109 tmp_reg_si = gen_rtx_REG (SImode, 11);
27110 if (using_static_chain_p)
27112 START_USE (0);
27113 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27115 else
27116 START_USE (11);
27117 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27118 /* Peek at instruction to which this function returns. If it's
27119 restoring r2, then we know we've already saved r2. We can't
27120 unconditionally save r2 because the value we have will already
27121 be updated if we arrived at this function via a plt call or
27122 toc adjusting stub. */
27123 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27124 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27125 + RS6000_TOC_SAVE_SLOT);
27126 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27127 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27128 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27129 validate_condition_mode (EQ, CCUNSmode);
27130 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27131 emit_insn (gen_rtx_SET (compare_result,
27132 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27133 toc_save_done = gen_label_rtx ();
27134 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27135 gen_rtx_EQ (VOIDmode, compare_result,
27136 const0_rtx),
27137 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27138 pc_rtx);
27139 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27140 JUMP_LABEL (jump) = toc_save_done;
27141 LABEL_NUSES (toc_save_done) += 1;
27143 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27144 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27145 sp_off - frame_off);
27147 emit_label (toc_save_done);
27149 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27150 have a CFG that has different saves along different paths.
27151 Move the note to a dummy blockage insn, which describes that
27152 R2 is unconditionally saved after the label. */
27153 /* ??? An alternate representation might be a special insn pattern
27154 containing both the branch and the store. That might give the
27155 code that minimizes the number of DW_CFA_advance opcodes greater
27156 freedom in placing the annotations. */
27157 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27158 if (note)
27159 remove_note (save_insn, note);
27160 else
27161 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27162 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27163 RTX_FRAME_RELATED_P (save_insn) = 0;
27165 join_insn = emit_insn (gen_blockage ());
27166 REG_NOTES (join_insn) = note;
27167 RTX_FRAME_RELATED_P (join_insn) = 1;
27169 if (using_static_chain_p)
27171 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27172 END_USE (0);
27174 else
27175 END_USE (11);
27178 /* Save CR if we use any that must be preserved. */
27179 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27181 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27182 GEN_INT (info->cr_save_offset + frame_off));
27183 rtx mem = gen_frame_mem (SImode, addr);
27185 /* If we didn't copy cr before, do so now using r0. */
27186 if (cr_save_rtx == NULL_RTX)
27188 START_USE (0);
27189 cr_save_rtx = gen_rtx_REG (SImode, 0);
27190 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27193 /* Saving CR requires a two-instruction sequence: one instruction
27194 to move the CR to a general-purpose register, and a second
27195 instruction that stores the GPR to memory.
27197 We do not emit any DWARF CFI records for the first of these,
27198 because we cannot properly represent the fact that CR is saved in
27199 a register. One reason is that we cannot express that multiple
27200 CR fields are saved; another reason is that on 64-bit, the size
27201 of the CR register in DWARF (4 bytes) differs from the size of
27202 a general-purpose register.
27204 This means if any intervening instruction were to clobber one of
27205 the call-saved CR fields, we'd have incorrect CFI. To prevent
27206 this from happening, we mark the store to memory as a use of
27207 those CR fields, which prevents any such instruction from being
27208 scheduled in between the two instructions. */
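      /* Sketch of the emitted sequence (the frame offset here is
	 illustrative only):
	     mfcr 0		-- copy CR to a GPR; no CFI attached
	     stw  0,8(1)	-- store the GPR; CFI attached here
	 with the USEs built below keeping the two insns together.  */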
27209 rtx crsave_v[9];
27210 int n_crsave = 0;
27211 int i;
27213 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27214 for (i = 0; i < 8; i++)
27215 if (save_reg_p (CR0_REGNO + i))
27216 crsave_v[n_crsave++]
27217 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27219 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27220 gen_rtvec_v (n_crsave, crsave_v)));
27221 END_USE (REGNO (cr_save_rtx));
27223 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27224 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27225 so we need to construct a frame expression manually. */
27226 RTX_FRAME_RELATED_P (insn) = 1;
27228 /* Update address to be stack-pointer relative, like
27229 rs6000_frame_related would do. */
27230 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27231 GEN_INT (info->cr_save_offset + sp_off));
27232 mem = gen_frame_mem (SImode, addr);
27234 if (DEFAULT_ABI == ABI_ELFv2)
27236 /* In the ELFv2 ABI we generate separate CFI records for each
27237 CR field that was actually saved. They all point to the
27238 same 32-bit stack slot. */
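	  /* E.g. if cr2, cr3 and cr4 are saved, this emits three CFI
	     records, each pointing at the same save slot.  */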
27239 rtx crframe[8];
27240 int n_crframe = 0;
27242 for (i = 0; i < 8; i++)
27243 if (save_reg_p (CR0_REGNO + i))
27245 crframe[n_crframe]
27246 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27248 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27249 n_crframe++;
27252 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27253 gen_rtx_PARALLEL (VOIDmode,
27254 gen_rtvec_v (n_crframe, crframe)));
27256 else
27258 /* In other ABIs, by convention, we use a single CR regnum to
27259 represent the fact that all call-saved CR fields are saved.
27260 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27261 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27262 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27266 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27267 *separate* slots if the routine calls __builtin_eh_return, so
27268 that they can be independently restored by the unwinder. */
27269 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27271 int i, cr_off = info->ehcr_offset;
27272 rtx crsave;
27274 /* ??? We might get better performance by using multiple mfocrf
27275 instructions. */
27276 crsave = gen_rtx_REG (SImode, 0);
27277 emit_insn (gen_prologue_movesi_from_cr (crsave));
27279 for (i = 0; i < 8; i++)
27280 if (!call_used_regs[CR0_REGNO + i])
27282 rtvec p = rtvec_alloc (2);
27283 RTVEC_ELT (p, 0)
27284 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27285 RTVEC_ELT (p, 1)
27286 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27288 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27290 RTX_FRAME_RELATED_P (insn) = 1;
27291 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27292 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27293 sp_reg_rtx, cr_off + sp_off));
27295 cr_off += reg_size;
27299 /* Update stack and set back pointer unless this is V.4,
27300 for which it was done previously. */
27301 if (!WORLD_SAVE_P (info) && info->push_p
27302 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27304 rtx ptr_reg = NULL;
27305 int ptr_off = 0;
27307 /* If saving altivec regs we need to be able to address all save
27308 locations using a 16-bit offset. */
27309 if ((strategy & SAVE_INLINE_VRS) == 0
27310 || (info->altivec_size != 0
27311 && (info->altivec_save_offset + info->altivec_size - 16
27312 + info->total_size - frame_off) > 32767)
27313 || (info->vrsave_size != 0
27314 && (info->vrsave_save_offset
27315 + info->total_size - frame_off) > 32767))
27317 int sel = SAVRES_SAVE | SAVRES_VR;
27318 unsigned ptr_regno = ptr_regno_for_savres (sel);
27320 if (using_static_chain_p
27321 && ptr_regno == STATIC_CHAIN_REGNUM)
27322 ptr_regno = 12;
27323 if (REGNO (frame_reg_rtx) != ptr_regno)
27324 START_USE (ptr_regno);
27325 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27326 frame_reg_rtx = ptr_reg;
27327 ptr_off = info->altivec_save_offset + info->altivec_size;
27328 frame_off = -ptr_off;
27330 else if (REGNO (frame_reg_rtx) == 1)
27331 frame_off = info->total_size;
27332 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27333 ptr_reg, ptr_off);
27334 if (REGNO (frame_reg_rtx) == 12)
27335 sp_adjust = 0;
27336 sp_off = info->total_size;
27337 if (frame_reg_rtx != sp_reg_rtx)
27338 rs6000_emit_stack_tie (frame_reg_rtx, false);
27341 /* Set frame pointer, if needed. */
27342 if (frame_pointer_needed)
27344 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27345 sp_reg_rtx);
27346 RTX_FRAME_RELATED_P (insn) = 1;
27349 /* Save AltiVec registers if needed. Save here because the red zone does
27350 not always include AltiVec registers. */
27351 if (!WORLD_SAVE_P (info)
27352 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27354 int end_save = info->altivec_save_offset + info->altivec_size;
27355 int ptr_off;
27356 /* Oddly, the vector save/restore functions point r0 at the end
27357 of the save area, then use r11 or r12 to load offsets for
27358 [reg+reg] addressing. */
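      /* Rough shape of the call built below (the routine name is a
	 sketch; rs6000_emit_savres_rtx constructs the real one):
	     r0 = frame_reg + end_save	-- r0 points past the save area
	     bl _savevr_<n>		-- stores VRs at negative offsets
					   from r0, indexed via r11/r12.  */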
27359 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27360 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27361 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27363 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27364 NOT_INUSE (0);
27365 if (scratch_regno == 12)
27366 sp_adjust = 0;
27367 if (end_save + frame_off != 0)
27369 rtx offset = GEN_INT (end_save + frame_off);
27371 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27373 else
27374 emit_move_insn (ptr_reg, frame_reg_rtx);
27376 ptr_off = -end_save;
27377 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27378 info->altivec_save_offset + ptr_off,
27379 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27380 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27381 NULL_RTX, NULL_RTX);
27382 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27384 /* The oddity mentioned above clobbered our frame reg. */
27385 emit_move_insn (frame_reg_rtx, ptr_reg);
27386 frame_off = ptr_off;
27389 else if (!WORLD_SAVE_P (info)
27390 && info->altivec_size != 0)
27392 int i;
27394 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27395 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27397 rtx areg, savereg, mem;
27398 HOST_WIDE_INT offset;
27400 offset = (info->altivec_save_offset + frame_off
27401 + 16 * (i - info->first_altivec_reg_save));
27403 savereg = gen_rtx_REG (V4SImode, i);
27405 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27407 mem = gen_frame_mem (V4SImode,
27408 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27409 GEN_INT (offset)));
27410 insn = emit_insn (gen_rtx_SET (mem, savereg));
27411 areg = NULL_RTX;
27413 else
27415 NOT_INUSE (0);
27416 areg = gen_rtx_REG (Pmode, 0);
27417 emit_move_insn (areg, GEN_INT (offset));
27419 /* AltiVec addressing mode is [reg+reg]. */
27420 mem = gen_frame_mem (V4SImode,
27421 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27423 /* Rather than emitting a generic move, force use of the stvx
27424 instruction, which we always want on ISA 2.07 (power8) systems.
27425 In particular we don't want xxpermdi/stxvd2x for little
27426 endian. */
27427 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27430 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27431 areg, GEN_INT (offset));
27435 /* VRSAVE is a bit vector representing which AltiVec registers
27436 are used. The OS uses this to determine which vector
27437 registers to save on a context switch. We need to save
27438 VRSAVE on the stack frame, add whatever AltiVec registers we
27439 used in this function, and do the corresponding magic in the
27440 epilogue. */
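  /* A sketch of what emit_vrsave_prologue arranges (the register choice
     is decided below; slot and bits vary per function):
	 mfspr rN,VRSAVE	-- read the old mask
	 stw   rN,<slot>(1)	-- save it in the frame
	 ori   rN,rN,<bits>	-- add the vector regs this function uses
	 mtspr VRSAVE,rN	-- install the new mask  */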
27442 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27444 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27445 be using r12 as frame_reg_rtx and r11 as the static chain
27446 pointer for nested functions. */
27447 int save_regno = 12;
27448 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27449 && !using_static_chain_p)
27450 save_regno = 11;
27451 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27453 save_regno = 11;
27454 if (using_static_chain_p)
27455 save_regno = 0;
27457 NOT_INUSE (save_regno);
27459 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27462 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27463 if (!TARGET_SINGLE_PIC_BASE
27464 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27465 && !constant_pool_empty_p ())
27466 || (DEFAULT_ABI == ABI_V4
27467 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27468 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27470 /* If emit_load_toc_table will use the link register, we need to save
27471 it. We use R12 for this purpose because emit_load_toc_table
27472 can use register 0. This allows us to use a plain 'blr' to return
27473 from the procedure more often. */
27474 int save_LR_around_toc_setup = (TARGET_ELF
27475 && DEFAULT_ABI == ABI_V4
27476 && flag_pic
27477 && ! info->lr_save_p
27478 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27479 if (save_LR_around_toc_setup)
27481 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27482 rtx tmp = gen_rtx_REG (Pmode, 12);
27484 sp_adjust = 0;
27485 insn = emit_move_insn (tmp, lr);
27486 RTX_FRAME_RELATED_P (insn) = 1;
27488 rs6000_emit_load_toc_table (TRUE);
27490 insn = emit_move_insn (lr, tmp);
27491 add_reg_note (insn, REG_CFA_RESTORE, lr);
27492 RTX_FRAME_RELATED_P (insn) = 1;
27494 else
27495 rs6000_emit_load_toc_table (TRUE);
27498 #if TARGET_MACHO
27499 if (!TARGET_SINGLE_PIC_BASE
27500 && DEFAULT_ABI == ABI_DARWIN
27501 && flag_pic && crtl->uses_pic_offset_table)
27503 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27504 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27506 /* Save and restore LR locally around this call (in R0). */
27507 if (!info->lr_save_p)
27508 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27510 emit_insn (gen_load_macho_picbase (src));
27512 emit_move_insn (gen_rtx_REG (Pmode,
27513 RS6000_PIC_OFFSET_TABLE_REGNUM),
27514 lr);
27516 if (!info->lr_save_p)
27517 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27519 #endif
27521 /* If we need to, save the TOC register after doing the stack setup.
27522 Do not emit eh frame info for this save. The unwinder wants info,
27523 conceptually attached to instructions in this function, about
27524 register values in the caller of this function. This R2 may have
27525 already been changed from the value in the caller.
27526 We don't attempt to write accurate DWARF EH frame info for R2
27527 because code emitted by gcc for a (non-pointer) function call
27528 doesn't save and restore R2. Instead, R2 is managed out-of-line
27529 by a linker generated plt call stub when the function resides in
27530 a shared library. This behavior is costly to describe in DWARF,
27531 both in terms of the size of DWARF info and the time taken in the
27532 unwinder to interpret it. R2 changes, apart from the
27533 calls_eh_return case earlier in this function, are handled by
27534 linux-unwind.h frob_update_context. */
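  /* Concretely, the store emitted below is a plain "st{w,d} 2,<slot>(1)"
     that is deliberately not marked RTX_FRAME_RELATED_P, so no CFI is
     generated for it.  */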
27535 if (rs6000_save_toc_in_prologue_p ())
27537 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27538 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27541 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27542 if (using_split_stack && split_stack_arg_pointer_used_p ())
27543 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27546 /* Output .extern statements for the save/restore routines we use. */
27548 static void
27549 rs6000_output_savres_externs (FILE *file)
27551 rs6000_stack_t *info = rs6000_stack_info ();
27553 if (TARGET_DEBUG_STACK)
27554 debug_stack_info (info);
27556 /* Write .extern for any function we will call to save and restore
27557 fp values. */
27558 if (info->first_fp_reg_save < 64
27559 && !TARGET_MACHO
27560 && !TARGET_ELF)
27562 char *name;
27563 int regno = info->first_fp_reg_save - 32;
27565 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27567 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27568 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27569 name = rs6000_savres_routine_name (regno, sel);
27570 fprintf (file, "\t.extern %s\n", name);
27572 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27574 bool lr = (info->savres_strategy
27575 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27576 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27577 name = rs6000_savres_routine_name (regno, sel);
27578 fprintf (file, "\t.extern %s\n", name);
27583 /* Write function prologue. */
27585 static void
27586 rs6000_output_function_prologue (FILE *file)
27588 if (!cfun->is_thunk)
27589 rs6000_output_savres_externs (file);
27591 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27592 immediately after the global entry point label. */
27593 if (rs6000_global_entry_point_needed_p ())
27595 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27597 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27599 if (TARGET_CMODEL != CMODEL_LARGE)
27601 /* In the small and medium code models, we assume the TOC is less
27602 than 2 GB away from the text section, so it can be computed via the
27603 following two-instruction sequence. */
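	  /* The fprintfs below produce, e.g.:
		 0:	addis 2,12,.TOC.-.LCF0@ha
			addi  2,2,.TOC.-.LCF0@l
	     computing the TOC pointer from the entry address in r12.  */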
27604 char buf[256];
27606 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27607 fprintf (file, "0:\taddis 2,12,.TOC.-");
27608 assemble_name (file, buf);
27609 fprintf (file, "@ha\n");
27610 fprintf (file, "\taddi 2,2,.TOC.-");
27611 assemble_name (file, buf);
27612 fprintf (file, "@l\n");
27614 else
27616 /* In the large code model, we allow arbitrary offsets between the
27617 TOC and the text section, so we have to load the offset from
27618 memory. The data field is emitted directly before the global
27619 entry point in rs6000_elf_declare_function_name. */
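	  /* The emitted sequence is, e.g.:
		 ld  2,.LCL0-.LCF0(12)
		 add 2,2,12
	     loading the TOC offset from the .LCL data word.  */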
27620 char buf[256];
27622 #ifdef HAVE_AS_ENTRY_MARKERS
27623 /* If supported by the linker, emit a marker relocation. If the
27624 total code size of the final executable or shared library
27625 happens to fit into 2 GB after all, the linker will replace
27626 this code sequence with the sequence for the small or medium
27627 code model. */
27628 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27629 #endif
27630 fprintf (file, "\tld 2,");
27631 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27632 assemble_name (file, buf);
27633 fprintf (file, "-");
27634 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27635 assemble_name (file, buf);
27636 fprintf (file, "(12)\n");
27637 fprintf (file, "\tadd 2,2,12\n");
27640 fputs ("\t.localentry\t", file);
27641 assemble_name (file, name);
27642 fputs (",.-", file);
27643 assemble_name (file, name);
27644 fputs ("\n", file);
27647 /* Output -mprofile-kernel code. This needs to be done here instead of
27648 in output_function_profile since it must go after the ELFv2 ABI
27649 local entry point. */
27650 if (TARGET_PROFILE_KERNEL && crtl->profile)
27652 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27653 gcc_assert (!TARGET_32BIT);
27655 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27657 /* In the ELFv2 ABI we have no compiler stack word. It must be
27658 the responsibility of _mcount to preserve the static chain
27659 register if required. */
27660 if (DEFAULT_ABI != ABI_ELFv2
27661 && cfun->static_chain_decl != NULL)
27663 asm_fprintf (file, "\tstd %s,24(%s)\n",
27664 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27665 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27666 asm_fprintf (file, "\tld %s,24(%s)\n",
27667 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27669 else
27670 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27673 rs6000_pic_labelno++;
27676 /* -mprofile-kernel code calls mcount before the function prologue,
27677 so a profiled leaf function should stay a leaf function. */
27678 static bool
27679 rs6000_keep_leaf_when_profiled ()
27681 return TARGET_PROFILE_KERNEL;
27684 /* Non-zero if vmx regs are restored before the frame pop, zero if
27685 we restore after the pop when possible. */
27686 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27688 /* Restoring cr is a two-step process: loading a reg from the frame
27689 save, then moving the reg to cr. For ABI_V4 we must let the
27690 unwinder know that the stack location is no longer valid at or
27691 before the stack deallocation, but we can't emit a cfa_restore for
27692 cr at the stack deallocation like we do for other registers.
27693 The trouble is that it is possible for the move to cr to be
27694 scheduled after the stack deallocation. So say exactly where cr
27695 is located on each of the two insns. */
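/* Illustrative shape of the two insns (offsets and registers vary):
       lwz   12,<cr slot>(1)	-- load_cr_save
       mtcrf 0x20,12		-- restore_saved_cr, per CR field
   with the CFA notes described above attached to them.  */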
27697 static rtx
27698 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27700 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27701 rtx reg = gen_rtx_REG (SImode, regno);
27702 rtx_insn *insn = emit_move_insn (reg, mem);
27704 if (!exit_func && DEFAULT_ABI == ABI_V4)
27706 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27707 rtx set = gen_rtx_SET (reg, cr);
27709 add_reg_note (insn, REG_CFA_REGISTER, set);
27710 RTX_FRAME_RELATED_P (insn) = 1;
27712 return reg;
27715 /* Reload CR from REG. */
27717 static void
27718 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27720 int count = 0;
27721 int i;
27723 if (using_mfcr_multiple)
27725 for (i = 0; i < 8; i++)
27726 if (save_reg_p (CR0_REGNO + i))
27727 count++;
27728 gcc_assert (count);
27731 if (using_mfcr_multiple && count > 1)
27733 rtx_insn *insn;
27734 rtvec p;
27735 int ndx;
27737 p = rtvec_alloc (count);
27739 ndx = 0;
27740 for (i = 0; i < 8; i++)
27741 if (save_reg_p (CR0_REGNO + i))
27743 rtvec r = rtvec_alloc (2);
27744 RTVEC_ELT (r, 0) = reg;
27745 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27746 RTVEC_ELT (p, ndx) =
27747 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27748 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27749 ndx++;
27751 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27752 gcc_assert (ndx == count);
27754 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27755 CR field separately. */
27756 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27758 for (i = 0; i < 8; i++)
27759 if (save_reg_p (CR0_REGNO + i))
27760 add_reg_note (insn, REG_CFA_RESTORE,
27761 gen_rtx_REG (SImode, CR0_REGNO + i));
27763 RTX_FRAME_RELATED_P (insn) = 1;
27766 else
27767 for (i = 0; i < 8; i++)
27768 if (save_reg_p (CR0_REGNO + i))
27770 rtx insn = emit_insn (gen_movsi_to_cr_one
27771 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27773 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27774 CR field separately, attached to the insn that in fact
27775 restores this particular CR field. */
27776 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27778 add_reg_note (insn, REG_CFA_RESTORE,
27779 gen_rtx_REG (SImode, CR0_REGNO + i));
27781 RTX_FRAME_RELATED_P (insn) = 1;
27785 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27786 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27787 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27789 rtx_insn *insn = get_last_insn ();
27790 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27792 add_reg_note (insn, REG_CFA_RESTORE, cr);
27793 RTX_FRAME_RELATED_P (insn) = 1;
27797 /* Like cr, the move to lr instruction can be scheduled after the
27798 stack deallocation, but unlike cr, its stack frame save is still
27799 valid. So we only need to emit the cfa_restore on the correct
27800 instruction. */
27802 static void
27803 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27805 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27806 rtx reg = gen_rtx_REG (Pmode, regno);
27808 emit_move_insn (reg, mem);
27811 static void
27812 restore_saved_lr (int regno, bool exit_func)
27814 rtx reg = gen_rtx_REG (Pmode, regno);
27815 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27816 rtx_insn *insn = emit_move_insn (lr, reg);
27818 if (!exit_func && flag_shrink_wrap)
27820 add_reg_note (insn, REG_CFA_RESTORE, lr);
27821 RTX_FRAME_RELATED_P (insn) = 1;
27825 static rtx
27826 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27828 if (DEFAULT_ABI == ABI_ELFv2)
27830 int i;
27831 for (i = 0; i < 8; i++)
27832 if (save_reg_p (CR0_REGNO + i))
27834 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27835 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27836 cfa_restores);
27839 else if (info->cr_save_p)
27840 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27841 gen_rtx_REG (SImode, CR2_REGNO),
27842 cfa_restores);
27844 if (info->lr_save_p)
27845 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27846 gen_rtx_REG (Pmode, LR_REGNO),
27847 cfa_restores);
27848 return cfa_restores;
27851 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27852 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288
27853 bytes below the stack pointer that are not clobbered by signals. */
27855 static inline bool
27856 offset_below_red_zone_p (HOST_WIDE_INT offset)
27858 return offset < (DEFAULT_ABI == ABI_V4
27859 ? 0
27860 : TARGET_32BIT ? -220 : -288);
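/* Example: on a 64-bit AIX/ELF target, a save slot at offset -304 from
   the incoming stack pointer lies beyond the 288-byte red zone, so it
   must be reloaded before the stack pointer is popped.  */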
27863 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27865 static void
27866 emit_cfa_restores (rtx cfa_restores)
27868 rtx_insn *insn = get_last_insn ();
27869 rtx *loc = &REG_NOTES (insn);
27871 while (*loc)
27872 loc = &XEXP (*loc, 1);
27873 *loc = cfa_restores;
27874 RTX_FRAME_RELATED_P (insn) = 1;
27877 /* Emit function epilogue as insns. */
27879 void
27880 rs6000_emit_epilogue (int sibcall)
27882 rs6000_stack_t *info;
27883 int restoring_GPRs_inline;
27884 int restoring_FPRs_inline;
27885 int using_load_multiple;
27886 int using_mtcr_multiple;
27887 int use_backchain_to_restore_sp;
27888 int restore_lr;
27889 int strategy;
27890 HOST_WIDE_INT frame_off = 0;
27891 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27892 rtx frame_reg_rtx = sp_reg_rtx;
27893 rtx cfa_restores = NULL_RTX;
27894 rtx insn;
27895 rtx cr_save_reg = NULL_RTX;
27896 machine_mode reg_mode = Pmode;
27897 int reg_size = TARGET_32BIT ? 4 : 8;
27898 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27899 ? DFmode : SFmode;
27900 int fp_reg_size = 8;
27901 int i;
27902 bool exit_func;
27903 unsigned ptr_regno;
27905 info = rs6000_stack_info ();
27907 strategy = info->savres_strategy;
27908 using_load_multiple = strategy & REST_MULTIPLE;
27909 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27910 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27911 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
27912 || rs6000_cpu == PROCESSOR_PPC603
27913 || rs6000_cpu == PROCESSOR_PPC750
27914 || optimize_size);
27915 /* Restore via the backchain when we have a large frame, since this
27916 is more efficient than an addis, addi pair. The second condition
27917 here will not trigger at the moment; we don't actually need a
27918 frame pointer for alloca, but the generic parts of the compiler
27919 give us one anyway. */
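  /* E.g. for a 40000-byte frame, "ld 11,0(1)" recovers the old stack
     pointer from the backchain word with a single load, where rebuilding
     it would take an addis/addi pair over r1.  */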
27920 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27921 ? info->lr_save_offset
27922 : 0) > 32767
27923 || (cfun->calls_alloca
27924 && !frame_pointer_needed));
27925 restore_lr = (info->lr_save_p
27926 && (restoring_FPRs_inline
27927 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27928 && (restoring_GPRs_inline
27929 || info->first_fp_reg_save < 64)
27930 && !cfun->machine->lr_is_wrapped_separately);
27933 if (WORLD_SAVE_P (info))
27935 int i, j;
27936 char rname[30];
27937 const char *alloc_rname;
27938 rtvec p;
27940 /* eh_rest_world_r10 will return to the location saved in the LR
27941 stack slot (which is not likely to be our caller).
27942 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27943 rest_world is similar, except any R10 parameter is ignored.
27944 The exception-handling stuff that was here in 2.95 is no
27945 longer necessary. */
27947 p = rtvec_alloc (9
27948 + 32 - info->first_gp_reg_save
27949 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27950 + 63 + 1 - info->first_fp_reg_save);
27952 strcpy (rname, ((crtl->calls_eh_return) ?
27953 "*eh_rest_world_r10" : "*rest_world"));
27954 alloc_rname = ggc_strdup (rname);
27956 j = 0;
27957 RTVEC_ELT (p, j++) = ret_rtx;
27958 RTVEC_ELT (p, j++)
27959 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27960 /* The instruction pattern requires a clobber here;
27961 it is shared with the restVEC helper. */
27962 RTVEC_ELT (p, j++)
27963 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27966 /* CR register traditionally saved as CR2. */
27967 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27968 RTVEC_ELT (p, j++)
27969 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27970 if (flag_shrink_wrap)
27972 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27973 gen_rtx_REG (Pmode, LR_REGNO),
27974 cfa_restores);
27975 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27979 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27981 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27982 RTVEC_ELT (p, j++)
27983 = gen_frame_load (reg,
27984 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27985 if (flag_shrink_wrap
27986 && save_reg_p (info->first_gp_reg_save + i))
27987 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27989 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27991 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27992 RTVEC_ELT (p, j++)
27993 = gen_frame_load (reg,
27994 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27995 if (flag_shrink_wrap
27996 && save_reg_p (info->first_altivec_reg_save + i))
27997 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27999 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28001 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28002 ? DFmode : SFmode),
28003 info->first_fp_reg_save + i);
28004 RTVEC_ELT (p, j++)
28005 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28006 if (flag_shrink_wrap
28007 && save_reg_p (info->first_fp_reg_save + i))
28008 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28010 RTVEC_ELT (p, j++)
28011 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28012 RTVEC_ELT (p, j++)
28013 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28014 RTVEC_ELT (p, j++)
28015 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28016 RTVEC_ELT (p, j++)
28017 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28018 RTVEC_ELT (p, j++)
28019 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28020 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28022 if (flag_shrink_wrap)
28024 REG_NOTES (insn) = cfa_restores;
28025 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28026 RTX_FRAME_RELATED_P (insn) = 1;
28028 return;
28031 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28032 if (info->push_p)
28033 frame_off = info->total_size;
28035 /* Restore AltiVec registers if we must do so before adjusting the
28036 stack. */
28037 if (info->altivec_size != 0
28038 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28039 || (DEFAULT_ABI != ABI_V4
28040 && offset_below_red_zone_p (info->altivec_save_offset))))
28042 int i;
28043 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28045 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28046 if (use_backchain_to_restore_sp)
28048 int frame_regno = 11;
28050 if ((strategy & REST_INLINE_VRS) == 0)
28052 /* Of r11 and r12, select the one not clobbered by an
28053 out-of-line restore function for the frame register. */
28054 frame_regno = 11 + 12 - scratch_regno;
28056 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28057 emit_move_insn (frame_reg_rtx,
28058 gen_rtx_MEM (Pmode, sp_reg_rtx));
28059 frame_off = 0;
28061 else if (frame_pointer_needed)
28062 frame_reg_rtx = hard_frame_pointer_rtx;
28064 if ((strategy & REST_INLINE_VRS) == 0)
28066 int end_save = info->altivec_save_offset + info->altivec_size;
28067 int ptr_off;
28068 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28069 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28071 if (end_save + frame_off != 0)
28073 rtx offset = GEN_INT (end_save + frame_off);
28075 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28077 else
28078 emit_move_insn (ptr_reg, frame_reg_rtx);
28080 ptr_off = -end_save;
28081 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28082 info->altivec_save_offset + ptr_off,
28083 0, V4SImode, SAVRES_VR);
28085 else
28087 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28088 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28090 rtx addr, areg, mem, insn;
28091 rtx reg = gen_rtx_REG (V4SImode, i);
28092 HOST_WIDE_INT offset
28093 = (info->altivec_save_offset + frame_off
28094 + 16 * (i - info->first_altivec_reg_save));
28096 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28098 mem = gen_frame_mem (V4SImode,
28099 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28100 GEN_INT (offset)));
28101 insn = gen_rtx_SET (reg, mem);
28103 else
28105 areg = gen_rtx_REG (Pmode, 0);
28106 emit_move_insn (areg, GEN_INT (offset));
28108 /* AltiVec addressing mode is [reg+reg]. */
28109 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28110 mem = gen_frame_mem (V4SImode, addr);
28112 /* Rather than emitting a generic move, force use of the
28113 lvx instruction, which we always want. In particular we
28114 don't want lxvd2x/xxpermdi for little endian. */
28115 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28118 (void) emit_insn (insn);
28122 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28123 if (((strategy & REST_INLINE_VRS) == 0
28124 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28125 && (flag_shrink_wrap
28126 || (offset_below_red_zone_p
28127 (info->altivec_save_offset
28128 + 16 * (i - info->first_altivec_reg_save))))
28129 && save_reg_p (i))
28131 rtx reg = gen_rtx_REG (V4SImode, i);
28132 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28136 /* Restore VRSAVE if we must do so before adjusting the stack. */
28137 if (info->vrsave_size != 0
28138 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28139 || (DEFAULT_ABI != ABI_V4
28140 && offset_below_red_zone_p (info->vrsave_save_offset))))
28142 rtx reg;
28144 if (frame_reg_rtx == sp_reg_rtx)
28146 if (use_backchain_to_restore_sp)
28148 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28149 emit_move_insn (frame_reg_rtx,
28150 gen_rtx_MEM (Pmode, sp_reg_rtx));
28151 frame_off = 0;
28153 else if (frame_pointer_needed)
28154 frame_reg_rtx = hard_frame_pointer_rtx;
28157 reg = gen_rtx_REG (SImode, 12);
28158 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28159 info->vrsave_save_offset + frame_off));
28161 emit_insn (generate_set_vrsave (reg, info, 1));
28164 insn = NULL_RTX;
28165 /* If we have a large stack frame, restore the old stack pointer
28166 using the backchain. */
28167 if (use_backchain_to_restore_sp)
28169 if (frame_reg_rtx == sp_reg_rtx)
28171 /* Under V.4, don't reset the stack pointer until after we're done
28172 loading the saved registers. */
28173 if (DEFAULT_ABI == ABI_V4)
28174 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28176 insn = emit_move_insn (frame_reg_rtx,
28177 gen_rtx_MEM (Pmode, sp_reg_rtx));
28178 frame_off = 0;
28180 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28181 && DEFAULT_ABI == ABI_V4)
28182 /* frame_reg_rtx has been set up by the altivec restore. */
28184 else
28186 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28187 frame_reg_rtx = sp_reg_rtx;
28190 /* If we have a frame pointer, we can restore the old stack pointer
28191 from it. */
28192 else if (frame_pointer_needed)
28194 frame_reg_rtx = sp_reg_rtx;
28195 if (DEFAULT_ABI == ABI_V4)
28196 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28197 /* Prevent reordering memory accesses against stack pointer restore. */
28198 else if (cfun->calls_alloca
28199 || offset_below_red_zone_p (-info->total_size))
28200 rs6000_emit_stack_tie (frame_reg_rtx, true);
28202 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28203 GEN_INT (info->total_size)));
28204 frame_off = 0;
28206 else if (info->push_p
28207 && DEFAULT_ABI != ABI_V4
28208 && !crtl->calls_eh_return)
28210 /* Prevent reordering memory accesses against stack pointer restore. */
28211 if (cfun->calls_alloca
28212 || offset_below_red_zone_p (-info->total_size))
28213 rs6000_emit_stack_tie (frame_reg_rtx, false);
28214 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28215 GEN_INT (info->total_size)));
28216 frame_off = 0;
28218 if (insn && frame_reg_rtx == sp_reg_rtx)
28220 if (cfa_restores)
28222 REG_NOTES (insn) = cfa_restores;
28223 cfa_restores = NULL_RTX;
28225 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28226 RTX_FRAME_RELATED_P (insn) = 1;
28229 /* Restore AltiVec registers if we have not done so already. */
28230 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28231 && info->altivec_size != 0
28232 && (DEFAULT_ABI == ABI_V4
28233 || !offset_below_red_zone_p (info->altivec_save_offset)))
28235 int i;
28237 if ((strategy & REST_INLINE_VRS) == 0)
28239 int end_save = info->altivec_save_offset + info->altivec_size;
28240 int ptr_off;
28241 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28242 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28243 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28245 if (end_save + frame_off != 0)
28247 rtx offset = GEN_INT (end_save + frame_off);
28249 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28251 else
28252 emit_move_insn (ptr_reg, frame_reg_rtx);
28254 ptr_off = -end_save;
28255 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28256 info->altivec_save_offset + ptr_off,
28257 0, V4SImode, SAVRES_VR);
28258 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28260 /* Frame reg was clobbered by out-of-line save. Restore it
28261 from ptr_reg, and if we are calling an out-of-line gpr or
28262 fpr restore, set up the correct pointer and offset. */
28263 unsigned newptr_regno = 1;
28264 if (!restoring_GPRs_inline)
28266 bool lr = info->gp_save_offset + info->gp_size == 0;
28267 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28268 newptr_regno = ptr_regno_for_savres (sel);
28269 end_save = info->gp_save_offset + info->gp_size;
28271 else if (!restoring_FPRs_inline)
28273 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28274 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28275 newptr_regno = ptr_regno_for_savres (sel);
28276 end_save = info->fp_save_offset + info->fp_size;
28279 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28280 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28282 if (end_save + ptr_off != 0)
28284 rtx offset = GEN_INT (end_save + ptr_off);
28286 frame_off = -end_save;
28287 if (TARGET_32BIT)
28288 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28289 ptr_reg, offset));
28290 else
28291 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28292 ptr_reg, offset));
28294 else
28296 frame_off = ptr_off;
28297 emit_move_insn (frame_reg_rtx, ptr_reg);
28301 else
28303 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28304 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28306 rtx addr, areg, mem, insn;
28307 rtx reg = gen_rtx_REG (V4SImode, i);
28308 HOST_WIDE_INT offset
28309 = (info->altivec_save_offset + frame_off
28310 + 16 * (i - info->first_altivec_reg_save));
28312 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28314 mem = gen_frame_mem (V4SImode,
28315 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28316 GEN_INT (offset)));
28317 insn = gen_rtx_SET (reg, mem);
28319 else
28321 areg = gen_rtx_REG (Pmode, 0);
28322 emit_move_insn (areg, GEN_INT (offset));
28324 /* AltiVec addressing mode is [reg+reg]. */
28325 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28326 mem = gen_frame_mem (V4SImode, addr);
28328 /* Rather than emitting a generic move, force use of the
28329 lvx instruction, which we always want. In particular we
28330 don't want lxvd2x/xxpermdi for little endian. */
28331 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28334 (void) emit_insn (insn);
28338 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28339 if (((strategy & REST_INLINE_VRS) == 0
28340 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28341 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28342 && save_reg_p (i))
28344 rtx reg = gen_rtx_REG (V4SImode, i);
28345 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28349 /* Restore VRSAVE if we have not done so already. */
28350 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28351 && info->vrsave_size != 0
28352 && (DEFAULT_ABI == ABI_V4
28353 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28355 rtx reg;
28357 reg = gen_rtx_REG (SImode, 12);
28358 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28359 info->vrsave_save_offset + frame_off));
28361 emit_insn (generate_set_vrsave (reg, info, 1));
28364 /* If we exit by an out-of-line restore function on ABI_V4 then that
28365 function will deallocate the stack, so we don't need to worry
28366 about the unwinder restoring cr from an invalid stack frame
28367 location. */
28368 exit_func = (!restoring_FPRs_inline
28369 || (!restoring_GPRs_inline
28370 && info->first_fp_reg_save == 64));
28372 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28373 *separate* slots if the routine calls __builtin_eh_return, so
28374 that they can be independently restored by the unwinder. */
28375 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28377 int i, cr_off = info->ehcr_offset;
28379 for (i = 0; i < 8; i++)
28380 if (!call_used_regs[CR0_REGNO + i])
28382 rtx reg = gen_rtx_REG (SImode, 0);
28383 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28384 cr_off + frame_off));
28386 insn = emit_insn (gen_movsi_to_cr_one
28387 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28389 if (!exit_func && flag_shrink_wrap)
28391 add_reg_note (insn, REG_CFA_RESTORE,
28392 gen_rtx_REG (SImode, CR0_REGNO + i));
28394 RTX_FRAME_RELATED_P (insn) = 1;
28397 cr_off += reg_size;
28401 /* Get the old lr if we saved it. If we are restoring registers
28402 out-of-line, then the out-of-line routines can do this for us. */
28403 if (restore_lr && restoring_GPRs_inline)
28404 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28406 /* Get the old cr if we saved it. */
28407 if (info->cr_save_p)
28409 unsigned cr_save_regno = 12;
28411 if (!restoring_GPRs_inline)
28413 /* Ensure we don't use the register used by the out-of-line
28414 gpr register restore below. */
28415 bool lr = info->gp_save_offset + info->gp_size == 0;
28416 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28417 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28419 if (gpr_ptr_regno == 12)
28420 cr_save_regno = 11;
28421 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28423 else if (REGNO (frame_reg_rtx) == 12)
28424 cr_save_regno = 11;
28426 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28427 info->cr_save_offset + frame_off,
28428 exit_func);
28431 /* Set LR here to try to overlap restores below. */
28432 if (restore_lr && restoring_GPRs_inline)
28433 restore_saved_lr (0, exit_func);
28435 /* Load exception handler data registers, if needed. */
28436 if (crtl->calls_eh_return)
28438 unsigned int i, regno;
28440 if (TARGET_AIX)
28442 rtx reg = gen_rtx_REG (reg_mode, 2);
28443 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28444 frame_off + RS6000_TOC_SAVE_SLOT));
28447 for (i = 0; ; ++i)
28449 rtx mem;
28451 regno = EH_RETURN_DATA_REGNO (i);
28452 if (regno == INVALID_REGNUM)
28453 break;
28455 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28456 info->ehrd_offset + frame_off
28457 + reg_size * (int) i);
28459 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28463 /* Restore GPRs. This is done as a PARALLEL if we are using
28464 the load-multiple instructions. */
28465 if (!restoring_GPRs_inline)
28467 /* We are jumping to an out-of-line function. */
28468 rtx ptr_reg;
28469 int end_save = info->gp_save_offset + info->gp_size;
28470 bool can_use_exit = end_save == 0;
28471 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28472 int ptr_off;
28474 /* Emit stack reset code if we need it. */
28475 ptr_regno = ptr_regno_for_savres (sel);
28476 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28477 if (can_use_exit)
28478 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28479 else if (end_save + frame_off != 0)
28480 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28481 GEN_INT (end_save + frame_off)));
28482 else if (REGNO (frame_reg_rtx) != ptr_regno)
28483 emit_move_insn (ptr_reg, frame_reg_rtx);
28484 if (REGNO (frame_reg_rtx) == ptr_regno)
28485 frame_off = -end_save;
28487 if (can_use_exit && info->cr_save_p)
28488 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28490 ptr_off = -end_save;
28491 rs6000_emit_savres_rtx (info, ptr_reg,
28492 info->gp_save_offset + ptr_off,
28493 info->lr_save_offset + ptr_off,
28494 reg_mode, sel);
28496 else if (using_load_multiple)
28498 rtvec p;
28499 p = rtvec_alloc (32 - info->first_gp_reg_save);
28500 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28501 RTVEC_ELT (p, i)
28502 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28503 frame_reg_rtx,
28504 info->gp_save_offset + frame_off + reg_size * i);
28505 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28507 else
28509 int offset = info->gp_save_offset + frame_off;
28510 for (i = info->first_gp_reg_save; i < 32; i++)
28512 if (save_reg_p (i)
28513 && !cfun->machine->gpr_is_wrapped_separately[i])
28515 rtx reg = gen_rtx_REG (reg_mode, i);
28516 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28519 offset += reg_size;
28523 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28525 /* If the frame pointer was used then we can't delay emitting
28526 a REG_CFA_DEF_CFA note. This must happen on the insn that
28527 restores the frame pointer, r31. We may have already emitted
28528 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28529 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28530 be harmless if emitted. */
28531 if (frame_pointer_needed)
28533 insn = get_last_insn ();
28534 add_reg_note (insn, REG_CFA_DEF_CFA,
28535 plus_constant (Pmode, frame_reg_rtx, frame_off));
28536 RTX_FRAME_RELATED_P (insn) = 1;
28539 /* Set up cfa_restores. We always need these when
28540 shrink-wrapping. If not shrink-wrapping then we only need
28541 the cfa_restore when the stack location is no longer valid.
28542 The cfa_restores must be emitted on or before the insn that
28543 invalidates the stack, and of course must not be emitted
28544 before the insn that actually does the restore. The latter
28545 is why it is a bad idea to emit the cfa_restores as a group
28546 on the last instruction here that actually does a restore:
28547 That insn may be reordered with respect to others doing
28548 restores. */
28549 if (flag_shrink_wrap
28550 && !restoring_GPRs_inline
28551 && info->first_fp_reg_save == 64)
28552 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28554 for (i = info->first_gp_reg_save; i < 32; i++)
28555 if (save_reg_p (i)
28556 && !cfun->machine->gpr_is_wrapped_separately[i])
28558 rtx reg = gen_rtx_REG (reg_mode, i);
28559 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28563 if (!restoring_GPRs_inline
28564 && info->first_fp_reg_save == 64)
28566 /* We are jumping to an out-of-line function. */
28567 if (cfa_restores)
28568 emit_cfa_restores (cfa_restores);
28569 return;
28572 if (restore_lr && !restoring_GPRs_inline)
28574 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28575 restore_saved_lr (0, exit_func);
28578 /* Restore fpr's if we need to do it without calling a function. */
28579 if (restoring_FPRs_inline)
28581 int offset = info->fp_save_offset + frame_off;
28582 for (i = info->first_fp_reg_save; i < 64; i++)
28584 if (save_reg_p (i)
28585 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28587 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28588 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28589 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28590 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28591 cfa_restores);
28594 offset += fp_reg_size;
28598 /* If we saved cr, restore it here. Just those that were used. */
28599 if (info->cr_save_p)
28600 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28602 /* If this is V.4, unwind the stack pointer after all of the loads
28603 have been done, or set up r11 if we are restoring fp out of line. */
28604 ptr_regno = 1;
28605 if (!restoring_FPRs_inline)
28607 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28608 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28609 ptr_regno = ptr_regno_for_savres (sel);
28612 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28613 if (REGNO (frame_reg_rtx) == ptr_regno)
28614 frame_off = 0;
28616 if (insn && restoring_FPRs_inline)
28618 if (cfa_restores)
28620 REG_NOTES (insn) = cfa_restores;
28621 cfa_restores = NULL_RTX;
28623 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28624 RTX_FRAME_RELATED_P (insn) = 1;
28627 if (crtl->calls_eh_return)
28629 rtx sa = EH_RETURN_STACKADJ_RTX;
28630 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28633 if (!sibcall && restoring_FPRs_inline)
28635 if (cfa_restores)
28637 /* We can't hang the cfa_restores off a simple return,
28638 since the shrink-wrap code sometimes uses an existing
28639 return. This means there might be a path from
28640 pre-prologue code to this return, and dwarf2cfi code
28641 wants the eh_frame unwinder state to be the same on
28642 all paths to any point. So we need to emit the
28643 cfa_restores before the return. For -m64 we really
28644 don't need epilogue cfa_restores at all, except for
28645 this irritating dwarf2cfi-with-shrink-wrap
28646 requirement; the stack red-zone means eh_frame info
28647 from the prologue telling the unwinder to restore
28648 from the stack is perfectly good right to the end of
28649 the function. */
28650 emit_insn (gen_blockage ());
28651 emit_cfa_restores (cfa_restores);
28652 cfa_restores = NULL_RTX;
28655 emit_jump_insn (targetm.gen_simple_return ());
28658 if (!sibcall && !restoring_FPRs_inline)
28660 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28661 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28662 int elt = 0;
28663 RTVEC_ELT (p, elt++) = ret_rtx;
28664 if (lr)
28665 RTVEC_ELT (p, elt++)
28666 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28668 /* We have to restore more than two FP registers, so branch to the
28669 restore function. It will return to our caller. */
28670 int i;
28671 int reg;
28672 rtx sym;
28674 if (flag_shrink_wrap)
28675 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28677 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28678 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28679 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28680 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28682 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28684 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28686 RTVEC_ELT (p, elt++)
28687 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28688 if (flag_shrink_wrap
28689 && save_reg_p (info->first_fp_reg_save + i))
28690 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28693 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28696 if (cfa_restores)
28698 if (sibcall)
28699 /* Ensure the cfa_restores are hung off an insn that won't
28700 be reordered above other restores. */
28701 emit_insn (gen_blockage ());
28703 emit_cfa_restores (cfa_restores);
28707 /* Write function epilogue. */
28709 static void
28710 rs6000_output_function_epilogue (FILE *file)
28712 #if TARGET_MACHO
28713 macho_branch_islands ();
28716 rtx_insn *insn = get_last_insn ();
28717 rtx_insn *deleted_debug_label = NULL;
28719 /* Mach-O doesn't support labels at the end of objects, so if
28720 it looks like we might want one, take special action.
28722 First, collect any sequence of deleted debug labels. */
28723 while (insn
28724 && NOTE_P (insn)
28725 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28727 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28728 notes only, instead set their CODE_LABEL_NUMBER to -1,
28729 otherwise there would be code generation differences
28730 between -g and -g0. */
28731 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28732 deleted_debug_label = insn;
28733 insn = PREV_INSN (insn);
28736 /* Second, if we have:
28737 label:
28738 barrier
28739 then this needs to be detected, so skip past the barrier. */
28741 if (insn && BARRIER_P (insn))
28742 insn = PREV_INSN (insn);
28744 /* Up to now we've only seen notes or barriers. */
28745 if (insn)
28747 if (LABEL_P (insn)
28748 || (NOTE_P (insn)
28749 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28750 /* Trailing label: <barrier>. */
28751 fputs ("\tnop\n", file);
28752 else
28754 /* Lastly, see if we have a completely empty function body. */
28755 while (insn && ! INSN_P (insn))
28756 insn = PREV_INSN (insn);
28757 /* If we don't find any insns, we've got an empty function body;
28758 i.e. completely empty, without a return or branch. This is
28759 taken as the case where a function body has been removed
28760 because it contains an inline __builtin_unreachable(). GCC
28761 states that reaching __builtin_unreachable() means UB, so we're
28762 not obliged to do anything special; however, we want
28763 non-zero-sized function bodies. To meet this, and help the
28764 user out, let's trap the case. */
28765 if (insn == NULL)
28766 fputs ("\ttrap\n", file);
28769 else if (deleted_debug_label)
28770 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28771 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28772 CODE_LABEL_NUMBER (insn) = -1;
28774 #endif
28776 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28777 on its format.
28779 We don't output a traceback table if -finhibit-size-directive was
28780 used. The documentation for -finhibit-size-directive reads
28781 ``don't output a @code{.size} assembler directive, or anything
28782 else that would cause trouble if the function is split in the
28783 middle, and the two halves are placed at locations far apart in
28784 memory.'' The traceback table has this property, since it
28785 includes the offset from the start of the function to the
28786 traceback table itself.
28788 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28789 different traceback table. */
28790 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28791 && ! flag_inhibit_size_directive
28792 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28794 const char *fname = NULL;
28795 const char *language_string = lang_hooks.name;
28796 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28797 int i;
28798 int optional_tbtab;
28799 rs6000_stack_t *info = rs6000_stack_info ();
28801 if (rs6000_traceback == traceback_full)
28802 optional_tbtab = 1;
28803 else if (rs6000_traceback == traceback_part)
28804 optional_tbtab = 0;
28805 else
28806 optional_tbtab = !optimize_size && !TARGET_ELF;
28808 if (optional_tbtab)
28810 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28811 while (*fname == '.') /* V.4 encodes . in the name */
28812 fname++;
28814 /* Need label immediately before tbtab, so we can compute
28815 its offset from the function start. */
28816 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28817 ASM_OUTPUT_LABEL (file, fname);
28820 /* The .tbtab pseudo-op can only be used for the first eight
28821 expressions, since it can't handle the possibly variable
28822 length fields that follow. However, if you omit the optional
28823 fields, the assembler outputs zeros for all optional fields
28824 anyway, giving each variable-length field its minimum length
28825 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28826 pseudo-op at all. */
28828 /* An all-zero word flags the start of the tbtab, for debuggers
28829 that have to find it by searching forward from the entry
28830 point or from the current pc. */
28831 fputs ("\t.long 0\n", file);
28833 /* Tbtab format type. Use format type 0. */
28834 fputs ("\t.byte 0,", file);
28836 /* Language type. Unfortunately, there does not seem to be any
28837 official way to discover the language being compiled, so we
28838 use language_string.
28839 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28840 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28841 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28842 either, so for now use 0. */
28843 if (lang_GNU_C ()
28844 || ! strcmp (language_string, "GNU GIMPLE")
28845 || ! strcmp (language_string, "GNU Go")
28846 || ! strcmp (language_string, "libgccjit"))
28847 i = 0;
28848 else if (! strcmp (language_string, "GNU F77")
28849 || lang_GNU_Fortran ())
28850 i = 1;
28851 else if (! strcmp (language_string, "GNU Pascal"))
28852 i = 2;
28853 else if (! strcmp (language_string, "GNU Ada"))
28854 i = 3;
28855 else if (lang_GNU_CXX ()
28856 || ! strcmp (language_string, "GNU Objective-C++"))
28857 i = 9;
28858 else if (! strcmp (language_string, "GNU Java"))
28859 i = 13;
28860 else if (! strcmp (language_string, "GNU Objective-C"))
28861 i = 14;
28862 else
28863 gcc_unreachable ();
28864 fprintf (file, "%d,", i);
28866 /* 8 single bit fields: global linkage (not set for C extern linkage,
28867 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28868 from start of procedure stored in tbtab, internal function, function
28869 has controlled storage, function has no toc, function uses fp,
28870 function logs/aborts fp operations. */
28871 /* Assume that fp operations are used if any fp reg must be saved. */
28872 fprintf (file, "%d,",
28873 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28875 /* 6 bitfields: function is interrupt handler, name present in
28876 proc table, function calls alloca, on condition directives
28877 (controls stack walks, 3 bits), saves condition reg, saves
28878 link reg. */
28879 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28880 set up as a frame pointer, even when there is no alloca call. */
28881 fprintf (file, "%d,",
28882 ((optional_tbtab << 6)
28883 | ((optional_tbtab & frame_pointer_needed) << 5)
28884 | (info->cr_save_p << 1)
28885 | (info->lr_save_p)));
28887 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28888 (6 bits). */
28889 fprintf (file, "%d,",
28890 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28892 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28893 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28895 if (optional_tbtab)
28897 /* Compute the parameter info from the function decl argument
28898 list. */
28899 tree decl;
28900 int next_parm_info_bit = 31;
28902 for (decl = DECL_ARGUMENTS (current_function_decl);
28903 decl; decl = DECL_CHAIN (decl))
28905 rtx parameter = DECL_INCOMING_RTL (decl);
28906 machine_mode mode = GET_MODE (parameter);
28908 if (GET_CODE (parameter) == REG)
28910 if (SCALAR_FLOAT_MODE_P (mode))
28912 int bits;
28914 float_parms++;
28916 switch (mode)
28918 case E_SFmode:
28919 case E_SDmode:
28920 bits = 0x2;
28921 break;
28923 case E_DFmode:
28924 case E_DDmode:
28925 case E_TFmode:
28926 case E_TDmode:
28927 case E_IFmode:
28928 case E_KFmode:
28929 bits = 0x3;
28930 break;
28932 default:
28933 gcc_unreachable ();
28936 /* If only one bit will fit, don't or in this entry. */
28937 if (next_parm_info_bit > 0)
28938 parm_info |= (bits << (next_parm_info_bit - 1));
28939 next_parm_info_bit -= 2;
28941 else
28943 fixed_parms += ((GET_MODE_SIZE (mode)
28944 + (UNITS_PER_WORD - 1))
28945 / UNITS_PER_WORD);
28946 next_parm_info_bit -= 1;
28952 /* Number of fixed point parameters. */
28953 /* This is actually the number of words of fixed point parameters; thus
28954 an 8-byte struct counts as 2, and the maximum value is 8. */
28955 fprintf (file, "%d,", fixed_parms);
28957 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28958 all on stack. */
28959 /* This is actually the number of fp registers that hold parameters;
28960 and thus the maximum value is 13. */
28961 /* Set parameters on stack bit if parameters are not in their original
28962 registers, regardless of whether they are on the stack? Xlc
28963 seems to set the bit when not optimizing. */
28964 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28966 if (optional_tbtab)
28968 /* Optional fields follow. Some are variable length. */
28970 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28971 float, 11 double float. */
28972 /* There is an entry for each parameter in a register, in the order
28973 that they occur in the parameter list. Any intervening arguments
28974 on the stack are ignored. If the list overflows a long (max
28975 possible length 34 bits) then completely leave off all elements
28976 that don't fit. */
28977 /* Only emit this long if there was at least one parameter. */
28978 if (fixed_parms || float_parms)
28979 fprintf (file, "\t.long %d\n", parm_info);
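/* A worked example of the packing above, for a hypothetical
   signature (double, int, float) passed entirely in registers:
   the double stores "11" at bits 31:30, the int leaves bit 29
   as "0" (fixed point), and the float stores "10" at bits 28:27,
   so parm_info holds the bit pattern 0xd0000000.  */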
28981 /* Offset from start of code to tb table. */
28982 fputs ("\t.long ", file);
28983 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28984 RS6000_OUTPUT_BASENAME (file, fname);
28985 putc ('-', file);
28986 rs6000_output_function_entry (file, fname);
28987 putc ('\n', file);
28989 /* Interrupt handler mask. */
28990 /* Omit this long, since we never set the interrupt handler bit
28991 above. */
28993 /* Number of CTL (controlled storage) anchors. */
28994 /* Omit this long, since the has_ctl bit is never set above. */
28996 /* Displacement into stack of each CTL anchor. */
28997 /* Omit this list of longs, because there are no CTL anchors. */
28999 /* Length of function name. */
29000 if (*fname == '*')
29001 ++fname;
29002 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29004 /* Function name. */
29005 assemble_string (fname, strlen (fname));
29007 /* Register for alloca automatic storage; this is always reg 31.
29008 Only emit this if the alloca bit was set above. */
29009 if (frame_pointer_needed)
29010 fputs ("\t.byte 31\n", file);
29012 fputs ("\t.align 2\n", file);
29016 /* Arrange to define .LCTOC1 label, if not already done. */
29017 if (need_toc_init)
29019 need_toc_init = 0;
29020 if (!toc_initialized)
29022 switch_to_section (toc_section);
29023 switch_to_section (current_function_section ());
29028 /* -fsplit-stack support. */
29030 /* A SYMBOL_REF for __morestack. */
29031 static GTY(()) rtx morestack_ref;
29033 static rtx
29034 gen_add3_const (rtx rt, rtx ra, long c)
29036 if (TARGET_64BIT)
29037 return gen_adddi3 (rt, ra, GEN_INT (c));
29038 else
29039 return gen_addsi3 (rt, ra, GEN_INT (c));
29042 /* Emit -fsplit-stack prologue, which goes before the regular function
29043 prologue (at local entry point in the case of ELFv2). */
29045 void
29046 rs6000_expand_split_stack_prologue (void)
29048 rs6000_stack_t *info = rs6000_stack_info ();
29049 unsigned HOST_WIDE_INT allocate;
29050 long alloc_hi, alloc_lo;
29051 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29052 rtx_insn *insn;
29054 gcc_assert (flag_split_stack && reload_completed);
29056 if (!info->push_p)
29057 return;
29059 if (global_regs[29])
29061 error ("%qs uses register r29", "-fsplit-stack");
29062 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29063 "conflicts with %qD", global_regs_decl[29]);
29066 allocate = info->total_size;
29067 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29069 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29070 return;
29072 if (morestack_ref == NULL_RTX)
29074 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29075 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29076 | SYMBOL_FLAG_FUNCTION);
29079 r0 = gen_rtx_REG (Pmode, 0);
29080 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29081 r12 = gen_rtx_REG (Pmode, 12);
29082 emit_insn (gen_load_split_stack_limit (r0));
29083 /* Always emit two insns here to calculate the requested stack,
29084 so that the linker can edit them when adjusting size for calling
29085 non-split-stack code. */
29086 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29087 alloc_lo = -allocate - alloc_hi;
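/* A worked example with a hypothetical frame size: for
   allocate == 0x12345, -allocate == -0x12345, giving
   alloc_hi == -0x10000 and alloc_lo == -0x2345 (-9029); the two
   insns emitted below then compute r12 = r1 - 0x12345, roughly
   "addis 12,1,-1" followed by "addi 12,12,-9029".  */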
29088 if (alloc_hi != 0)
29090 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29091 if (alloc_lo != 0)
29092 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29093 else
29094 emit_insn (gen_nop ());
29096 else
29098 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29099 emit_insn (gen_nop ());
29102 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29103 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29104 ok_label = gen_label_rtx ();
29105 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29106 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29107 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29108 pc_rtx);
29109 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29110 JUMP_LABEL (insn) = ok_label;
29111 /* Mark the jump as very likely to be taken. */
29112 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29114 lr = gen_rtx_REG (Pmode, LR_REGNO);
29115 insn = emit_move_insn (r0, lr);
29116 RTX_FRAME_RELATED_P (insn) = 1;
29117 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29118 RTX_FRAME_RELATED_P (insn) = 1;
29120 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29121 const0_rtx, const0_rtx));
29122 call_fusage = NULL_RTX;
29123 use_reg (&call_fusage, r12);
29124 /* Say the call uses r0, even though it doesn't, to stop regrename
29125 from twiddling with the insns saving lr, trashing args for cfun.
29126 The insns restoring lr are similarly protected by making
29127 split_stack_return use r0. */
29128 use_reg (&call_fusage, r0);
29129 add_function_usage_to (insn, call_fusage);
29130 /* Indicate that this function can't jump to non-local gotos. */
29131 make_reg_eh_region_note_nothrow_nononlocal (insn);
29132 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29133 insn = emit_move_insn (lr, r0);
29134 add_reg_note (insn, REG_CFA_RESTORE, lr);
29135 RTX_FRAME_RELATED_P (insn) = 1;
29136 emit_insn (gen_split_stack_return ());
29138 emit_label (ok_label);
29139 LABEL_NUSES (ok_label) = 1;
29142 /* Return the internal arg pointer used for function incoming
29143 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29144 to copy it to a pseudo in order for it to be preserved over calls
29145 and suchlike. We'd really like to use a pseudo here for the
29146 internal arg pointer but data-flow analysis is not prepared to
29147 accept pseudos as live at the beginning of a function. */
29149 static rtx
29150 rs6000_internal_arg_pointer (void)
29152 if (flag_split_stack
29153 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29154 == NULL))
29157 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29159 rtx pat;
29161 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29162 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29164 /* Put the pseudo initialization right after the note at the
29165 beginning of the function. */
29166 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29167 gen_rtx_REG (Pmode, 12));
29168 push_topmost_sequence ();
29169 emit_insn_after (pat, get_insns ());
29170 pop_topmost_sequence ();
29172 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29173 FIRST_PARM_OFFSET (current_function_decl));
29175 return virtual_incoming_args_rtx;
29178 /* We may have to tell the dataflow pass that the split stack prologue
29179 is initializing a register. */
29181 static void
29182 rs6000_live_on_entry (bitmap regs)
29184 if (flag_split_stack)
29185 bitmap_set_bit (regs, 12);
29188 /* Emit -fsplit-stack dynamic stack allocation space check. */
29190 void
29191 rs6000_split_stack_space_check (rtx size, rtx label)
29193 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29194 rtx limit = gen_reg_rtx (Pmode);
29195 rtx requested = gen_reg_rtx (Pmode);
29196 rtx cmp = gen_reg_rtx (CCUNSmode);
29197 rtx jump;
29199 emit_insn (gen_load_split_stack_limit (limit));
29200 if (CONST_INT_P (size))
29201 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29202 else
29204 size = force_reg (Pmode, size);
29205 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29207 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29208 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29209 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29210 gen_rtx_LABEL_REF (VOIDmode, label),
29211 pc_rtx);
29212 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29213 JUMP_LABEL (jump) = label;
29216 /* A C compound statement that outputs the assembler code for a thunk
29217 function, used to implement C++ virtual function calls with
29218 multiple inheritance. The thunk acts as a wrapper around a virtual
29219 function, adjusting the implicit object parameter before handing
29220 control off to the real function.
29222 First, emit code to add the integer DELTA to the location that
29223 contains the incoming first argument. Assume that this argument
29224 contains a pointer, and is the one used to pass the `this' pointer
29225 in C++. This is the incoming argument *before* the function
29226 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29227 values of all other incoming arguments.
29229 After the addition, emit code to jump to FUNCTION, which is a
29230 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29231 not touch the return address. Hence returning from FUNCTION will
29232 return to whoever called the current `thunk'.
29234 The effect must be as if FUNCTION had been called directly with the
29235 adjusted first argument. This macro is responsible for emitting
29236 all of the code for a thunk function; output_function_prologue()
29237 and output_function_epilogue() are not invoked.
29239 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29240 been extracted from it.) It might possibly be useful on some
29241 targets, but probably not.
29243 If you do not define this macro, the target-independent code in the
29244 C++ frontend will generate a less efficient heavyweight thunk that
29245 calls FUNCTION instead of jumping to it. The generic approach does
29246 not support varargs. */
29248 static void
29249 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29250 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29251 tree function)
29253 rtx this_rtx, funexp;
29254 rtx_insn *insn;
29256 reload_completed = 1;
29257 epilogue_completed = 1;
29259 /* Mark the end of the (empty) prologue. */
29260 emit_note (NOTE_INSN_PROLOGUE_END);
29262 /* Find the "this" pointer. If the function returns a structure,
29263 the structure return pointer is in r3. */
29264 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29265 this_rtx = gen_rtx_REG (Pmode, 4);
29266 else
29267 this_rtx = gen_rtx_REG (Pmode, 3);
29269 /* Apply the constant offset, if required. */
29270 if (delta)
29271 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29273 /* Apply the offset from the vtable, if required. */
29274 if (vcall_offset)
29276 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29277 rtx tmp = gen_rtx_REG (Pmode, 12);
29279 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29280 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29282 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29283 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29285 else
29287 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29289 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29291 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29294 /* Generate a tail call to the target function. */
29295 if (!TREE_USED (function))
29297 assemble_external (function);
29298 TREE_USED (function) = 1;
29300 funexp = XEXP (DECL_RTL (function), 0);
29301 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29303 #if TARGET_MACHO
29304 if (MACHOPIC_INDIRECT)
29305 funexp = machopic_indirect_call_target (funexp);
29306 #endif
29308 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29309 generate sibcall RTL explicitly. */
29310 insn = emit_call_insn (
29311 gen_rtx_PARALLEL (VOIDmode,
29312 gen_rtvec (3,
29313 gen_rtx_CALL (VOIDmode,
29314 funexp, const0_rtx),
29315 gen_rtx_USE (VOIDmode, const0_rtx),
29316 simple_return_rtx)));
29317 SIBLING_CALL_P (insn) = 1;
29318 emit_barrier ();
29320 /* Run just enough of rest_of_compilation to get the insns emitted.
29321 There's not really enough bulk here to make other passes such as
29322 instruction scheduling worthwhile. Note that use_thunk calls
29323 assemble_start_function and assemble_end_function. */
29324 insn = get_insns ();
29325 shorten_branches (insn);
29326 final_start_function (insn, file, 1);
29327 final (insn, file, 1);
29328 final_end_function ();
29330 reload_completed = 0;
29331 epilogue_completed = 0;
29334 /* A quick summary of the various types of 'constant-pool tables'
29335 under PowerPC:
29337 Target Flags Name One table per
29338 AIX (none) AIX TOC object file
29339 AIX -mfull-toc AIX TOC object file
29340 AIX -mminimal-toc AIX minimal TOC translation unit
29341 SVR4/EABI (none) SVR4 SDATA object file
29342 SVR4/EABI -fpic SVR4 pic object file
29343 SVR4/EABI -fPIC SVR4 PIC translation unit
29344 SVR4/EABI -mrelocatable EABI TOC function
29345 SVR4/EABI -maix AIX TOC object file
29346 SVR4/EABI -maix -mminimal-toc
29347 AIX minimal TOC translation unit
29349 Name Reg. Set by entries contains:
29350 made by addrs? fp? sum?
29352 AIX TOC 2 crt0 as Y option option
29353 AIX minimal TOC 30 prolog gcc Y Y option
29354 SVR4 SDATA 13 crt0 gcc N Y N
29355 SVR4 pic 30 prolog ld Y not yet N
29356 SVR4 PIC 30 prolog gcc Y option option
29357 EABI TOC 30 prolog gcc Y option option
29361 /* Hash functions for the hash table. */
29363 static unsigned
29364 rs6000_hash_constant (rtx k)
29366 enum rtx_code code = GET_CODE (k);
29367 machine_mode mode = GET_MODE (k);
29368 unsigned result = (code << 3) ^ mode;
29369 const char *format;
29370 int flen, fidx;
29372 format = GET_RTX_FORMAT (code);
29373 flen = strlen (format);
29374 fidx = 0;
29376 switch (code)
29378 case LABEL_REF:
29379 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29381 case CONST_WIDE_INT:
29383 int i;
29384 flen = CONST_WIDE_INT_NUNITS (k);
29385 for (i = 0; i < flen; i++)
29386 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29387 return result;
29390 case CONST_DOUBLE:
29391 if (mode != VOIDmode)
29392 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29393 flen = 2;
29394 break;
29396 case CODE_LABEL:
29397 fidx = 3;
29398 break;
29400 default:
29401 break;
29404 for (; fidx < flen; fidx++)
29405 switch (format[fidx])
29407 case 's':
29409 unsigned i, len;
29410 const char *str = XSTR (k, fidx);
29411 len = strlen (str);
29412 result = result * 613 + len;
29413 for (i = 0; i < len; i++)
29414 result = result * 613 + (unsigned) str[i];
29415 break;
29417 case 'u':
29418 case 'e':
29419 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29420 break;
29421 case 'i':
29422 case 'n':
29423 result = result * 613 + (unsigned) XINT (k, fidx);
29424 break;
29425 case 'w':
29426 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29427 result = result * 613 + (unsigned) XWINT (k, fidx);
29428 else
29430 size_t i;
29431 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29432 result = result * 613 + (unsigned) (XWINT (k, fidx)
29433 >> CHAR_BIT * i);
29435 break;
29436 case '0':
29437 break;
29438 default:
29439 gcc_unreachable ();
29442 return result;
29445 hashval_t
29446 toc_hasher::hash (toc_hash_struct *thc)
29448 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29451 /* Compare H1 and H2 for equivalence. */
29453 bool
29454 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29456 rtx r1 = h1->key;
29457 rtx r2 = h2->key;
29459 if (h1->key_mode != h2->key_mode)
29460 return 0;
29462 return rtx_equal_p (r1, r2);
29465 /* These are the names given by the C++ front-end to vtables, and
29466 vtable-like objects. Ideally, this logic should not be here;
29467 instead, there should be some programmatic way of inquiring as
29468 to whether or not an object is a vtable. */
29470 #define VTABLE_NAME_P(NAME) \
29471 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29472 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29473 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29474 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29475 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29477 #ifdef NO_DOLLAR_IN_LABEL
29478 /* Return a GGC-allocated character string translating dollar signs in
29479 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29481 const char *
29482 rs6000_xcoff_strip_dollar (const char *name)
29484 char *strip, *p;
29485 const char *q;
29486 size_t len;
29488 q = (const char *) strchr (name, '$');
29490 if (q == 0 || q == name)
29491 return name;
29493 len = strlen (name);
29494 strip = XALLOCAVEC (char, len + 1);
29495 strcpy (strip, name);
29496 p = strip + (q - name);
29497 while (p)
29499 *p = '_';
29500 p = strchr (p + 1, '$');
29503 return ggc_alloc_string (strip, len);
29505 #endif
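/* Usage sketch with hypothetical input: rs6000_xcoff_strip_dollar
   applied to "a$b$c" returns "a_b_c", while "$start" is returned
   unchanged because the '$' is its first character.  */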
29507 void
29508 rs6000_output_symbol_ref (FILE *file, rtx x)
29510 const char *name = XSTR (x, 0);
29512 /* Currently C++ toc references to vtables can be emitted before it
29513 is decided whether the vtable is public or private. If this is
29514 the case, then the linker will eventually complain that there is
29515 a reference to an unknown section. Thus, for vtables only,
29516 we emit the TOC reference to reference the identifier and not the
29517 symbol. */
29518 if (VTABLE_NAME_P (name))
29520 RS6000_OUTPUT_BASENAME (file, name);
29522 else
29523 assemble_name (file, name);
29526 /* Output a TOC entry. We derive the entry name from what is being
29527 written. */
29529 void
29530 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29532 char buf[256];
29533 const char *name = buf;
29534 rtx base = x;
29535 HOST_WIDE_INT offset = 0;
29537 gcc_assert (!TARGET_NO_TOC);
29539 /* When the linker won't eliminate them, don't output duplicate
29540 TOC entries (this happens on AIX if there is any kind of TOC,
29541 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29542 CODE_LABELs. */
29543 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29545 struct toc_hash_struct *h;
29547 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29548 time because GGC is not initialized at that point. */
29549 if (toc_hash_table == NULL)
29550 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29552 h = ggc_alloc<toc_hash_struct> ();
29553 h->key = x;
29554 h->key_mode = mode;
29555 h->labelno = labelno;
29557 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29558 if (*found == NULL)
29559 *found = h;
29560 else /* This is indeed a duplicate.
29561 Set this label equal to that label. */
29563 fputs ("\t.set ", file);
29564 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29565 fprintf (file, "%d,", labelno);
29566 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29567 fprintf (file, "%d\n", ((*found)->labelno));
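/* For example, with hypothetical label numbers: if the same
   constant was first emitted under label 5 and the current labelno
   is 12, the lines above emit something like ".set LC..12,LC..5"
   on XCOFF, aliasing the duplicate label to the original entry.  */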
29569 #ifdef HAVE_AS_TLS
29570 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29571 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29572 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29574 fputs ("\t.set ", file);
29575 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29576 fprintf (file, "%d,", labelno);
29577 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29578 fprintf (file, "%d\n", ((*found)->labelno));
29580 #endif
29581 return;
29585 /* If we're going to put a double constant in the TOC, make sure it's
29586 aligned properly when strict alignment is on. */
29587 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29588 && STRICT_ALIGNMENT
29589 && GET_MODE_BITSIZE (mode) >= 64
29590 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
29591 ASM_OUTPUT_ALIGN (file, 3);
29594 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29596 /* Handle FP constants specially. Note that if we have a minimal
29597 TOC, things we put here aren't actually in the TOC, so we can allow
29598 FP constants. */
29599 if (GET_CODE (x) == CONST_DOUBLE
29600 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29601 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29603 long k[4];
29605 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29606 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29607 else
29608 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29610 if (TARGET_64BIT)
29612 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29613 fputs (DOUBLE_INT_ASM_OP, file);
29614 else
29615 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29616 k[0] & 0xffffffff, k[1] & 0xffffffff,
29617 k[2] & 0xffffffff, k[3] & 0xffffffff);
29618 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29619 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29620 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29621 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29622 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29623 return;
29625 else
29627 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29628 fputs ("\t.long ", file);
29629 else
29630 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29631 k[0] & 0xffffffff, k[1] & 0xffffffff,
29632 k[2] & 0xffffffff, k[3] & 0xffffffff);
29633 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29634 k[0] & 0xffffffff, k[1] & 0xffffffff,
29635 k[2] & 0xffffffff, k[3] & 0xffffffff);
29636 return;
29639 else if (GET_CODE (x) == CONST_DOUBLE
29640 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29642 long k[2];
29644 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29645 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29646 else
29647 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29649 if (TARGET_64BIT)
29651 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29652 fputs (DOUBLE_INT_ASM_OP, file);
29653 else
29654 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29655 k[0] & 0xffffffff, k[1] & 0xffffffff);
29656 fprintf (file, "0x%lx%08lx\n",
29657 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29658 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29659 return;
29661 else
29663 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29664 fputs ("\t.long ", file);
29665 else
29666 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29667 k[0] & 0xffffffff, k[1] & 0xffffffff);
29668 fprintf (file, "0x%lx,0x%lx\n",
29669 k[0] & 0xffffffff, k[1] & 0xffffffff);
29670 return;
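/* A concrete illustration with a hypothetical constant: for the
   DFmode value 1.0, REAL_VALUE_TO_TARGET_DOUBLE yields
   k[0] == 0x3ff00000 and k[1] == 0, so the 64-bit AIX path above
   would emit roughly ".tc FD_3ff00000_0[TC],0x3ff0000000000000".  */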
29673 else if (GET_CODE (x) == CONST_DOUBLE
29674 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29676 long l;
29678 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29679 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29680 else
29681 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29683 if (TARGET_64BIT)
29685 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29686 fputs (DOUBLE_INT_ASM_OP, file);
29687 else
29688 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29689 if (WORDS_BIG_ENDIAN)
29690 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29691 else
29692 fprintf (file, "0x%lx\n", l & 0xffffffff);
29693 return;
29695 else
29697 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29698 fputs ("\t.long ", file);
29699 else
29700 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29701 fprintf (file, "0x%lx\n", l & 0xffffffff);
29702 return;
29705 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29707 unsigned HOST_WIDE_INT low;
29708 HOST_WIDE_INT high;
29710 low = INTVAL (x) & 0xffffffff;
29711 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29713 /* TOC entries are always Pmode-sized, so when big-endian
29714 smaller integer constants in the TOC need to be padded.
29715 (This is still a win over putting the constants in
29716 a separate constant pool, because then we'd have
29717 to have both a TOC entry _and_ the actual constant.)
29719 For a 32-bit target, CONST_INT values are loaded and shifted
29720 entirely within `low' and can be stored in one TOC entry. */
29722 /* It would be easy to make this work, but it doesn't now. */
29723 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29725 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29727 low |= high << 32;
29728 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29729 high = (HOST_WIDE_INT) low >> 32;
29730 low &= 0xffffffff;
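/* Worked example (hypothetical): a CONST_INT 5 of SImode in a
   64-bit big-endian TOC starts as low == 5, high == 0; the shift
   above turns that into high == 5, low == 0, placing the 32-bit
   value in the first four bytes of the 8-byte TOC slot.  */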
29733 if (TARGET_64BIT)
29735 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29736 fputs (DOUBLE_INT_ASM_OP, file);
29737 else
29738 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29739 (long) high & 0xffffffff, (long) low & 0xffffffff);
29740 fprintf (file, "0x%lx%08lx\n",
29741 (long) high & 0xffffffff, (long) low & 0xffffffff);
29742 return;
29744 else
29746 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29748 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29749 fputs ("\t.long ", file);
29750 else
29751 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29752 (long) high & 0xffffffff, (long) low & 0xffffffff);
29753 fprintf (file, "0x%lx,0x%lx\n",
29754 (long) high & 0xffffffff, (long) low & 0xffffffff);
29756 else
29758 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29759 fputs ("\t.long ", file);
29760 else
29761 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29762 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29764 return;
29768 if (GET_CODE (x) == CONST)
29770 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29771 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29773 base = XEXP (XEXP (x, 0), 0);
29774 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29777 switch (GET_CODE (base))
29779 case SYMBOL_REF:
29780 name = XSTR (base, 0);
29781 break;
29783 case LABEL_REF:
29784 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29785 CODE_LABEL_NUMBER (XEXP (base, 0)));
29786 break;
29788 case CODE_LABEL:
29789 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29790 break;
29792 default:
29793 gcc_unreachable ();
29796 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29797 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29798 else
29800 fputs ("\t.tc ", file);
29801 RS6000_OUTPUT_BASENAME (file, name);
29803 if (offset < 0)
29804 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29805 else if (offset)
29806 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29808 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29809 after other TOC symbols, reducing overflow of small TOC access
29810 to [TC] symbols. */
29811 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29812 ? "[TE]," : "[TC],", file);
29815 /* Currently C++ toc references to vtables can be emitted before it
29816 is decided whether the vtable is public or private. If this is
29817 the case, then the linker will eventually complain that there is
29818 a TOC reference to an unknown section. Thus, for vtables only,
29819 we emit the TOC reference to reference the symbol and not the
29820 section. */
29821 if (VTABLE_NAME_P (name))
29823 RS6000_OUTPUT_BASENAME (file, name);
29824 if (offset < 0)
29825 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29826 else if (offset > 0)
29827 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29829 else
29830 output_addr_const (file, x);
29832 #if HAVE_AS_TLS
29833 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29835 switch (SYMBOL_REF_TLS_MODEL (base))
29837 case 0:
29838 break;
29839 case TLS_MODEL_LOCAL_EXEC:
29840 fputs ("@le", file);
29841 break;
29842 case TLS_MODEL_INITIAL_EXEC:
29843 fputs ("@ie", file);
29844 break;
29845 /* Use global-dynamic for local-dynamic. */
29846 case TLS_MODEL_GLOBAL_DYNAMIC:
29847 case TLS_MODEL_LOCAL_DYNAMIC:
29848 putc ('\n', file);
29849 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29850 fputs ("\t.tc .", file);
29851 RS6000_OUTPUT_BASENAME (file, name);
29852 fputs ("[TC],", file);
29853 output_addr_const (file, x);
29854 fputs ("@m", file);
29855 break;
29856 default:
29857 gcc_unreachable ();
29860 #endif
29862 putc ('\n', file);
29865 /* Output an assembler pseudo-op to write an ASCII string of N characters
29866 starting at P to FILE.
29868 On the RS/6000, we have to do this using the .byte operation and
29869 write out special characters outside the quoted string.
29870 Also, the assembler is broken; very long strings are truncated,
29871 so we must artificially break them up early. */
29873 void
29874 output_ascii (FILE *file, const char *p, int n)
29876 char c;
29877 int i, count_string;
29878 const char *for_string = "\t.byte \"";
29879 const char *for_decimal = "\t.byte ";
29880 const char *to_close = NULL;
29882 count_string = 0;
29883 for (i = 0; i < n; i++)
29885 c = *p++;
29886 if (c >= ' ' && c < 0177)
29888 if (for_string)
29889 fputs (for_string, file);
29890 putc (c, file);
29892 /* Write two quotes to get one. */
29893 if (c == '"')
29895 putc (c, file);
29896 ++count_string;
29899 for_string = NULL;
29900 for_decimal = "\"\n\t.byte ";
29901 to_close = "\"\n";
29902 ++count_string;
29904 if (count_string >= 512)
29906 fputs (to_close, file);
29908 for_string = "\t.byte \"";
29909 for_decimal = "\t.byte ";
29910 to_close = NULL;
29911 count_string = 0;
29914 else
29916 if (for_decimal)
29917 fputs (for_decimal, file);
29918 fprintf (file, "%d", c);
29920 for_string = "\n\t.byte \"";
29921 for_decimal = ", ";
29922 to_close = "\n";
29923 count_string = 0;
29927 /* Now close the string if we have written one. Then end the line. */
29928 if (to_close)
29929 fputs (to_close, file);
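/* An illustrative call (hypothetical): output_ascii (file, "Hi\n", 3)
   emits
	.byte "Hi"
	.byte 10
   -- the printable characters share one quoted .byte string, and the
   newline is written as its decimal value on a separate .byte.  */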
29932 /* Generate a unique section name for FILENAME for a section type
29933 represented by SECTION_DESC. Output goes into BUF.
29935 SECTION_DESC can be any string, as long as it is different for each
29936 possible section type.
29938 We name the section in the same manner as xlc. The name begins with an
29939 underscore followed by the filename (after stripping any leading directory
29940 names) with the last period replaced by the string SECTION_DESC. If
29941 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29942 the name. */
29944 void
29945 rs6000_gen_section_name (char **buf, const char *filename,
29946 const char *section_desc)
29948 const char *q, *after_last_slash, *last_period = 0;
29949 char *p;
29950 int len;
29952 after_last_slash = filename;
29953 for (q = filename; *q; q++)
29955 if (*q == '/')
29956 after_last_slash = q + 1;
29957 else if (*q == '.')
29958 last_period = q;
29961 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29962 *buf = (char *) xmalloc (len);
29964 p = *buf;
29965 *p++ = '_';
29967 for (q = after_last_slash; *q; q++)
29969 if (q == last_period)
29971 strcpy (p, section_desc);
29972 p += strlen (section_desc);
29973 break;
29976 else if (ISALNUM (*q))
29977 *p++ = *q;
29980 if (last_period == 0)
29981 strcpy (p, section_desc);
29982 else
29983 *p = '\0';
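/* Example with hypothetical arguments: for filename "../src/foo.c"
   and section_desc "_bss_", the buffer becomes "_foo_bss_": the
   directory part is dropped, the alphanumeric characters of "foo"
   are copied, and "_bss_" replaces everything from the last period
   on.  */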
29986 /* Emit profile function. */
29988 void
29989 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29991 /* Non-standard profiling for kernels, which just saves LR then calls
29992 _mcount without worrying about arg saves. The idea is to change
29993 the function prologue as little as possible as it isn't easy to
29994 account for arg save/restore code added just for _mcount. */
29995 if (TARGET_PROFILE_KERNEL)
29996 return;
29998 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30000 #ifndef NO_PROFILE_COUNTERS
30001 # define NO_PROFILE_COUNTERS 0
30002 #endif
30003 if (NO_PROFILE_COUNTERS)
30004 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30005 LCT_NORMAL, VOIDmode);
30006 else
30008 char buf[30];
30009 const char *label_name;
30010 rtx fun;
30012 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30013 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30014 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30016 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30017 LCT_NORMAL, VOIDmode, fun, Pmode);
30020 else if (DEFAULT_ABI == ABI_DARWIN)
30022 const char *mcount_name = RS6000_MCOUNT;
30023 int caller_addr_regno = LR_REGNO;
30025 /* Be conservative and always set this, at least for now. */
30026 crtl->uses_pic_offset_table = 1;
30028 #if TARGET_MACHO
30029 /* For PIC code, set up a stub and collect the caller's address
30030 from r0, which is where the prologue puts it. */
30031 if (MACHOPIC_INDIRECT
30032 && crtl->uses_pic_offset_table)
30033 caller_addr_regno = 0;
30034 #endif
30035 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30036 LCT_NORMAL, VOIDmode,
30037 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30041 /* Write function profiler code. */
30043 void
30044 output_function_profiler (FILE *file, int labelno)
30046 char buf[100];
30048 switch (DEFAULT_ABI)
30050 default:
30051 gcc_unreachable ();
30053 case ABI_V4:
30054 if (!TARGET_32BIT)
30056 warning (0, "no profiling of 64-bit code for this ABI");
30057 return;
30059 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30060 fprintf (file, "\tmflr %s\n", reg_names[0]);
30061 if (NO_PROFILE_COUNTERS)
30063 asm_fprintf (file, "\tstw %s,4(%s)\n",
30064 reg_names[0], reg_names[1]);
30066 else if (TARGET_SECURE_PLT && flag_pic)
30068 if (TARGET_LINK_STACK)
30070 char name[32];
30071 get_ppc476_thunk_name (name);
30072 asm_fprintf (file, "\tbl %s\n", name);
30074 else
30075 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30076 asm_fprintf (file, "\tstw %s,4(%s)\n",
30077 reg_names[0], reg_names[1]);
30078 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30079 asm_fprintf (file, "\taddis %s,%s,",
30080 reg_names[12], reg_names[12]);
30081 assemble_name (file, buf);
30082 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30083 assemble_name (file, buf);
30084 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30086 else if (flag_pic == 1)
30088 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30089 asm_fprintf (file, "\tstw %s,4(%s)\n",
30090 reg_names[0], reg_names[1]);
30091 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30092 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30093 assemble_name (file, buf);
30094 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30096 else if (flag_pic > 1)
30098 asm_fprintf (file, "\tstw %s,4(%s)\n",
30099 reg_names[0], reg_names[1]);
30100 /* Now, we need to get the address of the label. */
30101 if (TARGET_LINK_STACK)
30103 char name[32];
30104 get_ppc476_thunk_name (name);
30105 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30106 assemble_name (file, buf);
30107 fputs ("-.\n1:", file);
30108 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30109 asm_fprintf (file, "\taddi %s,%s,4\n",
30110 reg_names[11], reg_names[11]);
30112 else
30114 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30115 assemble_name (file, buf);
30116 fputs ("-.\n1:", file);
30117 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30119 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30120 reg_names[0], reg_names[11]);
30121 asm_fprintf (file, "\tadd %s,%s,%s\n",
30122 reg_names[0], reg_names[0], reg_names[11]);
30124 else
30126 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30127 assemble_name (file, buf);
30128 fputs ("@ha\n", file);
30129 asm_fprintf (file, "\tstw %s,4(%s)\n",
30130 reg_names[0], reg_names[1]);
30131 asm_fprintf (file, "\tla %s,", reg_names[0]);
30132 assemble_name (file, buf);
30133 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30136 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30137 fprintf (file, "\tbl %s%s\n",
30138 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30139 break;
30141 case ABI_AIX:
30142 case ABI_ELFv2:
30143 case ABI_DARWIN:
30144 /* Don't do anything, done in output_profile_hook (). */
30145 break;
30151 /* The following variable value is the last issued insn. */
30153 static rtx_insn *last_scheduled_insn;
30155 /* The following variable helps to balance issuing of load and
30156 store instructions. */
30158 static int load_store_pendulum;
30160 /* The following variable helps pair divide insns during scheduling. */
30161 static int divide_cnt;
30162 /* The following variable helps pair and alternate vector and vector load
30163 insns during scheduling. */
30164 static int vec_pairing;
30167 /* Power4 load update and store update instructions are cracked into a
30168 load or store and an integer insn which are executed in the same cycle.
30169 Branches have their own dispatch slot which does not count against the
30170 GCC issue rate, but it changes the program flow so there are no other
30171 instructions to issue in this cycle. */
30173 static int
30174 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30176 last_scheduled_insn = insn;
30177 if (GET_CODE (PATTERN (insn)) == USE
30178 || GET_CODE (PATTERN (insn)) == CLOBBER)
30180 cached_can_issue_more = more;
30181 return cached_can_issue_more;
30184 if (insn_terminates_group_p (insn, current_group))
30186 cached_can_issue_more = 0;
30187 return cached_can_issue_more;
30190 /* If the insn has no reservation but we reach here anyway, return MORE unchanged. */
30191 if (recog_memoized (insn) < 0)
30192 return more;
30194 if (rs6000_sched_groups)
30196 if (is_microcoded_insn (insn))
30197 cached_can_issue_more = 0;
30198 else if (is_cracked_insn (insn))
30199 cached_can_issue_more = more > 2 ? more - 2 : 0;
30200 else
30201 cached_can_issue_more = more - 1;
30203 return cached_can_issue_more;
30206 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30207 return 0;
30209 cached_can_issue_more = more - 1;
30210 return cached_can_issue_more;
30213 static int
30214 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30216 int r = rs6000_variable_issue_1 (insn, more);
30217 if (verbose)
30218 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30219 return r;
30222 /* Adjust the cost of a scheduling dependency. Return the new cost of
30223 the DEP_TYPE dependency of INSN on DEP_INSN. COST is the current cost. */
30225 static int
30226 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30227 unsigned int)
30229 enum attr_type attr_type;
30231 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30232 return cost;
30234 switch (dep_type)
30236 case REG_DEP_TRUE:
30238 /* Data dependency; DEP_INSN writes a register that INSN reads
30239 some cycles later. */
30241 /* Separate a load from a narrower, dependent store. */
30242 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30243 && GET_CODE (PATTERN (insn)) == SET
30244 && GET_CODE (PATTERN (dep_insn)) == SET
30245 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30246 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30247 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30248 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30249 return cost + 14;
30251 attr_type = get_attr_type (insn);
30253 switch (attr_type)
30255 case TYPE_JMPREG:
30256 /* Tell the first scheduling pass about the latency between
30257 a mtctr and bctr (and mtlr and br/blr). The first
30258 scheduling pass will not know about this latency since
30259 the mtctr instruction, which has the latency associated
30260 to it, will be generated by reload. */
30261 return 4;
30262 case TYPE_BRANCH:
30263 /* Leave some extra cycles between a compare and its
30264 dependent branch, to inhibit expensive mispredicts. */
30265 if ((rs6000_cpu_attr == CPU_PPC603
30266 || rs6000_cpu_attr == CPU_PPC604
30267 || rs6000_cpu_attr == CPU_PPC604E
30268 || rs6000_cpu_attr == CPU_PPC620
30269 || rs6000_cpu_attr == CPU_PPC630
30270 || rs6000_cpu_attr == CPU_PPC750
30271 || rs6000_cpu_attr == CPU_PPC7400
30272 || rs6000_cpu_attr == CPU_PPC7450
30273 || rs6000_cpu_attr == CPU_PPCE5500
30274 || rs6000_cpu_attr == CPU_PPCE6500
30275 || rs6000_cpu_attr == CPU_POWER4
30276 || rs6000_cpu_attr == CPU_POWER5
30277 || rs6000_cpu_attr == CPU_POWER7
30278 || rs6000_cpu_attr == CPU_POWER8
30279 || rs6000_cpu_attr == CPU_POWER9
30280 || rs6000_cpu_attr == CPU_CELL)
30281 && recog_memoized (dep_insn)
30282 && (INSN_CODE (dep_insn) >= 0))
30284 switch (get_attr_type (dep_insn))
30286 case TYPE_CMP:
30287 case TYPE_FPCOMPARE:
30288 case TYPE_CR_LOGICAL:
30289 case TYPE_DELAYED_CR:
30290 return cost + 2;
30291 case TYPE_EXTS:
30292 case TYPE_MUL:
30293 if (get_attr_dot (dep_insn) == DOT_YES)
30294 return cost + 2;
30295 else
30296 break;
30297 case TYPE_SHIFT:
30298 if (get_attr_dot (dep_insn) == DOT_YES
30299 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30300 return cost + 2;
30301 else
30302 break;
30303 default:
30304 break;
30306 break;
30308 case TYPE_STORE:
30309 case TYPE_FPSTORE:
30310 if ((rs6000_cpu == PROCESSOR_POWER6)
30311 && recog_memoized (dep_insn)
30312 && (INSN_CODE (dep_insn) >= 0))
30315 if (GET_CODE (PATTERN (insn)) != SET)
30316 /* If this happens, we have to extend this to schedule
30317 optimally. Return default for now. */
30318 return cost;
30320 /* Adjust the cost for the case where the value written
30321 by a fixed point operation is used as the address
30322 gen value on a store. */
30323 switch (get_attr_type (dep_insn))
30325 case TYPE_LOAD:
30326 case TYPE_CNTLZ:
30328 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30329 return get_attr_sign_extend (dep_insn)
30330 == SIGN_EXTEND_YES ? 6 : 4;
30331 break;
30333 case TYPE_SHIFT:
30335 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30336 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30337 6 : 3;
30338 break;
30340 case TYPE_INTEGER:
30341 case TYPE_ADD:
30342 case TYPE_LOGICAL:
30343 case TYPE_EXTS:
30344 case TYPE_INSERT:
30346 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30347 return 3;
30348 break;
30350 case TYPE_STORE:
30351 case TYPE_FPLOAD:
30352 case TYPE_FPSTORE:
30354 if (get_attr_update (dep_insn) == UPDATE_YES
30355 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30356 return 3;
30357 break;
30359 case TYPE_MUL:
30361 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30362 return 17;
30363 break;
30365 case TYPE_DIV:
30367 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30368 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30369 break;
30371 default:
30372 break;
30375 break;
30377 case TYPE_LOAD:
30378 if ((rs6000_cpu == PROCESSOR_POWER6)
30379 && recog_memoized (dep_insn)
30380 && (INSN_CODE (dep_insn) >= 0))
30383 /* Adjust the cost for the case where the value written
30384 by a fixed point instruction is used within the address
30385 gen portion of a subsequent load(u)(x). */
30386 switch (get_attr_type (dep_insn))
30388 case TYPE_LOAD:
30389 case TYPE_CNTLZ:
30391 if (set_to_load_agen (dep_insn, insn))
30392 return get_attr_sign_extend (dep_insn)
30393 == SIGN_EXTEND_YES ? 6 : 4;
30394 break;
30396 case TYPE_SHIFT:
30398 if (set_to_load_agen (dep_insn, insn))
30399 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30400 6 : 3;
30401 break;
30403 case TYPE_INTEGER:
30404 case TYPE_ADD:
30405 case TYPE_LOGICAL:
30406 case TYPE_EXTS:
30407 case TYPE_INSERT:
30409 if (set_to_load_agen (dep_insn, insn))
30410 return 3;
30411 break;
30413 case TYPE_STORE:
30414 case TYPE_FPLOAD:
30415 case TYPE_FPSTORE:
30417 if (get_attr_update (dep_insn) == UPDATE_YES
30418 && set_to_load_agen (dep_insn, insn))
30419 return 3;
30420 break;
30422 case TYPE_MUL:
30424 if (set_to_load_agen (dep_insn, insn))
30425 return 17;
30426 break;
30428 case TYPE_DIV:
30430 if (set_to_load_agen (dep_insn, insn))
30431 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30432 break;
30434 default:
30435 break;
30438 break;
30440 case TYPE_FPLOAD:
30441 if ((rs6000_cpu == PROCESSOR_POWER6)
30442 && get_attr_update (insn) == UPDATE_NO
30443 && recog_memoized (dep_insn)
30444 && (INSN_CODE (dep_insn) >= 0)
30445 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30446 return 2;
30448 default:
30449 break;
30452 /* Fall out to return default cost. */
30454 break;
30456 case REG_DEP_OUTPUT:
30457 /* Output dependency; DEP_INSN writes a register that INSN writes some
30458 cycles later. */
30459 if ((rs6000_cpu == PROCESSOR_POWER6)
30460 && recog_memoized (dep_insn)
30461 && (INSN_CODE (dep_insn) >= 0))
30463 attr_type = get_attr_type (insn);
30465 switch (attr_type)
30467 case TYPE_FP:
30468 case TYPE_FPSIMPLE:
30469 if (get_attr_type (dep_insn) == TYPE_FP
30470 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30471 return 1;
30472 break;
30473 case TYPE_FPLOAD:
30474 if (get_attr_update (insn) == UPDATE_NO
30475 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30476 return 2;
30477 break;
30478 default:
30479 break;
30482 /* Fall through, no cost for output dependency. */
30483 /* FALLTHRU */
30485 case REG_DEP_ANTI:
30486 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30487 cycles later. */
30488 return 0;
30490 default:
30491 gcc_unreachable ();
30494 return cost;
30497 /* Debug version of rs6000_adjust_cost. */
30499 static int
30500 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30501 int cost, unsigned int dw)
30503 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30505 if (ret != cost)
30507 const char *dep;
30509 switch (dep_type)
30511 default: dep = "unknown dependency"; break;
30512 case REG_DEP_TRUE: dep = "data dependency"; break;
30513 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30514 case REG_DEP_ANTI: dep = "anti dependency"; break;
30517 fprintf (stderr,
30518 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30519 "%s, insn:\n", ret, cost, dep);
30521 debug_rtx (insn);
30524 return ret;
30527 /* Return true if INSN is microcoded.
30528 Return false otherwise. */
30530 static bool
30531 is_microcoded_insn (rtx_insn *insn)
30533 if (!insn || !NONDEBUG_INSN_P (insn)
30534 || GET_CODE (PATTERN (insn)) == USE
30535 || GET_CODE (PATTERN (insn)) == CLOBBER)
30536 return false;
30538 if (rs6000_cpu_attr == CPU_CELL)
30539 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30541 if (rs6000_sched_groups
30542 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30544 enum attr_type type = get_attr_type (insn);
30545 if ((type == TYPE_LOAD
30546 && get_attr_update (insn) == UPDATE_YES
30547 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30548 || ((type == TYPE_LOAD || type == TYPE_STORE)
30549 && get_attr_update (insn) == UPDATE_YES
30550 && get_attr_indexed (insn) == INDEXED_YES)
30551 || type == TYPE_MFCR)
30552 return true;
30555 return false;
30558 /* The function returns true if INSN is cracked into 2 instructions
30559 by the processor (and therefore occupies 2 issue slots). */
30561 static bool
30562 is_cracked_insn (rtx_insn *insn)
30564 if (!insn || !NONDEBUG_INSN_P (insn)
30565 || GET_CODE (PATTERN (insn)) == USE
30566 || GET_CODE (PATTERN (insn)) == CLOBBER)
30567 return false;
30569 if (rs6000_sched_groups
30570 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30572 enum attr_type type = get_attr_type (insn);
30573 if ((type == TYPE_LOAD
30574 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30575 && get_attr_update (insn) == UPDATE_NO)
30576 || (type == TYPE_LOAD
30577 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30578 && get_attr_update (insn) == UPDATE_YES
30579 && get_attr_indexed (insn) == INDEXED_NO)
30580 || (type == TYPE_STORE
30581 && get_attr_update (insn) == UPDATE_YES
30582 && get_attr_indexed (insn) == INDEXED_NO)
30583 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30584 && get_attr_update (insn) == UPDATE_YES)
30585 || type == TYPE_DELAYED_CR
30586 || (type == TYPE_EXTS
30587 && get_attr_dot (insn) == DOT_YES)
30588 || (type == TYPE_SHIFT
30589 && get_attr_dot (insn) == DOT_YES
30590 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30591 || (type == TYPE_MUL
30592 && get_attr_dot (insn) == DOT_YES)
30593 || type == TYPE_DIV
30594 || (type == TYPE_INSERT
30595 && get_attr_size (insn) == SIZE_32))
30596 return true;
30599 return false;
30602 /* The function returns true if INSN can be issued only from
30603 the branch slot. */
30605 static bool
30606 is_branch_slot_insn (rtx_insn *insn)
30608 if (!insn || !NONDEBUG_INSN_P (insn)
30609 || GET_CODE (PATTERN (insn)) == USE
30610 || GET_CODE (PATTERN (insn)) == CLOBBER)
30611 return false;
30613 if (rs6000_sched_groups)
30615 enum attr_type type = get_attr_type (insn);
30616 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30617 return true;
30618 return false;
30621 return false;
30624 /* Return true if OUT_INSN sets a value that is
30625 used in the address generation computation of IN_INSN. */
30626 static bool
30627 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30629 rtx out_set, in_set;
30631 /* For performance reasons, only handle the simple case where
30632 both insns are a single_set. */
30633 out_set = single_set (out_insn);
30634 if (out_set)
30636 in_set = single_set (in_insn);
30637 if (in_set)
30638 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30641 return false;
30644 /* Try to determine base/offset/size parts of the given MEM.
30645 Return true if successful, false if any of the values cannot
30646 be determined.
30648 This function only looks for REG or REG+CONST address forms.
30649 REG+REG address form will return false. */
30651 static bool
30652 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30653 HOST_WIDE_INT *size)
30655 rtx addr_rtx;
30656 if (MEM_SIZE_KNOWN_P (mem))
30657 *size = MEM_SIZE (mem);
30658 else
30659 return false;
30661 addr_rtx = (XEXP (mem, 0));
30662 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30663 addr_rtx = XEXP (addr_rtx, 1);
30665 *offset = 0;
30666 while (GET_CODE (addr_rtx) == PLUS
30667 && CONST_INT_P (XEXP (addr_rtx, 1)))
30669 *offset += INTVAL (XEXP (addr_rtx, 1));
30670 addr_rtx = XEXP (addr_rtx, 0);
30672 if (!REG_P (addr_rtx))
30673 return false;
30675 *base = addr_rtx;
30676 return true;
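/* Illustration with hypothetical RTL: for a MEM of the form
   (mem:SI (plus:DI (reg r9) (const_int 16))) whose size is known to
   be 4 bytes, this sets *base to r9, *offset to 16 and *size to 4;
   a reg+reg address such as (plus (reg r9) (reg r10)) makes it
   return false.  */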
30679 /* Return true if the target storage location of MEM1 is
30680 adjacent to the target storage location of MEM2. */
30683 static bool
30684 adjacent_mem_locations (rtx mem1, rtx mem2)
30686 rtx reg1, reg2;
30687 HOST_WIDE_INT off1, size1, off2, size2;
30689 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30690 && get_memref_parts (mem2, &reg2, &off2, &size2))
30691 return ((REGNO (reg1) == REGNO (reg2))
30692 && ((off1 + size1 == off2)
30693 || (off2 + size2 == off1)));
30695 return false;
30698 /* This function returns true if it can be determined that the two MEM
30699 locations overlap by at least 1 byte based on base reg/offset/size. */
30701 static bool
30702 mem_locations_overlap (rtx mem1, rtx mem2)
30704 rtx reg1, reg2;
30705 HOST_WIDE_INT off1, size1, off2, size2;
30707 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30708 && get_memref_parts (mem2, &reg2, &off2, &size2))
30709 return ((REGNO (reg1) == REGNO (reg2))
30710 && (((off1 <= off2) && (off1 + size1 > off2))
30711 || ((off2 <= off1) && (off2 + size2 > off1))));
30713 return false;
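/* For instance, with hypothetical operands: two 8-byte accesses off
   the same base register at offsets 8 and 16 are adjacent
   (8 + 8 == 16) but do not overlap; at offsets 8 and 12 they are
   not adjacent but do overlap, since 8 <= 12 and 8 + 8 > 12.  */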
30716 /* A C statement (sans semicolon) to update the integer scheduling
30717 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30718 INSN earlier, reduce the priority to execute INSN later. Do not
30719 define this macro if you do not need to adjust the scheduling
30720 priorities of insns. */
30722 static int
30723 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30725 rtx load_mem, str_mem;
30726 /* On machines (like the 750) which have asymmetric integer units,
30727 where one integer unit can do multiply and divides and the other
30728 can't, reduce the priority of multiply/divide so it is scheduled
30729 before other integer operations. */
30731 #if 0
30732 if (! INSN_P (insn))
30733 return priority;
30735 if (GET_CODE (PATTERN (insn)) == USE)
30736 return priority;
30738 switch (rs6000_cpu_attr) {
30739 case CPU_PPC750:
30740 switch (get_attr_type (insn))
30742 default:
30743 break;
30745 case TYPE_MUL:
30746 case TYPE_DIV:
30747 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30748 priority, priority);
30749 if (priority >= 0 && priority < 0x01000000)
30750 priority >>= 3;
30751 break;
30754 #endif
30756 if (insn_must_be_first_in_group (insn)
30757 && reload_completed
30758 && current_sched_info->sched_max_insns_priority
30759 && rs6000_sched_restricted_insns_priority)
30762 /* Prioritize insns that can be dispatched only in the first
30763 dispatch slot. */
30764 if (rs6000_sched_restricted_insns_priority == 1)
30765 /* Attach highest priority to insn. This means that in
30766 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30767 precede 'priority' (critical path) considerations. */
30768 return current_sched_info->sched_max_insns_priority;
30769 else if (rs6000_sched_restricted_insns_priority == 2)
30770 /* Increase priority of insn by a minimal amount. This means that in
30771 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30772 considerations precede dispatch-slot restriction considerations. */
30773 return (priority + 1);
30776 if (rs6000_cpu == PROCESSOR_POWER6
30777 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30778 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30779 /* Attach highest priority to insn if the scheduler has just issued two
30780 stores and this instruction is a load, or two loads and this instruction
30781 is a store. Power6 wants loads and stores scheduled alternately
30782 when possible. */
30783 return current_sched_info->sched_max_insns_priority;
30785 return priority;
30788 /* Return true if the instruction is nonpipelined on the Cell. */
30789 static bool
30790 is_nonpipeline_insn (rtx_insn *insn)
30792 enum attr_type type;
30793 if (!insn || !NONDEBUG_INSN_P (insn)
30794 || GET_CODE (PATTERN (insn)) == USE
30795 || GET_CODE (PATTERN (insn)) == CLOBBER)
30796 return false;
30798 type = get_attr_type (insn);
30799 if (type == TYPE_MUL
30800 || type == TYPE_DIV
30801 || type == TYPE_SDIV
30802 || type == TYPE_DDIV
30803 || type == TYPE_SSQRT
30804 || type == TYPE_DSQRT
30805 || type == TYPE_MFCR
30806 || type == TYPE_MFCRF
30807 || type == TYPE_MFJMPR)
30809 return true;
30811 return false;
30815 /* Return how many instructions the machine can issue per cycle. */
30817 static int
30818 rs6000_issue_rate (void)
30820 /* Unless scheduling for register pressure, use issue rate of 1 for
30821 first scheduling pass to decrease degradation. */
30822 if (!reload_completed && !flag_sched_pressure)
30823 return 1;
30825 switch (rs6000_cpu_attr) {
30826 case CPU_RS64A:
30827 case CPU_PPC601: /* ? */
30828 case CPU_PPC7450:
30829 return 3;
30830 case CPU_PPC440:
30831 case CPU_PPC603:
30832 case CPU_PPC750:
30833 case CPU_PPC7400:
30834 case CPU_PPC8540:
30835 case CPU_PPC8548:
30836 case CPU_CELL:
30837 case CPU_PPCE300C2:
30838 case CPU_PPCE300C3:
30839 case CPU_PPCE500MC:
30840 case CPU_PPCE500MC64:
30841 case CPU_PPCE5500:
30842 case CPU_PPCE6500:
30843 case CPU_TITAN:
30844 return 2;
30845 case CPU_PPC476:
30846 case CPU_PPC604:
30847 case CPU_PPC604E:
30848 case CPU_PPC620:
30849 case CPU_PPC630:
30850 return 4;
30851 case CPU_POWER4:
30852 case CPU_POWER5:
30853 case CPU_POWER6:
30854 case CPU_POWER7:
30855 return 5;
30856 case CPU_POWER8:
30857 return 7;
30858 case CPU_POWER9:
30859 return 6;
30860 default:
30861 return 1;
30865 /* Return how many instructions to look ahead for better insn
30866 scheduling. */
30868 static int
30869 rs6000_use_sched_lookahead (void)
30871 switch (rs6000_cpu_attr)
30873 case CPU_PPC8540:
30874 case CPU_PPC8548:
30875 return 4;
30877 case CPU_CELL:
30878 return (reload_completed ? 8 : 0);
30880 default:
30881 return 0;
30885 /* We are choosing insn from the ready queue. Return zero if INSN can be
30886 chosen. */
30887 static int
30888 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30890 if (ready_index == 0)
30891 return 0;
30893 if (rs6000_cpu_attr != CPU_CELL)
30894 return 0;
30896 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30898 if (!reload_completed
30899 || is_nonpipeline_insn (insn)
30900 || is_microcoded_insn (insn))
30901 return 1;
30903 return 0;
30906 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30907 and return true. */
30909 static bool
30910 find_mem_ref (rtx pat, rtx *mem_ref)
30912 const char * fmt;
30913 int i, j;
30915 /* stack_tie does not produce any real memory traffic. */
30916 if (tie_operand (pat, VOIDmode))
30917 return false;
30919 if (GET_CODE (pat) == MEM)
30921 *mem_ref = pat;
30922 return true;
30925 /* Recursively process the pattern. */
30926 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30928 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30930 if (fmt[i] == 'e')
30932 if (find_mem_ref (XEXP (pat, i), mem_ref))
30933 return true;
30935 else if (fmt[i] == 'E')
30936 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30938 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30939 return true;
30943 return false;
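/* For illustration: the walk above is the standard GET_RTX_FORMAT /
   GET_RTX_LENGTH recursion over 'e' and 'E' operands.  Below is a
   stand-alone sketch of the same pattern against a hypothetical toy
   expression type, so it can be compiled outside of GCC; "struct expr",
   "EXPR_MEM" and the field names are invented for the sketch.  */

#include <stdbool.h>
#include <stddef.h>

struct expr { int code; int nkids; struct expr *kid[4]; };
enum { EXPR_MEM = 1 };

static bool
find_mem_expr (struct expr *e, struct expr **mem_ref)
{
  if (e == NULL)
    return false;

  if (e->code == EXPR_MEM)
    {
      *mem_ref = e;
      return true;
    }

  /* Recurse over the children, as the 'e'/'E' loop above does.  */
  for (int i = e->nkids - 1; i >= 0; i--)
    if (find_mem_expr (e->kid[i], mem_ref))
      return true;

  return false;
}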
30946 /* Determine if PAT is a PATTERN of a load insn. */
30948 static bool
30949 is_load_insn1 (rtx pat, rtx *load_mem)
30951   if (!pat)
30952 return false;
30954 if (GET_CODE (pat) == SET)
30955 return find_mem_ref (SET_SRC (pat), load_mem);
30957 if (GET_CODE (pat) == PARALLEL)
30959 int i;
30961 for (i = 0; i < XVECLEN (pat, 0); i++)
30962 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30963 return true;
30966 return false;
30969 /* Determine if INSN loads from memory. */
30971 static bool
30972 is_load_insn (rtx insn, rtx *load_mem)
30974 if (!insn || !INSN_P (insn))
30975 return false;
30977 if (CALL_P (insn))
30978 return false;
30980 return is_load_insn1 (PATTERN (insn), load_mem);
30983 /* Determine if PAT is a PATTERN of a store insn. */
30985 static bool
30986 is_store_insn1 (rtx pat, rtx *str_mem)
30988   if (!pat)
30989 return false;
30991 if (GET_CODE (pat) == SET)
30992 return find_mem_ref (SET_DEST (pat), str_mem);
30994 if (GET_CODE (pat) == PARALLEL)
30996 int i;
30998 for (i = 0; i < XVECLEN (pat, 0); i++)
30999 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31000 return true;
31003 return false;
31006 /* Determine if INSN stores to memory. */
31008 static bool
31009 is_store_insn (rtx insn, rtx *str_mem)
31011 if (!insn || !INSN_P (insn))
31012 return false;
31014 return is_store_insn1 (PATTERN (insn), str_mem);
31017 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31019 static bool
31020 is_power9_pairable_vec_type (enum attr_type type)
31022 switch (type)
31024 case TYPE_VECSIMPLE:
31025 case TYPE_VECCOMPLEX:
31026 case TYPE_VECDIV:
31027 case TYPE_VECCMP:
31028 case TYPE_VECPERM:
31029 case TYPE_VECFLOAT:
31030 case TYPE_VECFDIV:
31031 case TYPE_VECDOUBLE:
31032 return true;
31033 default:
31034 break;
31036 return false;
31039 /* Returns whether the dependence between INSN and NEXT is considered
31040 costly by the given target. */
31042 static bool
31043 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31045 rtx insn;
31046 rtx next;
31047 rtx load_mem, str_mem;
31049 /* If the flag is not enabled - no dependence is considered costly;
31050 allow all dependent insns in the same group.
31051 This is the most aggressive option. */
31052 if (rs6000_sched_costly_dep == no_dep_costly)
31053 return false;
31055 /* If the flag is set to 1 - a dependence is always considered costly;
31056 do not allow dependent instructions in the same group.
31057 This is the most conservative option. */
31058 if (rs6000_sched_costly_dep == all_deps_costly)
31059 return true;
31061 insn = DEP_PRO (dep);
31062 next = DEP_CON (dep);
31064 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31065 && is_load_insn (next, &load_mem)
31066 && is_store_insn (insn, &str_mem))
31067 /* Prevent load after store in the same group. */
31068 return true;
31070 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31071 && is_load_insn (next, &load_mem)
31072 && is_store_insn (insn, &str_mem)
31073 && DEP_TYPE (dep) == REG_DEP_TRUE
31074 && mem_locations_overlap(str_mem, load_mem))
31075 /* Prevent load after store in the same group if it is a true
31076 dependence. */
31077 return true;
31079 /* The flag is set to X; dependences with latency >= X are considered costly,
31080 and will not be scheduled in the same group. */
31081 if (rs6000_sched_costly_dep <= max_dep_latency
31082 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31083 return true;
31085 return false;
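/* For illustration, a stand-alone sketch of the policy ladder above,
   using plain ints instead of dep_t and omitting the two store-to-load
   special cases.  The numeric encoding here is invented for the sketch
   and is not the real option encoding: 0 means nothing is costly, 1
   means everything is costly, and any larger value acts as a latency
   threshold.  */

static int
costly_dep_sketch (int policy, int cost, int distance)
{
  if (policy == 0)
    return 0;                           /* no dependence is costly */
  if (policy == 1)
    return 1;                           /* every dependence is costly */
  return (cost - distance) >= policy;   /* latency-threshold variant */
}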
31088 /* Return the next insn after INSN that is found before TAIL is reached,
31089 skipping any "non-active" insns - insns that will not actually occupy
31090 an issue slot. Return NULL_RTX if such an insn is not found. */
31092 static rtx_insn *
31093 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31095 if (insn == NULL_RTX || insn == tail)
31096 return NULL;
31098 while (1)
31100 insn = NEXT_INSN (insn);
31101 if (insn == NULL_RTX || insn == tail)
31102 return NULL;
31104 if (CALL_P (insn)
31105 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31106 || (NONJUMP_INSN_P (insn)
31107 && GET_CODE (PATTERN (insn)) != USE
31108 && GET_CODE (PATTERN (insn)) != CLOBBER
31109 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31110 break;
31112 return insn;
31115 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31117 static int
31118 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31120 int pos;
31121 int i;
31122 rtx_insn *tmp;
31123 enum attr_type type, type2;
31125 type = get_attr_type (last_scheduled_insn);
31127 /* Try to issue fixed point divides back-to-back in pairs so they will be
31128 routed to separate execution units and execute in parallel. */
31129 if (type == TYPE_DIV && divide_cnt == 0)
31131 /* First divide has been scheduled. */
31132 divide_cnt = 1;
31134 /* Scan the ready list looking for another divide, if found move it
31135 to the end of the list so it is chosen next. */
31136 pos = lastpos;
31137 while (pos >= 0)
31139 if (recog_memoized (ready[pos]) >= 0
31140 && get_attr_type (ready[pos]) == TYPE_DIV)
31142 tmp = ready[pos];
31143 for (i = pos; i < lastpos; i++)
31144 ready[i] = ready[i + 1];
31145 ready[lastpos] = tmp;
31146 break;
31148 pos--;
31151 else
31153 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31154 divide_cnt = 0;
31156 /* The best dispatch throughput for vector and vector load insns can be
31157 achieved by interleaving a vector and vector load such that they'll
31158 dispatch to the same superslice. If this pairing cannot be achieved
31159 then it is best to pair vector insns together and vector load insns
31160 together.
31162 To aid in this pairing, vec_pairing maintains the current state with
31163 the following values:
31165 0 : Initial state, no vecload/vector pairing has been started.
31167 1 : A vecload or vector insn has been issued and a candidate for
31168 pairing has been found and moved to the end of the ready
31169 list. */
31170 if (type == TYPE_VECLOAD)
31172 /* Issued a vecload. */
31173 if (vec_pairing == 0)
31175 int vecload_pos = -1;
31176 /* We issued a single vecload, look for a vector insn to pair it
31177 with. If one isn't found, try to pair another vecload. */
31178 pos = lastpos;
31179 while (pos >= 0)
31181 if (recog_memoized (ready[pos]) >= 0)
31183 type2 = get_attr_type (ready[pos]);
31184 if (is_power9_pairable_vec_type (type2))
31186 /* Found a vector insn to pair with, move it to the
31187 end of the ready list so it is scheduled next. */
31188 tmp = ready[pos];
31189 for (i = pos; i < lastpos; i++)
31190 ready[i] = ready[i + 1];
31191 ready[lastpos] = tmp;
31192 vec_pairing = 1;
31193 return cached_can_issue_more;
31195 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31196 /* Remember position of first vecload seen. */
31197 vecload_pos = pos;
31199 pos--;
31201 if (vecload_pos >= 0)
31203 /* Didn't find a vector to pair with but did find a vecload,
31204 move it to the end of the ready list. */
31205 tmp = ready[vecload_pos];
31206 for (i = vecload_pos; i < lastpos; i++)
31207 ready[i] = ready[i + 1];
31208 ready[lastpos] = tmp;
31209 vec_pairing = 1;
31210 return cached_can_issue_more;
31214 else if (is_power9_pairable_vec_type (type))
31216 /* Issued a vector operation. */
31217 if (vec_pairing == 0)
31219 int vec_pos = -1;
31220 /* We issued a single vector insn, look for a vecload to pair it
31221 with. If one isn't found, try to pair another vector. */
31222 pos = lastpos;
31223 while (pos >= 0)
31225 if (recog_memoized (ready[pos]) >= 0)
31227 type2 = get_attr_type (ready[pos]);
31228 if (type2 == TYPE_VECLOAD)
31230 /* Found a vecload insn to pair with, move it to the
31231 end of the ready list so it is scheduled next. */
31232 tmp = ready[pos];
31233 for (i = pos; i < lastpos; i++)
31234 ready[i] = ready[i + 1];
31235 ready[lastpos] = tmp;
31236 vec_pairing = 1;
31237 return cached_can_issue_more;
31239 else if (is_power9_pairable_vec_type (type2)
31240 && vec_pos == -1)
31241 /* Remember position of first vector insn seen. */
31242 vec_pos = pos;
31244 pos--;
31246 if (vec_pos >= 0)
31248 /* Didn't find a vecload to pair with but did find a vector
31249 insn, move it to the end of the ready list. */
31250 tmp = ready[vec_pos];
31251 for (i = vec_pos; i < lastpos; i++)
31252 ready[i] = ready[i + 1];
31253 ready[lastpos] = tmp;
31254 vec_pairing = 1;
31255 return cached_can_issue_more;
31260 /* We've either finished a vec/vecload pair, couldn't find an insn to
31261      continue the current pair, or the last insn had nothing to do
31262 with pairing. In any case, reset the state. */
31263 vec_pairing = 0;
31266 return cached_can_issue_more;
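/* The "move ready[pos] to the back" step used repeatedly above (and
   again in rs6000_sched_reorder2 below) is a left rotation of the tail
   of the ready array; the scheduler picks insns from the back.  A
   stand-alone sketch over ints:  */

static void
rotate_to_back_sketch (int *ready, int pos, int lastpos)
{
  int tmp = ready[pos];
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;   /* now first in line to be scheduled */
}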
31269 /* We are about to begin issuing insns for this clock cycle. */
31271 static int
31272 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31273 rtx_insn **ready ATTRIBUTE_UNUSED,
31274 int *pn_ready ATTRIBUTE_UNUSED,
31275 int clock_var ATTRIBUTE_UNUSED)
31277 int n_ready = *pn_ready;
31279 if (sched_verbose)
31280 fprintf (dump, "// rs6000_sched_reorder :\n");
31282 /* Reorder the ready list, if the second to last ready insn
31283      is a nonpipeline insn.  */
31284 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31286 if (is_nonpipeline_insn (ready[n_ready - 1])
31287 && (recog_memoized (ready[n_ready - 2]) > 0))
31288 /* Simply swap first two insns. */
31289 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31292 if (rs6000_cpu == PROCESSOR_POWER6)
31293 load_store_pendulum = 0;
31295 return rs6000_issue_rate ();
31298 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31300 static int
31301 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31302 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31304 if (sched_verbose)
31305 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31307   /* For Power6, we need to handle some special cases to try to keep the
31308 store queue from overflowing and triggering expensive flushes.
31310 This code monitors how load and store instructions are being issued
31311 and skews the ready list one way or the other to increase the likelihood
31312 that a desired instruction is issued at the proper time.
31314 A couple of things are done. First, we maintain a "load_store_pendulum"
31315 to track the current state of load/store issue.
31317 - If the pendulum is at zero, then no loads or stores have been
31318 issued in the current cycle so we do nothing.
31320 - If the pendulum is 1, then a single load has been issued in this
31321 cycle and we attempt to locate another load in the ready list to
31322 issue with it.
31324 - If the pendulum is -2, then two stores have already been
31325 issued in this cycle, so we increase the priority of the first load
31326        in the ready list to increase its likelihood of being chosen first
31327 in the next cycle.
31329 - If the pendulum is -1, then a single store has been issued in this
31330 cycle and we attempt to locate another store in the ready list to
31331 issue with it, preferring a store to an adjacent memory location to
31332 facilitate store pairing in the store queue.
31334 - If the pendulum is 2, then two loads have already been
31335 issued in this cycle, so we increase the priority of the first store
31336        in the ready list to increase its likelihood of being chosen first
31337 in the next cycle.
31339 - If the pendulum < -2 or > 2, then do nothing.
31341      Note: This code covers the most common scenarios.  There exist
31342            non-load/store instructions which make use of the LSU and which
31343 would need to be accounted for to strictly model the behavior
31344 of the machine. Those instructions are currently unaccounted
31345            for to help minimize compile time overhead of this code.  */
31347 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31349 int pos;
31350 int i;
31351 rtx_insn *tmp;
31352 rtx load_mem, str_mem;
31354 if (is_store_insn (last_scheduled_insn, &str_mem))
31355 /* Issuing a store, swing the load_store_pendulum to the left */
31356 load_store_pendulum--;
31357 else if (is_load_insn (last_scheduled_insn, &load_mem))
31358 /* Issuing a load, swing the load_store_pendulum to the right */
31359 load_store_pendulum++;
31360 else
31361 return cached_can_issue_more;
31363 /* If the pendulum is balanced, or there is only one instruction on
31364 the ready list, then all is well, so return. */
31365 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31366 return cached_can_issue_more;
31368 if (load_store_pendulum == 1)
31370 /* A load has been issued in this cycle. Scan the ready list
31371 for another load to issue with it */
31372 pos = *pn_ready-1;
31374 while (pos >= 0)
31376 if (is_load_insn (ready[pos], &load_mem))
31378 /* Found a load. Move it to the head of the ready list,
31379                  and adjust its priority so that it is more likely to
31380 stay there */
31381 tmp = ready[pos];
31382 for (i=pos; i<*pn_ready-1; i++)
31383 ready[i] = ready[i + 1];
31384 ready[*pn_ready-1] = tmp;
31386 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31387 INSN_PRIORITY (tmp)++;
31388 break;
31390 pos--;
31393 else if (load_store_pendulum == -2)
31395 /* Two stores have been issued in this cycle. Increase the
31396 priority of the first load in the ready list to favor it for
31397 issuing in the next cycle. */
31398 pos = *pn_ready-1;
31400 while (pos >= 0)
31402 if (is_load_insn (ready[pos], &load_mem)
31403 && !sel_sched_p ()
31404 && INSN_PRIORITY_KNOWN (ready[pos]))
31406 INSN_PRIORITY (ready[pos])++;
31408 /* Adjust the pendulum to account for the fact that a load
31409 was found and increased in priority. This is to prevent
31410 increasing the priority of multiple loads */
31411 load_store_pendulum--;
31413 break;
31415 pos--;
31418 else if (load_store_pendulum == -1)
31420 /* A store has been issued in this cycle. Scan the ready list for
31421 another store to issue with it, preferring a store to an adjacent
31422 memory location */
31423 int first_store_pos = -1;
31425 pos = *pn_ready-1;
31427 while (pos >= 0)
31429 if (is_store_insn (ready[pos], &str_mem))
31431 rtx str_mem2;
31432 /* Maintain the index of the first store found on the
31433 list */
31434 if (first_store_pos == -1)
31435 first_store_pos = pos;
31437 if (is_store_insn (last_scheduled_insn, &str_mem2)
31438 && adjacent_mem_locations (str_mem, str_mem2))
31440 /* Found an adjacent store. Move it to the head of the
31441                      ready list, and adjust its priority so that it is
31442 more likely to stay there */
31443 tmp = ready[pos];
31444 for (i=pos; i<*pn_ready-1; i++)
31445 ready[i] = ready[i + 1];
31446 ready[*pn_ready-1] = tmp;
31448 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31449 INSN_PRIORITY (tmp)++;
31451 first_store_pos = -1;
31453 break;
31456 pos--;
31459 if (first_store_pos >= 0)
31461 /* An adjacent store wasn't found, but a non-adjacent store was,
31462 so move the non-adjacent store to the front of the ready
31463 list, and adjust its priority so that it is more likely to
31464 stay there. */
31465 tmp = ready[first_store_pos];
31466 for (i=first_store_pos; i<*pn_ready-1; i++)
31467 ready[i] = ready[i + 1];
31468 ready[*pn_ready-1] = tmp;
31469 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31470 INSN_PRIORITY (tmp)++;
31473 else if (load_store_pendulum == 2)
31475 /* Two loads have been issued in this cycle. Increase the priority
31476 of the first store in the ready list to favor it for issuing in
31477 the next cycle. */
31478 pos = *pn_ready-1;
31480 while (pos >= 0)
31482 if (is_store_insn (ready[pos], &str_mem)
31483 && !sel_sched_p ()
31484 && INSN_PRIORITY_KNOWN (ready[pos]))
31486 INSN_PRIORITY (ready[pos])++;
31488 /* Adjust the pendulum to account for the fact that a store
31489 was found and increased in priority. This is to prevent
31490 increasing the priority of multiple stores */
31491 load_store_pendulum++;
31493 break;
31495 pos--;
31500 /* Do Power9 dependent reordering if necessary. */
31501 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31502 && recog_memoized (last_scheduled_insn) >= 0)
31503 return power9_sched_reorder2 (ready, *pn_ready - 1);
31505 return cached_can_issue_more;
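/* For illustration, the pendulum policy described above reduced to a
   stand-alone decision table (the enum is invented for the sketch;
   positive pendulum values mean loads were issued, negative mean
   stores):  */

enum lsu_action_sketch { LSU_NONE, LSU_PAIR_LOAD, LSU_PAIR_STORE,
                         LSU_FAVOR_LOAD, LSU_FAVOR_STORE };

static enum lsu_action_sketch
pendulum_action_sketch (int pendulum)
{
  switch (pendulum)
    {
    case 1:  return LSU_PAIR_LOAD;    /* one load issued: seek another  */
    case -1: return LSU_PAIR_STORE;   /* one store issued: seek another */
    case 2:  return LSU_FAVOR_STORE;  /* two loads: bump first store    */
    case -2: return LSU_FAVOR_LOAD;   /* two stores: bump first load    */
    default: return LSU_NONE;         /* balanced, or beyond +/-2       */
    }
}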
31508 /* Return whether the presence of INSN causes a dispatch group termination
31509 of group WHICH_GROUP.
31511 If WHICH_GROUP == current_group, this function will return true if INSN
31512    causes the termination of the current group (i.e., the dispatch group to
31513 which INSN belongs). This means that INSN will be the last insn in the
31514 group it belongs to.
31516 If WHICH_GROUP == previous_group, this function will return true if INSN
31517    causes the termination of the previous group (i.e., the dispatch group that
31518 precedes the group to which INSN belongs). This means that INSN will be
31519    the first insn in the group it belongs to.  */
31521 static bool
31522 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31524 bool first, last;
31526 if (! insn)
31527 return false;
31529 first = insn_must_be_first_in_group (insn);
31530 last = insn_must_be_last_in_group (insn);
31532 if (first && last)
31533 return true;
31535 if (which_group == current_group)
31536 return last;
31537 else if (which_group == previous_group)
31538 return first;
31540 return false;
31544 static bool
31545 insn_must_be_first_in_group (rtx_insn *insn)
31547 enum attr_type type;
31549 if (!insn
31550 || NOTE_P (insn)
31551 || DEBUG_INSN_P (insn)
31552 || GET_CODE (PATTERN (insn)) == USE
31553 || GET_CODE (PATTERN (insn)) == CLOBBER)
31554 return false;
31556 switch (rs6000_cpu)
31558 case PROCESSOR_POWER5:
31559 if (is_cracked_insn (insn))
31560 return true;
31561 /* FALLTHRU */
31562 case PROCESSOR_POWER4:
31563 if (is_microcoded_insn (insn))
31564 return true;
31566 if (!rs6000_sched_groups)
31567 return false;
31569 type = get_attr_type (insn);
31571 switch (type)
31573 case TYPE_MFCR:
31574 case TYPE_MFCRF:
31575 case TYPE_MTCR:
31576 case TYPE_DELAYED_CR:
31577 case TYPE_CR_LOGICAL:
31578 case TYPE_MTJMPR:
31579 case TYPE_MFJMPR:
31580 case TYPE_DIV:
31581 case TYPE_LOAD_L:
31582 case TYPE_STORE_C:
31583 case TYPE_ISYNC:
31584 case TYPE_SYNC:
31585 return true;
31586 default:
31587 break;
31589 break;
31590 case PROCESSOR_POWER6:
31591 type = get_attr_type (insn);
31593 switch (type)
31595 case TYPE_EXTS:
31596 case TYPE_CNTLZ:
31597 case TYPE_TRAP:
31598 case TYPE_MUL:
31599 case TYPE_INSERT:
31600 case TYPE_FPCOMPARE:
31601 case TYPE_MFCR:
31602 case TYPE_MTCR:
31603 case TYPE_MFJMPR:
31604 case TYPE_MTJMPR:
31605 case TYPE_ISYNC:
31606 case TYPE_SYNC:
31607 case TYPE_LOAD_L:
31608 case TYPE_STORE_C:
31609 return true;
31610 case TYPE_SHIFT:
31611 if (get_attr_dot (insn) == DOT_NO
31612 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31613 return true;
31614 else
31615 break;
31616 case TYPE_DIV:
31617 if (get_attr_size (insn) == SIZE_32)
31618 return true;
31619 else
31620 break;
31621 case TYPE_LOAD:
31622 case TYPE_STORE:
31623 case TYPE_FPLOAD:
31624 case TYPE_FPSTORE:
31625 if (get_attr_update (insn) == UPDATE_YES)
31626 return true;
31627 else
31628 break;
31629 default:
31630 break;
31632 break;
31633 case PROCESSOR_POWER7:
31634 type = get_attr_type (insn);
31636 switch (type)
31638 case TYPE_CR_LOGICAL:
31639 case TYPE_MFCR:
31640 case TYPE_MFCRF:
31641 case TYPE_MTCR:
31642 case TYPE_DIV:
31643 case TYPE_ISYNC:
31644 case TYPE_LOAD_L:
31645 case TYPE_STORE_C:
31646 case TYPE_MFJMPR:
31647 case TYPE_MTJMPR:
31648 return true;
31649 case TYPE_MUL:
31650 case TYPE_SHIFT:
31651 case TYPE_EXTS:
31652 if (get_attr_dot (insn) == DOT_YES)
31653 return true;
31654 else
31655 break;
31656 case TYPE_LOAD:
31657 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31658 || get_attr_update (insn) == UPDATE_YES)
31659 return true;
31660 else
31661 break;
31662 case TYPE_STORE:
31663 case TYPE_FPLOAD:
31664 case TYPE_FPSTORE:
31665 if (get_attr_update (insn) == UPDATE_YES)
31666 return true;
31667 else
31668 break;
31669 default:
31670 break;
31672 break;
31673 case PROCESSOR_POWER8:
31674 type = get_attr_type (insn);
31676 switch (type)
31678 case TYPE_CR_LOGICAL:
31679 case TYPE_DELAYED_CR:
31680 case TYPE_MFCR:
31681 case TYPE_MFCRF:
31682 case TYPE_MTCR:
31683 case TYPE_SYNC:
31684 case TYPE_ISYNC:
31685 case TYPE_LOAD_L:
31686 case TYPE_STORE_C:
31687 case TYPE_VECSTORE:
31688 case TYPE_MFJMPR:
31689 case TYPE_MTJMPR:
31690 return true;
31691 case TYPE_SHIFT:
31692 case TYPE_EXTS:
31693 case TYPE_MUL:
31694 if (get_attr_dot (insn) == DOT_YES)
31695 return true;
31696 else
31697 break;
31698 case TYPE_LOAD:
31699 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31700 || get_attr_update (insn) == UPDATE_YES)
31701 return true;
31702 else
31703 break;
31704 case TYPE_STORE:
31705 if (get_attr_update (insn) == UPDATE_YES
31706 && get_attr_indexed (insn) == INDEXED_YES)
31707 return true;
31708 else
31709 break;
31710 default:
31711 break;
31713 break;
31714 default:
31715 break;
31718 return false;
31721 static bool
31722 insn_must_be_last_in_group (rtx_insn *insn)
31724 enum attr_type type;
31726 if (!insn
31727 || NOTE_P (insn)
31728 || DEBUG_INSN_P (insn)
31729 || GET_CODE (PATTERN (insn)) == USE
31730 || GET_CODE (PATTERN (insn)) == CLOBBER)
31731 return false;
31733 switch (rs6000_cpu) {
31734 case PROCESSOR_POWER4:
31735 case PROCESSOR_POWER5:
31736 if (is_microcoded_insn (insn))
31737 return true;
31739 if (is_branch_slot_insn (insn))
31740 return true;
31742 break;
31743 case PROCESSOR_POWER6:
31744 type = get_attr_type (insn);
31746 switch (type)
31748 case TYPE_EXTS:
31749 case TYPE_CNTLZ:
31750 case TYPE_TRAP:
31751 case TYPE_MUL:
31752 case TYPE_FPCOMPARE:
31753 case TYPE_MFCR:
31754 case TYPE_MTCR:
31755 case TYPE_MFJMPR:
31756 case TYPE_MTJMPR:
31757 case TYPE_ISYNC:
31758 case TYPE_SYNC:
31759 case TYPE_LOAD_L:
31760 case TYPE_STORE_C:
31761 return true;
31762 case TYPE_SHIFT:
31763 if (get_attr_dot (insn) == DOT_NO
31764 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31765 return true;
31766 else
31767 break;
31768 case TYPE_DIV:
31769 if (get_attr_size (insn) == SIZE_32)
31770 return true;
31771 else
31772 break;
31773 default:
31774 break;
31776 break;
31777 case PROCESSOR_POWER7:
31778 type = get_attr_type (insn);
31780 switch (type)
31782 case TYPE_ISYNC:
31783 case TYPE_SYNC:
31784 case TYPE_LOAD_L:
31785 case TYPE_STORE_C:
31786 return true;
31787 case TYPE_LOAD:
31788 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31789 && get_attr_update (insn) == UPDATE_YES)
31790 return true;
31791 else
31792 break;
31793 case TYPE_STORE:
31794 if (get_attr_update (insn) == UPDATE_YES
31795 && get_attr_indexed (insn) == INDEXED_YES)
31796 return true;
31797 else
31798 break;
31799 default:
31800 break;
31802 break;
31803 case PROCESSOR_POWER8:
31804 type = get_attr_type (insn);
31806 switch (type)
31808 case TYPE_MFCR:
31809 case TYPE_MTCR:
31810 case TYPE_ISYNC:
31811 case TYPE_SYNC:
31812 case TYPE_LOAD_L:
31813 case TYPE_STORE_C:
31814 return true;
31815 case TYPE_LOAD:
31816 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31817 && get_attr_update (insn) == UPDATE_YES)
31818 return true;
31819 else
31820 break;
31821 case TYPE_STORE:
31822 if (get_attr_update (insn) == UPDATE_YES
31823 && get_attr_indexed (insn) == INDEXED_YES)
31824 return true;
31825 else
31826 break;
31827 default:
31828 break;
31830 break;
31831 default:
31832 break;
31835 return false;
31838 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31839 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31841 static bool
31842 is_costly_group (rtx *group_insns, rtx next_insn)
31844 int i;
31845 int issue_rate = rs6000_issue_rate ();
31847 for (i = 0; i < issue_rate; i++)
31849 sd_iterator_def sd_it;
31850 dep_t dep;
31851 rtx insn = group_insns[i];
31853 if (!insn)
31854 continue;
31856 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31858 rtx next = DEP_CON (dep);
31860 if (next == next_insn
31861 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31862 return true;
31866 return false;
31869 /* Utility of the function redefine_groups.
31870 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31871 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31872 to keep it "far" (in a separate group) from GROUP_INSNS, following
31873 one of the following schemes, depending on the value of the flag
31874 -minsert_sched_nops = X:
31875 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31876 in order to force NEXT_INSN into a separate group.
31877 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31878 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31879 insertion (has a group just ended, how many vacant issue slots remain in the
31880 last group, and how many dispatch groups were encountered so far). */
31882 static int
31883 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31884 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31885 int *group_count)
31887 rtx nop;
31888 bool force;
31889 int issue_rate = rs6000_issue_rate ();
31890 bool end = *group_end;
31891 int i;
31893 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31894 return can_issue_more;
31896 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31897 return can_issue_more;
31899 force = is_costly_group (group_insns, next_insn);
31900 if (!force)
31901 return can_issue_more;
31903 if (sched_verbose > 6)
31904 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
31905 *group_count ,can_issue_more);
31907 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31909 if (*group_end)
31910 can_issue_more = 0;
31912 /* Since only a branch can be issued in the last issue_slot, it is
31913 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31914 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31915 in this case the last nop will start a new group and the branch
31916 will be forced to the new group. */
31917 if (can_issue_more && !is_branch_slot_insn (next_insn))
31918 can_issue_more--;
31920 /* Do we have a special group ending nop? */
31921 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
31922 || rs6000_cpu_attr == CPU_POWER8)
31924 nop = gen_group_ending_nop ();
31925 emit_insn_before (nop, next_insn);
31926 can_issue_more = 0;
31928 else
31929 while (can_issue_more > 0)
31931 nop = gen_nop ();
31932 emit_insn_before (nop, next_insn);
31933 can_issue_more--;
31936 *group_end = true;
31937 return 0;
31940 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31942 int n_nops = rs6000_sched_insert_nops;
31944 /* Nops can't be issued from the branch slot, so the effective
31945 issue_rate for nops is 'issue_rate - 1'. */
31946 if (can_issue_more == 0)
31947 can_issue_more = issue_rate;
31948 can_issue_more--;
31949 if (can_issue_more == 0)
31951 can_issue_more = issue_rate - 1;
31952 (*group_count)++;
31953 end = true;
31954 for (i = 0; i < issue_rate; i++)
31956 group_insns[i] = 0;
31960 while (n_nops > 0)
31962 nop = gen_nop ();
31963 emit_insn_before (nop, next_insn);
31964 if (can_issue_more == issue_rate - 1) /* new group begins */
31965 end = false;
31966 can_issue_more--;
31967 if (can_issue_more == 0)
31969 can_issue_more = issue_rate - 1;
31970 (*group_count)++;
31971 end = true;
31972 for (i = 0; i < issue_rate; i++)
31974 group_insns[i] = 0;
31977 n_nops--;
31980 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31981 can_issue_more++;
31983 /* Is next_insn going to start a new group? */
31984 *group_end
31985 = (end
31986 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31987 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31988 || (can_issue_more < issue_rate &&
31989 insn_terminates_group_p (next_insn, previous_group)));
31990 if (*group_end && end)
31991 (*group_count)--;
31993 if (sched_verbose > 6)
31994 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31995 *group_count, can_issue_more);
31996 return can_issue_more;
31999 return can_issue_more;
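/* For illustration, the fixed-count branch above amounts to emitting N
   nops at an effective rate of (issue_rate - 1) per group, since the
   branch slot cannot hold a nop.  A simplified stand-alone sketch that
   only counts the group boundaries crossed:  */

static int
count_nop_groups_sketch (int n_nops, int issue_rate, int slots_left)
{
  int groups = 0;
  while (n_nops-- > 0)
    {
      if (slots_left == 0)
        {
          slots_left = issue_rate - 1;  /* new group, minus branch slot */
          groups++;
        }
      slots_left--;                     /* one slot consumed by the nop */
    }
  return groups;
}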
32002 /* This function tries to synchronize the dispatch groups that the compiler
32003    "sees" with the dispatch groups that the processor dispatcher is expected to
32004    form in practice.  It tries to achieve this synchronization by forcing the
32005    estimated processor grouping on the compiler (as opposed to the function
32006    'pad_groups', which tries to force the scheduler's grouping on the processor).
32008 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32009 examines the (estimated) dispatch groups that will be formed by the processor
32010 dispatcher. It marks these group boundaries to reflect the estimated
32011 processor grouping, overriding the grouping that the scheduler had marked.
32012 Depending on the value of the flag '-minsert-sched-nops' this function can
32013 force certain insns into separate groups or force a certain distance between
32014 them by inserting nops, for example, if there exists a "costly dependence"
32015 between the insns.
32017 The function estimates the group boundaries that the processor will form as
32018 follows: It keeps track of how many vacant issue slots are available after
32019 each insn. A subsequent insn will start a new group if one of the following
32020 4 cases applies:
32021 - no more vacant issue slots remain in the current dispatch group.
32022 - only the last issue slot, which is the branch slot, is vacant, but the next
32023 insn is not a branch.
32024    - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32025 which means that a cracked insn (which occupies two issue slots) can't be
32026 issued in this group.
32027    - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32028 start a new group. */
32030 static int
32031 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32032 rtx_insn *tail)
32034 rtx_insn *insn, *next_insn;
32035 int issue_rate;
32036 int can_issue_more;
32037 int slot, i;
32038 bool group_end;
32039 int group_count = 0;
32040 rtx *group_insns;
32042 /* Initialize. */
32043 issue_rate = rs6000_issue_rate ();
32044 group_insns = XALLOCAVEC (rtx, issue_rate);
32045 for (i = 0; i < issue_rate; i++)
32047 group_insns[i] = 0;
32049 can_issue_more = issue_rate;
32050 slot = 0;
32051 insn = get_next_active_insn (prev_head_insn, tail);
32052 group_end = false;
32054 while (insn != NULL_RTX)
32056 slot = (issue_rate - can_issue_more);
32057 group_insns[slot] = insn;
32058 can_issue_more =
32059 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32060 if (insn_terminates_group_p (insn, current_group))
32061 can_issue_more = 0;
32063 next_insn = get_next_active_insn (insn, tail);
32064 if (next_insn == NULL_RTX)
32065 return group_count + 1;
32067 /* Is next_insn going to start a new group? */
32068 group_end
32069 = (can_issue_more == 0
32070 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32071 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32072 || (can_issue_more < issue_rate &&
32073 insn_terminates_group_p (next_insn, previous_group)));
32075 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32076 next_insn, &group_end, can_issue_more,
32077 &group_count);
32079 if (group_end)
32081 group_count++;
32082 can_issue_more = 0;
32083 for (i = 0; i < issue_rate; i++)
32085 group_insns[i] = 0;
32089 if (GET_MODE (next_insn) == TImode && can_issue_more)
32090 PUT_MODE (next_insn, VOIDmode);
32091 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32092 PUT_MODE (next_insn, TImode);
32094 insn = next_insn;
32095 if (can_issue_more == 0)
32096 can_issue_more = issue_rate;
32097 } /* while */
32099 return group_count;
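/* The four boundary cases listed before redefine_groups correspond to
   the group_end test inside its loop.  As a stand-alone predicate
   sketch (plain ints; the three flags stand in for the
   is_branch_slot_insn, is_cracked_insn and insn_terminates_group_p
   queries):  */

static int
starts_new_group_sketch (int can_issue_more, int issue_rate,
                         int next_is_branch, int next_is_cracked,
                         int next_must_be_first)
{
  return (can_issue_more == 0
          || (can_issue_more == 1 && !next_is_branch)
          || (can_issue_more <= 2 && next_is_cracked)
          || (can_issue_more < issue_rate && next_must_be_first));
}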
32102 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32103 dispatch group boundaries that the scheduler had marked. Pad with nops
32104 any dispatch groups which have vacant issue slots, in order to force the
32105 scheduler's grouping on the processor dispatcher. The function
32106 returns the number of dispatch groups found. */
32108 static int
32109 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32110 rtx_insn *tail)
32112 rtx_insn *insn, *next_insn;
32113 rtx nop;
32114 int issue_rate;
32115 int can_issue_more;
32116 int group_end;
32117 int group_count = 0;
32119 /* Initialize issue_rate. */
32120 issue_rate = rs6000_issue_rate ();
32121 can_issue_more = issue_rate;
32123 insn = get_next_active_insn (prev_head_insn, tail);
32124 next_insn = get_next_active_insn (insn, tail);
32126 while (insn != NULL_RTX)
32128 can_issue_more =
32129 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32131 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32133 if (next_insn == NULL_RTX)
32134 break;
32136 if (group_end)
32138 /* If the scheduler had marked group termination at this location
32139 (between insn and next_insn), and neither insn nor next_insn will
32140 force group termination, pad the group with nops to force group
32141 termination. */
32142 if (can_issue_more
32143 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32144 && !insn_terminates_group_p (insn, current_group)
32145 && !insn_terminates_group_p (next_insn, previous_group))
32147 if (!is_branch_slot_insn (next_insn))
32148 can_issue_more--;
32150 while (can_issue_more)
32152 nop = gen_nop ();
32153 emit_insn_before (nop, next_insn);
32154 can_issue_more--;
32158 can_issue_more = issue_rate;
32159 group_count++;
32162 insn = next_insn;
32163 next_insn = get_next_active_insn (insn, tail);
32166 return group_count;
32169 /* We're beginning a new block. Initialize data structures as necessary. */
32171 static void
32172 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32173 int sched_verbose ATTRIBUTE_UNUSED,
32174 int max_ready ATTRIBUTE_UNUSED)
32176 last_scheduled_insn = NULL;
32177 load_store_pendulum = 0;
32178 divide_cnt = 0;
32179 vec_pairing = 0;
32182 /* The following function is called at the end of scheduling BB.
32183    After reload, it inserts nops to enforce insn group bundling.  */
32185 static void
32186 rs6000_sched_finish (FILE *dump, int sched_verbose)
32188 int n_groups;
32190 if (sched_verbose)
32191 fprintf (dump, "=== Finishing schedule.\n");
32193 if (reload_completed && rs6000_sched_groups)
32195       /* Do not run sched_finish hook when selective scheduling is enabled.  */
32196 if (sel_sched_p ())
32197 return;
32199 if (rs6000_sched_insert_nops == sched_finish_none)
32200 return;
32202 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32203 n_groups = pad_groups (dump, sched_verbose,
32204 current_sched_info->prev_head,
32205 current_sched_info->next_tail);
32206 else
32207 n_groups = redefine_groups (dump, sched_verbose,
32208 current_sched_info->prev_head,
32209 current_sched_info->next_tail);
32211 if (sched_verbose >= 6)
32213 fprintf (dump, "ngroups = %d\n", n_groups);
32214 print_rtl (dump, current_sched_info->prev_head);
32215 fprintf (dump, "Done finish_sched\n");
32220 struct rs6000_sched_context
32222 short cached_can_issue_more;
32223 rtx_insn *last_scheduled_insn;
32224 int load_store_pendulum;
32225 int divide_cnt;
32226 int vec_pairing;
32229 typedef struct rs6000_sched_context rs6000_sched_context_def;
32230 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32232 /* Allocate store for new scheduling context. */
32233 static void *
32234 rs6000_alloc_sched_context (void)
32236 return xmalloc (sizeof (rs6000_sched_context_def));
32239 /* If CLEAN_P is true then initialize _SC with clean data;
32240    otherwise, initialize it from the global context.  */
32241 static void
32242 rs6000_init_sched_context (void *_sc, bool clean_p)
32244 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32246 if (clean_p)
32248 sc->cached_can_issue_more = 0;
32249 sc->last_scheduled_insn = NULL;
32250 sc->load_store_pendulum = 0;
32251 sc->divide_cnt = 0;
32252 sc->vec_pairing = 0;
32254 else
32256 sc->cached_can_issue_more = cached_can_issue_more;
32257 sc->last_scheduled_insn = last_scheduled_insn;
32258 sc->load_store_pendulum = load_store_pendulum;
32259 sc->divide_cnt = divide_cnt;
32260 sc->vec_pairing = vec_pairing;
32264 /* Sets the global scheduling context to the one pointed to by _SC. */
32265 static void
32266 rs6000_set_sched_context (void *_sc)
32268 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32270 gcc_assert (sc != NULL);
32272 cached_can_issue_more = sc->cached_can_issue_more;
32273 last_scheduled_insn = sc->last_scheduled_insn;
32274 load_store_pendulum = sc->load_store_pendulum;
32275 divide_cnt = sc->divide_cnt;
32276 vec_pairing = sc->vec_pairing;
32279 /* Free _SC. */
32280 static void
32281 rs6000_free_sched_context (void *_sc)
32283 gcc_assert (_sc != NULL);
32285 free (_sc);
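/* Sketch of how the scheduler core is expected to drive the four
   context hooks above (the calls are owned by the selective scheduler,
   not by this file; shown only to make the lifecycle concrete):

     void *ctx = rs6000_alloc_sched_context ();
     rs6000_init_sched_context (ctx, true);   // start from clean state
     ...scheduling...
     rs6000_set_sched_context (ctx);          // make it the global state
     ...scheduling...
     rs6000_free_sched_context (ctx);
*/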
32288 static bool
32289 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32291 switch (get_attr_type (insn))
32293 case TYPE_DIV:
32294 case TYPE_SDIV:
32295 case TYPE_DDIV:
32296 case TYPE_VECDIV:
32297 case TYPE_SSQRT:
32298 case TYPE_DSQRT:
32299 return false;
32301 default:
32302 return true;
32306 /* Length in units of the trampoline for entering a nested function. */
32309 rs6000_trampoline_size (void)
32311 int ret = 0;
32313 switch (DEFAULT_ABI)
32315 default:
32316 gcc_unreachable ();
32318 case ABI_AIX:
32319 ret = (TARGET_32BIT) ? 12 : 24;
32320 break;
32322 case ABI_ELFv2:
32323 gcc_assert (!TARGET_32BIT);
32324 ret = 32;
32325 break;
32327 case ABI_DARWIN:
32328 case ABI_V4:
32329 ret = (TARGET_32BIT) ? 40 : 48;
32330 break;
32333 return ret;
32336 /* Emit RTL insns to initialize the variable parts of a trampoline.
32337 FNADDR is an RTX for the address of the function's pure code.
32338 CXT is an RTX for the static chain value for the function. */
32340 static void
32341 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32343 int regsize = (TARGET_32BIT) ? 4 : 8;
32344 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32345 rtx ctx_reg = force_reg (Pmode, cxt);
32346 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32348 switch (DEFAULT_ABI)
32350 default:
32351 gcc_unreachable ();
32353     /* Under AIX, just build the 3-word function descriptor.  */
32354 case ABI_AIX:
32356 rtx fnmem, fn_reg, toc_reg;
32358 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32359 error ("you cannot take the address of a nested function if you use "
32360 "the %qs option", "-mno-pointers-to-nested-functions");
32362 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32363 fn_reg = gen_reg_rtx (Pmode);
32364 toc_reg = gen_reg_rtx (Pmode);
32366 /* Macro to shorten the code expansions below. */
32367 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32369 m_tramp = replace_equiv_address (m_tramp, addr);
32371 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32372 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32373 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32374 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32375 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32377 # undef MEM_PLUS
32379 break;
32381 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32382 case ABI_ELFv2:
32383 case ABI_DARWIN:
32384 case ABI_V4:
32385 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32386 LCT_NORMAL, VOIDmode,
32387 addr, Pmode,
32388 GEN_INT (rs6000_trampoline_size ()), SImode,
32389 fnaddr, Pmode,
32390 ctx_reg, Pmode);
32391 break;
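/* For reference, the 3-word AIX descriptor assembled above has this
   shape (a C sketch only; "uintptr_t" stands in for the pointer-sized
   word, 4 or 8 bytes per TARGET_32BIT):

     struct aix_func_desc_sketch {
       uintptr_t entry;         // code address, copied from FNMEM word 0
       uintptr_t toc;           // TOC pointer, copied from FNMEM word 1
       uintptr_t static_chain;  // CXT, stored last by the code above
     };
*/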
32396 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32397 identifier as an argument, so the front end shouldn't look it up. */
32399 static bool
32400 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32402 return is_attribute_p ("altivec", attr_id);
32405 /* Handle the "altivec" attribute. The attribute may have
32406 arguments as follows:
32408 __attribute__((altivec(vector__)))
32409 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32410 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32412 and may appear more than once (e.g., 'vector bool char') in a
32413 given declaration. */
32415 static tree
32416 rs6000_handle_altivec_attribute (tree *node,
32417 tree name ATTRIBUTE_UNUSED,
32418 tree args,
32419 int flags ATTRIBUTE_UNUSED,
32420 bool *no_add_attrs)
32422 tree type = *node, result = NULL_TREE;
32423 machine_mode mode;
32424 int unsigned_p;
32425 char altivec_type
32426 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32427 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32428 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32429 : '?');
32431 while (POINTER_TYPE_P (type)
32432 || TREE_CODE (type) == FUNCTION_TYPE
32433 || TREE_CODE (type) == METHOD_TYPE
32434 || TREE_CODE (type) == ARRAY_TYPE)
32435 type = TREE_TYPE (type);
32437 mode = TYPE_MODE (type);
32439 /* Check for invalid AltiVec type qualifiers. */
32440 if (type == long_double_type_node)
32441 error ("use of %<long double%> in AltiVec types is invalid");
32442 else if (type == boolean_type_node)
32443 error ("use of boolean types in AltiVec types is invalid");
32444 else if (TREE_CODE (type) == COMPLEX_TYPE)
32445 error ("use of %<complex%> in AltiVec types is invalid");
32446 else if (DECIMAL_FLOAT_MODE_P (mode))
32447 error ("use of decimal floating point types in AltiVec types is invalid");
32448 else if (!TARGET_VSX)
32450 if (type == long_unsigned_type_node || type == long_integer_type_node)
32452 if (TARGET_64BIT)
32453 error ("use of %<long%> in AltiVec types is invalid for "
32454 "64-bit code without %qs", "-mvsx");
32455 else if (rs6000_warn_altivec_long)
32456 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32457 "use %<int%>");
32459 else if (type == long_long_unsigned_type_node
32460 || type == long_long_integer_type_node)
32461 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32462 "-mvsx");
32463 else if (type == double_type_node)
32464 error ("use of %<double%> in AltiVec types is invalid without %qs",
32465 "-mvsx");
32468 switch (altivec_type)
32470 case 'v':
32471 unsigned_p = TYPE_UNSIGNED (type);
32472 switch (mode)
32474 case E_TImode:
32475 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32476 break;
32477 case E_DImode:
32478 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32479 break;
32480 case E_SImode:
32481 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32482 break;
32483 case E_HImode:
32484 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32485 break;
32486 case E_QImode:
32487 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32488 break;
32489 case E_SFmode: result = V4SF_type_node; break;
32490 case E_DFmode: result = V2DF_type_node; break;
32491 /* If the user says 'vector int bool', we may be handed the 'bool'
32492 attribute _before_ the 'vector' attribute, and so select the
32493 proper type in the 'b' case below. */
32494 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32495 case E_V2DImode: case E_V2DFmode:
32496 result = type;
32497 default: break;
32499 break;
32500 case 'b':
32501 switch (mode)
32503 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32504 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32505 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32506 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32507 default: break;
32509 break;
32510 case 'p':
32511 switch (mode)
32513 case E_V8HImode: result = pixel_V8HI_type_node;
32514 default: break;
32516 default: break;
32519 /* Propagate qualifiers attached to the element type
32520 onto the vector type. */
32521 if (result && result != type && TYPE_QUALS (type))
32522 result = build_qualified_type (result, TYPE_QUALS (type));
32524 *no_add_attrs = true; /* No need to hang on to the attribute. */
32526 if (result)
32527 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32529 return NULL_TREE;
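/* Usage sketch, per the argument forms documented above:

     __attribute__((altivec(vector__))) int            vi;  // vector int
     __attribute__((altivec(pixel__)))  unsigned short vp;  // vector pixel

   i.e., the context-sensitive 'vector' and 'pixel' keywords are
   expanded by the front end into this attribute form before the
   handler above runs (the exact expansion is owned by the front
   end).  */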
32532 /* AltiVec defines four built-in scalar types that serve as vector
32533 elements; we must teach the compiler how to mangle them. */
32535 static const char *
32536 rs6000_mangle_type (const_tree type)
32538 type = TYPE_MAIN_VARIANT (type);
32540 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32541 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32542 return NULL;
32544 if (type == bool_char_type_node) return "U6__boolc";
32545 if (type == bool_short_type_node) return "U6__bools";
32546 if (type == pixel_type_node) return "u7__pixel";
32547 if (type == bool_int_type_node) return "U6__booli";
32548 if (type == bool_long_type_node) return "U6__booll";
32550 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32551 "g" for IBM extended double, no matter whether it is long double (using
32552 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32553 if (TARGET_FLOAT128_TYPE)
32555 if (type == ieee128_float_type_node)
32556 return "U10__float128";
32558 if (type == ibm128_float_type_node)
32559 return "g";
32561 if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
32562 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32565 /* Mangle IBM extended float long double as `g' (__float128) on
32566 powerpc*-linux where long-double-64 previously was the default. */
32567 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32568 && TARGET_ELF
32569 && TARGET_LONG_DOUBLE_128
32570 && !TARGET_IEEEQUAD)
32571 return "g";
32573 /* For all other types, use normal C++ mangling. */
32574 return NULL;
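/* Note the consequence of the cases above: when long double is IBM
   extended (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD), both
   'long double' and '__ibm128' mangle as "g", so C++ overloads on
   those two types would collide, while '__float128' stays distinct
   as "U10__float128".  */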
32577 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32578 struct attribute_spec.handler. */
32580 static tree
32581 rs6000_handle_longcall_attribute (tree *node, tree name,
32582 tree args ATTRIBUTE_UNUSED,
32583 int flags ATTRIBUTE_UNUSED,
32584 bool *no_add_attrs)
32586 if (TREE_CODE (*node) != FUNCTION_TYPE
32587 && TREE_CODE (*node) != FIELD_DECL
32588 && TREE_CODE (*node) != TYPE_DECL)
32590 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32591 name);
32592 *no_add_attrs = true;
32595 return NULL_TREE;
32598 /* Set longcall attributes on all functions declared when
32599 rs6000_default_long_calls is true. */
32600 static void
32601 rs6000_set_default_type_attributes (tree type)
32603 if (rs6000_default_long_calls
32604 && (TREE_CODE (type) == FUNCTION_TYPE
32605 || TREE_CODE (type) == METHOD_TYPE))
32606 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32607 NULL_TREE,
32608 TYPE_ATTRIBUTES (type));
32610 #if TARGET_MACHO
32611 darwin_set_default_type_attributes (type);
32612 #endif
32615 /* Return a reference suitable for calling a function with the
32616 longcall attribute. */
32619 rs6000_longcall_ref (rtx call_ref)
32621 const char *call_name;
32622 tree node;
32624 if (GET_CODE (call_ref) != SYMBOL_REF)
32625 return call_ref;
32627 /* System V adds '.' to the internal name, so skip them. */
32628 call_name = XSTR (call_ref, 0);
32629 if (*call_name == '.')
32631 while (*call_name == '.')
32632 call_name++;
32634 node = get_identifier (call_name);
32635 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32638 return force_reg (Pmode, call_ref);
32641 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32642 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32643 #endif
32645 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32646 struct attribute_spec.handler. */
32647 static tree
32648 rs6000_handle_struct_attribute (tree *node, tree name,
32649 tree args ATTRIBUTE_UNUSED,
32650 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32652 tree *type = NULL;
32653 if (DECL_P (*node))
32655 if (TREE_CODE (*node) == TYPE_DECL)
32656 type = &TREE_TYPE (*node);
32658 else
32659 type = node;
32661 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32662 || TREE_CODE (*type) == UNION_TYPE)))
32664 warning (OPT_Wattributes, "%qE attribute ignored", name);
32665 *no_add_attrs = true;
32668 else if ((is_attribute_p ("ms_struct", name)
32669 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32670 || ((is_attribute_p ("gcc_struct", name)
32671 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32673 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32674 name);
32675 *no_add_attrs = true;
32678 return NULL_TREE;
32681 static bool
32682 rs6000_ms_bitfield_layout_p (const_tree record_type)
32684 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32685 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32686 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32689 #ifdef USING_ELFOS_H
32691 /* A get_unnamed_section callback, used for switching to toc_section. */
32693 static void
32694 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32696 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32697 && TARGET_MINIMAL_TOC)
32699 if (!toc_initialized)
32701 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32702 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32703 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32704 fprintf (asm_out_file, "\t.tc ");
32705 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32706 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32707 fprintf (asm_out_file, "\n");
32709 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32710 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32711 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32712 fprintf (asm_out_file, " = .+32768\n");
32713 toc_initialized = 1;
32715 else
32716 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32718 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32720 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32721 if (!toc_initialized)
32723 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32724 toc_initialized = 1;
32727 else
32729 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32730 if (!toc_initialized)
32732 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32733 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32734 fprintf (asm_out_file, " = .+32768\n");
32735 toc_initialized = 1;
32740 /* Implement TARGET_ASM_INIT_SECTIONS. */
32742 static void
32743 rs6000_elf_asm_init_sections (void)
32745 toc_section
32746 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32748 sdata2_section
32749 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32750 SDATA2_SECTION_ASM_OP);
32753 /* Implement TARGET_SELECT_RTX_SECTION. */
32755 static section *
32756 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32757 unsigned HOST_WIDE_INT align)
32759 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32760 return toc_section;
32761 else
32762 return default_elf_select_rtx_section (mode, x, align);
32765 /* For a SYMBOL_REF, set generic flags and then perform some
32766 target-specific processing.
32768 When the AIX ABI is requested on a non-AIX system, replace the
32769 function name with the real name (with a leading .) rather than the
32770 function descriptor name. This saves a lot of overriding code to
32771 read the prefixes. */
32773 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32774 static void
32775 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32777 default_encode_section_info (decl, rtl, first);
32779 if (first
32780 && TREE_CODE (decl) == FUNCTION_DECL
32781 && !TARGET_AIX
32782 && DEFAULT_ABI == ABI_AIX)
32784 rtx sym_ref = XEXP (rtl, 0);
32785 size_t len = strlen (XSTR (sym_ref, 0));
32786 char *str = XALLOCAVEC (char, len + 2);
32787 str[0] = '.';
32788 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32789 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32793 static inline bool
32794 compare_section_name (const char *section, const char *templ)
32796 int len;
32798 len = strlen (templ);
32799 return (strncmp (section, templ, len) == 0
32800 && (section[len] == 0 || section[len] == '.'));
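/* E.g. compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") are true, while
   compare_section_name (".sdata2", ".sdata") is false: the character
   after the matched prefix is neither NUL nor '.'.  */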
32803 bool
32804 rs6000_elf_in_small_data_p (const_tree decl)
32806 if (rs6000_sdata == SDATA_NONE)
32807 return false;
32809 /* We want to merge strings, so we never consider them small data. */
32810 if (TREE_CODE (decl) == STRING_CST)
32811 return false;
32813 /* Functions are never in the small data area. */
32814 if (TREE_CODE (decl) == FUNCTION_DECL)
32815 return false;
32817 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32819 const char *section = DECL_SECTION_NAME (decl);
32820 if (compare_section_name (section, ".sdata")
32821 || compare_section_name (section, ".sdata2")
32822 || compare_section_name (section, ".gnu.linkonce.s")
32823 || compare_section_name (section, ".sbss")
32824 || compare_section_name (section, ".sbss2")
32825 || compare_section_name (section, ".gnu.linkonce.sb")
32826 || strcmp (section, ".PPC.EMB.sdata0") == 0
32827 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32828 return true;
32830 else
32832 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32834 if (size > 0
32835 && size <= g_switch_value
32836 /* If it's not public, and we're not going to reference it there,
32837 there's no need to put it in the small data section. */
32838 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32839 return true;
32842 return false;
32845 #endif /* USING_ELFOS_H */
32847 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32849 static bool
32850 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32852 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32855 /* Do not place thread-local symbols refs in the object blocks. */
32857 static bool
32858 rs6000_use_blocks_for_decl_p (const_tree decl)
32860 return !DECL_THREAD_LOCAL_P (decl);
32863 /* Return a REG that occurs in ADDR with coefficient 1.
32864 ADDR can be effectively incremented by incrementing REG.
32866 r0 is special and we must not select it as an address
32867 register by this routine since our caller will try to
32868 increment the returned register via an "la" instruction. */
32871 find_addr_reg (rtx addr)
32873 while (GET_CODE (addr) == PLUS)
32875 if (GET_CODE (XEXP (addr, 0)) == REG
32876 && REGNO (XEXP (addr, 0)) != 0)
32877 addr = XEXP (addr, 0);
32878 else if (GET_CODE (XEXP (addr, 1)) == REG
32879 && REGNO (XEXP (addr, 1)) != 0)
32880 addr = XEXP (addr, 1);
32881 else if (CONSTANT_P (XEXP (addr, 0)))
32882 addr = XEXP (addr, 1);
32883 else if (CONSTANT_P (XEXP (addr, 1)))
32884 addr = XEXP (addr, 0);
32885 else
32886 gcc_unreachable ();
32888 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32889 return addr;
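/* E.g. for ADDR = (plus (reg 9) (const_int 16)) this returns (reg 9);
   for (plus (reg 0) (reg 5)) it returns (reg 5), since r0 must never
   be chosen as the incrementable address register here.  */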
32892 void
32893 rs6000_fatal_bad_address (rtx op)
32895 fatal_insn ("bad address", op);
32898 #if TARGET_MACHO
32900 typedef struct branch_island_d {
32901 tree function_name;
32902 tree label_name;
32903 int line_number;
32904 } branch_island;
32907 static vec<branch_island, va_gc> *branch_islands;
32909 /* Remember to generate a branch island for far calls to the given
32910 function. */
32912 static void
32913 add_compiler_branch_island (tree label_name, tree function_name,
32914 int line_number)
32916 branch_island bi = {function_name, label_name, line_number};
32917 vec_safe_push (branch_islands, bi);
32920 /* Generate far-jump branch islands for everything recorded in
32921 branch_islands. Invoked immediately after the last instruction of
32922 the epilogue has been emitted; the branch islands must be appended
32923 to, and contiguous with, the function body. Mach-O stubs are
32924 generated in machopic_output_stub(). */
32926 static void
32927 macho_branch_islands (void)
32929 char tmp_buf[512];
32931 while (!vec_safe_is_empty (branch_islands))
32933 branch_island *bi = &branch_islands->last ();
32934 const char *label = IDENTIFIER_POINTER (bi->label_name);
32935 const char *name = IDENTIFIER_POINTER (bi->function_name);
32936 char name_buf[512];
32937 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32938 if (name[0] == '*' || name[0] == '&')
32939 strcpy (name_buf, name+1);
32940 else
32942 name_buf[0] = '_';
32943 strcpy (name_buf+1, name);
32945 strcpy (tmp_buf, "\n");
32946 strcat (tmp_buf, label);
32947 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32948 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32949 dbxout_stabd (N_SLINE, bi->line_number);
32950 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32951 if (flag_pic)
32953 if (TARGET_LINK_STACK)
32955 char name[32];
32956 get_ppc476_thunk_name (name);
32957 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32958 strcat (tmp_buf, name);
32959 strcat (tmp_buf, "\n");
32960 strcat (tmp_buf, label);
32961 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32963 else
32965 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32966 strcat (tmp_buf, label);
32967 strcat (tmp_buf, "_pic\n");
32968 strcat (tmp_buf, label);
32969 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32972 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32973 strcat (tmp_buf, name_buf);
32974 strcat (tmp_buf, " - ");
32975 strcat (tmp_buf, label);
32976 strcat (tmp_buf, "_pic)\n");
32978 strcat (tmp_buf, "\tmtlr r0\n");
32980 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32981 strcat (tmp_buf, name_buf);
32982 strcat (tmp_buf, " - ");
32983 strcat (tmp_buf, label);
32984 strcat (tmp_buf, "_pic)\n");
32986 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32988 else
32990 strcat (tmp_buf, ":\nlis r12,hi16(");
32991 strcat (tmp_buf, name_buf);
32992 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32993 strcat (tmp_buf, name_buf);
32994 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32996 output_asm_insn (tmp_buf, 0);
32997 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32998 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32999 dbxout_stabd (N_SLINE, bi->line_number);
33000 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33001 branch_islands->pop ();
33005 /* NO_PREVIOUS_DEF checks whether the function name is already in the
33006 branch-island list. */
33008 static int
33009 no_previous_def (tree function_name)
33011 branch_island *bi;
33012 unsigned ix;
33014 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33015 if (function_name == bi->function_name)
33016 return 0;
33017 return 1;
33020 /* GET_PREV_LABEL gets the label name from the previous definition of
33021 the function. */
33023 static tree
33024 get_prev_label (tree function_name)
33026 branch_island *bi;
33027 unsigned ix;
33029 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33030 if (function_name == bi->function_name)
33031 return bi->label_name;
33032 return NULL_TREE;
33035 /* INSN is either a function call or a millicode call. It may have an
33036 unconditional jump in its delay slot.
33038 CALL_DEST is the routine we are calling. */
33040 char *
33041 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
33042 int cookie_operand_number)
33044 static char buf[256];
33045 if (darwin_emit_branch_islands
33046 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33047 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33049 tree labelname;
33050 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33052 if (no_previous_def (funname))
33054 rtx label_rtx = gen_label_rtx ();
33055 char *label_buf, temp_buf[256];
33056 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33057 CODE_LABEL_NUMBER (label_rtx));
33058 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33059 labelname = get_identifier (label_buf);
33060 add_compiler_branch_island (labelname, funname, insn_line (insn));
33062 else
33063 labelname = get_prev_label (funname);
33065 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33066 instruction will reach 'foo', otherwise link as 'bl L42'".
33067 "L42" should be a 'branch island', that will do a far jump to
33068 'foo'. Branch islands are generated in
33069 macho_branch_islands(). */
33070 sprintf (buf, "jbsr %%z%d,%.246s",
33071 dest_operand_number, IDENTIFIER_POINTER (labelname));
33073 else
33074 sprintf (buf, "bl %%z%d", dest_operand_number);
33075 return buf;
33078 /* Generate PIC and indirect symbol stubs. */
33080 void
33081 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33083 unsigned int length;
33084 char *symbol_name, *lazy_ptr_name;
33085 char *local_label_0;
33086 static int label = 0;
33088 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33089 symb = (*targetm.strip_name_encoding) (symb);
33092 length = strlen (symb);
33093 symbol_name = XALLOCAVEC (char, length + 32);
33094 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33096 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33097 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33099 if (flag_pic == 2)
33100 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33101 else
33102 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33104 if (flag_pic == 2)
33106 fprintf (file, "\t.align 5\n");
33108 fprintf (file, "%s:\n", stub);
33109 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33111 label++;
33112 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33113 sprintf (local_label_0, "\"L%011d$spb\"", label);
33115 fprintf (file, "\tmflr r0\n");
33116 if (TARGET_LINK_STACK)
33118 char name[32];
33119 get_ppc476_thunk_name (name);
33120 fprintf (file, "\tbl %s\n", name);
33121 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33123 else
33125 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33126 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33128 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33129 lazy_ptr_name, local_label_0);
33130 fprintf (file, "\tmtlr r0\n");
33131 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33132 (TARGET_64BIT ? "ldu" : "lwzu"),
33133 lazy_ptr_name, local_label_0);
33134 fprintf (file, "\tmtctr r12\n");
33135 fprintf (file, "\tbctr\n");
33137 else
33139 fprintf (file, "\t.align 4\n");
33141 fprintf (file, "%s:\n", stub);
33142 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33144 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33145 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33146 (TARGET_64BIT ? "ldu" : "lwzu"),
33147 lazy_ptr_name);
33148 fprintf (file, "\tmtctr r12\n");
33149 fprintf (file, "\tbctr\n");
33152 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33153 fprintf (file, "%s:\n", lazy_ptr_name);
33154 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33155 fprintf (file, "%sdyld_stub_binding_helper\n",
33156 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33159 /* Legitimize PIC addresses. If the address is already
33160 position-independent, we return ORIG. Newly generated
33161 position-independent addresses go into a reg. This is REG if
33162 nonzero, otherwise we allocate register(s) as necessary. */
33164 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
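/* SMALL_INT tests whether X fits in a signed 16-bit immediate: the
   biased unsigned compare accepts exactly -0x8000 .. 0x7fff, e.g.
   SMALL_INT (GEN_INT (0x7fff)) is true while SMALL_INT (GEN_INT (0x8000))
   is false (an illustrative reading, not from the original source).  */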
33166 rtx
33167 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33168 rtx reg)
33170 rtx base, offset;
33172 if (reg == NULL && !reload_completed)
33173 reg = gen_reg_rtx (Pmode);
33175 if (GET_CODE (orig) == CONST)
33177 rtx reg_temp;
33179 if (GET_CODE (XEXP (orig, 0)) == PLUS
33180 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33181 return orig;
33183 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33185 /* Use a different reg for the intermediate value, as
33186 it will be marked UNCHANGING. */
33187 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33188 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33189 Pmode, reg_temp);
33190 offset =
33191 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33192 Pmode, reg);
33194 if (GET_CODE (offset) == CONST_INT)
33196 if (SMALL_INT (offset))
33197 return plus_constant (Pmode, base, INTVAL (offset));
33198 else if (!reload_completed)
33199 offset = force_reg (Pmode, offset);
33200 else
33202 rtx mem = force_const_mem (Pmode, orig);
33203 return machopic_legitimize_pic_address (mem, Pmode, reg);
33206 return gen_rtx_PLUS (Pmode, base, offset);
33209 /* Fall back on generic machopic code. */
33210 return machopic_legitimize_pic_address (orig, mode, reg);
33213 /* Output a .machine directive for the Darwin assembler, and call
33214 the generic start_file routine. */
33216 static void
33217 rs6000_darwin_file_start (void)
33219 static const struct
33221 const char *arg;
33222 const char *name;
33223 HOST_WIDE_INT if_set;
33224 } mapping[] = {
33225 { "ppc64", "ppc64", MASK_64BIT },
33226 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33227 { "power4", "ppc970", 0 },
33228 { "G5", "ppc970", 0 },
33229 { "7450", "ppc7450", 0 },
33230 { "7400", "ppc7400", MASK_ALTIVEC },
33231 { "G4", "ppc7400", 0 },
33232 { "750", "ppc750", 0 },
33233 { "740", "ppc750", 0 },
33234 { "G3", "ppc750", 0 },
33235 { "604e", "ppc604e", 0 },
33236 { "604", "ppc604", 0 },
33237 { "603e", "ppc603", 0 },
33238 { "603", "ppc603", 0 },
33239 { "601", "ppc601", 0 },
33240 { NULL, "ppc", 0 } };
33241 const char *cpu_id = "";
33242 size_t i;
33244 rs6000_file_start ();
33245 darwin_file_start ();
33247 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33249 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33250 cpu_id = rs6000_default_cpu;
33252 if (global_options_set.x_rs6000_cpu_index)
33253 cpu_id = processor_target_table[rs6000_cpu_index].name;
33255 /* Look through the mapping array. Pick the first name that either
33256 matches the argument, has a bit set in IF_SET that is also set
33257 in the target flags, or has a NULL name. */
33259 i = 0;
33260 while (mapping[i].arg != NULL
33261 && strcmp (mapping[i].arg, cpu_id) != 0
33262 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33263 i++;
33265 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
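/* For example (hypothetical invocation, not from the original source):
   a 32-bit compile with -mcpu=750 and no AltiVec walks the table past
   every entry whose name or mask does not apply and emits
   "\t.machine ppc750".  */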
33268 #endif /* TARGET_MACHO */
33270 #if TARGET_ELF
33271 static int
33272 rs6000_elf_reloc_rw_mask (void)
33274 if (flag_pic)
33275 return 3;
33276 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33277 return 2;
33278 else
33279 return 0;
33282 /* Record an element in the table of global constructors. SYMBOL is
33283 a SYMBOL_REF of the function to be called; PRIORITY is a number
33284 between 0 and MAX_INIT_PRIORITY.
33286 This differs from default_named_section_asm_out_constructor in
33287 that we have special handling for -mrelocatable. */
33289 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33290 static void
33291 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33293 const char *section = ".ctors";
33294 char buf[18];
33296 if (priority != DEFAULT_INIT_PRIORITY)
33298 sprintf (buf, ".ctors.%.5u",
33299 /* Invert the numbering so the linker puts us in the proper
33300 order; constructors are run from right to left, and the
33301 linker sorts in increasing order. */
33302 MAX_INIT_PRIORITY - priority);
33303 section = buf;
33306 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33307 assemble_align (POINTER_SIZE);
33309 if (DEFAULT_ABI == ABI_V4
33310 && (TARGET_RELOCATABLE || flag_pic > 1))
33312 fputs ("\t.long (", asm_out_file);
33313 output_addr_const (asm_out_file, symbol);
33314 fputs (")@fixup\n", asm_out_file);
33316 else
33317 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
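/* As an illustration (not from the original source): a constructor with
   priority 100 is placed in section ".ctors.65435" when
   MAX_INIT_PRIORITY is 65535, so the linker's lexicographic sort yields
   the required reverse execution order.  */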
33320 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33321 static void
33322 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33324 const char *section = ".dtors";
33325 char buf[18];
33327 if (priority != DEFAULT_INIT_PRIORITY)
33329 sprintf (buf, ".dtors.%.5u",
33330 /* Invert the numbering so the linker puts us in the proper
33331 order; constructors are run from right to left, and the
33332 linker sorts in increasing order. */
33333 MAX_INIT_PRIORITY - priority);
33334 section = buf;
33337 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33338 assemble_align (POINTER_SIZE);
33340 if (DEFAULT_ABI == ABI_V4
33341 && (TARGET_RELOCATABLE || flag_pic > 1))
33343 fputs ("\t.long (", asm_out_file);
33344 output_addr_const (asm_out_file, symbol);
33345 fputs (")@fixup\n", asm_out_file);
33347 else
33348 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33351 void
33352 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33354 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33356 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33357 ASM_OUTPUT_LABEL (file, name);
33358 fputs (DOUBLE_INT_ASM_OP, file);
33359 rs6000_output_function_entry (file, name);
33360 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33361 if (DOT_SYMBOLS)
33363 fputs ("\t.size\t", file);
33364 assemble_name (file, name);
33365 fputs (",24\n\t.type\t.", file);
33366 assemble_name (file, name);
33367 fputs (",@function\n", file);
33368 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33370 fputs ("\t.globl\t.", file);
33371 assemble_name (file, name);
33372 putc ('\n', file);
33375 else
33376 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33377 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33378 rs6000_output_function_entry (file, name);
33379 fputs (":\n", file);
33380 return;
33383 if (DEFAULT_ABI == ABI_V4
33384 && (TARGET_RELOCATABLE || flag_pic > 1)
33385 && !TARGET_SECURE_PLT
33386 && (!constant_pool_empty_p () || crtl->profile)
33387 && uses_TOC ())
33389 char buf[256];
33391 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33393 fprintf (file, "\t.long ");
33394 assemble_name (file, toc_label_name);
33395 need_toc_init = 1;
33396 putc ('-', file);
33397 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33398 assemble_name (file, buf);
33399 putc ('\n', file);
33402 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33403 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33405 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33407 char buf[256];
33409 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33411 fprintf (file, "\t.quad .TOC.-");
33412 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33413 assemble_name (file, buf);
33414 putc ('\n', file);
33417 if (DEFAULT_ABI == ABI_AIX)
33419 const char *desc_name, *orig_name;
33421 orig_name = (*targetm.strip_name_encoding) (name);
33422 desc_name = orig_name;
33423 while (*desc_name == '.')
33424 desc_name++;
33426 if (TREE_PUBLIC (decl))
33427 fprintf (file, "\t.globl %s\n", desc_name);
33429 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33430 fprintf (file, "%s:\n", desc_name);
33431 fprintf (file, "\t.long %s\n", orig_name);
33432 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33433 fputs ("\t.long 0\n", file);
33434 fprintf (file, "\t.previous\n");
33436 ASM_OUTPUT_LABEL (file, name);
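/* For 64-bit ELFv1 the code above emits a function descriptor in .opd.
   A rough sketch of the assembly for a public function "foo" with
   dot-symbols enabled (illustrative only, not generated from here):

	.section ".opd","aw"
	.align 3
   foo:
	.quad .foo,.TOC.@tocbase,0
	.previous

   The descriptor holds the code address, the TOC base, and a zero
   environment word.  */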
33439 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33440 static void
33441 rs6000_elf_file_end (void)
33443 #ifdef HAVE_AS_GNU_ATTRIBUTE
33444 /* ??? The value emitted depends on options active at file end.
33445 Assume anyone using #pragma or attributes that might change
33446 options knows what they are doing. */
33447 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33448 && rs6000_passes_float)
33450 int fp;
33452 if (TARGET_DF_FPR)
33453 fp = 1;
33454 else if (TARGET_SF_FPR)
33455 fp = 3;
33456 else
33457 fp = 2;
33458 if (rs6000_passes_long_double)
33460 if (!TARGET_LONG_DOUBLE_128)
33461 fp |= 2 * 4;
33462 else if (TARGET_IEEEQUAD)
33463 fp |= 3 * 4;
33464 else
33465 fp |= 1 * 4;
33467 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33469 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33471 if (rs6000_passes_vector)
33472 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33473 (TARGET_ALTIVEC_ABI ? 2 : 1));
33474 if (rs6000_returns_struct)
33475 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33476 aix_struct_return ? 2 : 1);
33478 #endif
33479 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33480 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33481 file_end_indicate_exec_stack ();
33482 #endif
33484 if (flag_split_stack)
33485 file_end_indicate_split_stack ();
33487 if (cpu_builtin_p)
33489 /* We have expanded a CPU builtin, so we need to emit a reference to
33490 the special symbol that LIBC uses to declare it supports the
33491 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33492 switch_to_section (data_section);
33493 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33494 fprintf (asm_out_file, "\t%s %s\n",
33495 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
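/* The FP attribute above packs two 2-bit fields: bits 0-1 describe the
   scalar FP ABI (1 = hard double, 2 = soft float, 3 = single-precision
   hardware only) and bits 2-3 the long double format (1 = 128-bit IBM,
   2 = 64-bit, 3 = IEEE 128-bit).  E.g. hard float with IBM 128-bit long
   double gives 1 + 1*4, i.e. "\t.gnu_attribute 4, 5" (a worked reading
   of the code above, offered as an illustration).  */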
33498 #endif
33500 #if TARGET_XCOFF
33502 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33503 #define HAVE_XCOFF_DWARF_EXTRAS 0
33504 #endif
33506 static enum unwind_info_type
33507 rs6000_xcoff_debug_unwind_info (void)
33509 return UI_NONE;
33512 static void
33513 rs6000_xcoff_asm_output_anchor (rtx symbol)
33515 char buffer[100];
33517 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33518 SYMBOL_REF_BLOCK_OFFSET (symbol));
33519 fprintf (asm_out_file, "%s", SET_ASM_OP);
33520 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33521 fprintf (asm_out_file, ",");
33522 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33523 fprintf (asm_out_file, "\n");
33526 static void
33527 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33529 fputs (GLOBAL_ASM_OP, stream);
33530 RS6000_OUTPUT_BASENAME (stream, name);
33531 putc ('\n', stream);
33534 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
33535 points to the section string variable. */
33537 static void
33538 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33540 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33541 *(const char *const *) directive,
33542 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33545 /* Likewise for read-write sections. */
33547 static void
33548 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33550 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33551 *(const char *const *) directive,
33552 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33555 static void
33556 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33558 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33559 *(const char *const *) directive,
33560 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33563 /* A get_unnamed_section callback, used for switching to toc_section. */
33565 static void
33566 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33568 if (TARGET_MINIMAL_TOC)
33570 /* toc_section is always selected at least once from
33571 rs6000_xcoff_file_start, so this is guaranteed to
33572 always be defined once and only once in each file. */
33573 if (!toc_initialized)
33575 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33576 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33577 toc_initialized = 1;
33579 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33580 (TARGET_32BIT ? "" : ",3"));
33582 else
33583 fputs ("\t.toc\n", asm_out_file);
33586 /* Implement TARGET_ASM_INIT_SECTIONS. */
33588 static void
33589 rs6000_xcoff_asm_init_sections (void)
33591 read_only_data_section
33592 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33593 &xcoff_read_only_section_name);
33595 private_data_section
33596 = get_unnamed_section (SECTION_WRITE,
33597 rs6000_xcoff_output_readwrite_section_asm_op,
33598 &xcoff_private_data_section_name);
33600 tls_data_section
33601 = get_unnamed_section (SECTION_TLS,
33602 rs6000_xcoff_output_tls_section_asm_op,
33603 &xcoff_tls_data_section_name);
33605 tls_private_data_section
33606 = get_unnamed_section (SECTION_TLS,
33607 rs6000_xcoff_output_tls_section_asm_op,
33608 &xcoff_private_data_section_name);
33610 read_only_private_data_section
33611 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33612 &xcoff_private_data_section_name);
33614 toc_section
33615 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33617 readonly_data_section = read_only_data_section;
33620 static int
33621 rs6000_xcoff_reloc_rw_mask (void)
33623 return 3;
33626 static void
33627 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33628 tree decl ATTRIBUTE_UNUSED)
33630 int smclass;
33631 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33633 if (flags & SECTION_EXCLUDE)
33634 smclass = 4;
33635 else if (flags & SECTION_DEBUG)
33637 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33638 return;
33640 else if (flags & SECTION_CODE)
33641 smclass = 0;
33642 else if (flags & SECTION_TLS)
33643 smclass = 3;
33644 else if (flags & SECTION_WRITE)
33645 smclass = 2;
33646 else
33647 smclass = 1;
33649 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33650 (flags & SECTION_CODE) ? "." : "",
33651 name, suffix[smclass], flags & SECTION_ENTSIZE);
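/* For example (an illustrative reading, not from the original source):
   a writable named section has only SECTION_WRITE set among the flags
   tested above, so smclass is 2 and the emitted directive has the form
   "\t.csect NAME[RW],N", where N is taken from the SECTION_ENTSIZE
   bits of FLAGS.  */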
33654 #define IN_NAMED_SECTION(DECL) \
33655 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33656 && DECL_SECTION_NAME (DECL) != NULL)
33658 static section *
33659 rs6000_xcoff_select_section (tree decl, int reloc,
33660 unsigned HOST_WIDE_INT align)
33662 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33663 named section. */
33664 if (align > BIGGEST_ALIGNMENT)
33666 resolve_unique_section (decl, reloc, true);
33667 if (IN_NAMED_SECTION (decl))
33668 return get_named_section (decl, NULL, reloc);
33671 if (decl_readonly_section (decl, reloc))
33673 if (TREE_PUBLIC (decl))
33674 return read_only_data_section;
33675 else
33676 return read_only_private_data_section;
33678 else
33680 #if HAVE_AS_TLS
33681 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33683 if (TREE_PUBLIC (decl))
33684 return tls_data_section;
33685 else if (bss_initializer_p (decl))
33687 /* Convert to COMMON to emit in BSS. */
33688 DECL_COMMON (decl) = 1;
33689 return tls_comm_section;
33691 else
33692 return tls_private_data_section;
33694 else
33695 #endif
33696 if (TREE_PUBLIC (decl))
33697 return data_section;
33698 else
33699 return private_data_section;
33703 static void
33704 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33706 const char *name;
33708 /* Use select_section for private data and uninitialized data with
33709 alignment <= BIGGEST_ALIGNMENT. */
33710 if (!TREE_PUBLIC (decl)
33711 || DECL_COMMON (decl)
33712 || (DECL_INITIAL (decl) == NULL_TREE
33713 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33714 || DECL_INITIAL (decl) == error_mark_node
33715 || (flag_zero_initialized_in_bss
33716 && initializer_zerop (DECL_INITIAL (decl))))
33717 return;
33719 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33720 name = (*targetm.strip_name_encoding) (name);
33721 set_decl_section_name (decl, name);
33724 /* Select section for constant in constant pool.
33726 On RS/6000, all constants are in the private read-only data area.
33727 However, if this is being placed in the TOC it must be output as a
33728 toc entry. */
33730 static section *
33731 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33732 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33734 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33735 return toc_section;
33736 else
33737 return read_only_private_data_section;
33740 /* Remove any trailing [DS] or the like from the symbol name. */
33742 static const char *
33743 rs6000_xcoff_strip_name_encoding (const char *name)
33745 size_t len;
33746 if (*name == '*')
33747 name++;
33748 len = strlen (name);
33749 if (name[len - 1] == ']')
33750 return ggc_alloc_string (name, len - 4);
33751 else
33752 return name;
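/* Illustrative examples (not from the original source):
   rs6000_xcoff_strip_name_encoding ("*foo") returns "foo", and
   "bar[DS]" becomes "bar", since the four-character mapping-class
   suffix is dropped whenever the name ends in ']'.  */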
33755 /* Section attributes. AIX is always PIC. */
33757 static unsigned int
33758 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33760 unsigned int align;
33761 unsigned int flags = default_section_type_flags (decl, name, reloc);
33763 /* Align to at least UNIT size. */
33764 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33765 align = MIN_UNITS_PER_WORD;
33766 else
33767 /* Increase alignment of large objects if not already stricter. */
33768 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33769 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33770 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33772 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33775 /* Output at beginning of assembler file.
33777 Initialize the section names for the RS/6000 at this point.
33779 Specify filename, including full path, to assembler.
33781 We want to go into the TOC section so at least one .toc will be emitted.
33782 Also, in order to output proper .bs/.es pairs, we need at least one static
33783 [RW] section emitted.
33785 Finally, declare mcount when profiling to make the assembler happy. */
33787 static void
33788 rs6000_xcoff_file_start (void)
33790 rs6000_gen_section_name (&xcoff_bss_section_name,
33791 main_input_filename, ".bss_");
33792 rs6000_gen_section_name (&xcoff_private_data_section_name,
33793 main_input_filename, ".rw_");
33794 rs6000_gen_section_name (&xcoff_read_only_section_name,
33795 main_input_filename, ".ro_");
33796 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33797 main_input_filename, ".tls_");
33798 rs6000_gen_section_name (&xcoff_tbss_section_name,
33799 main_input_filename, ".tbss_[UL]");
33801 fputs ("\t.file\t", asm_out_file);
33802 output_quoted_string (asm_out_file, main_input_filename);
33803 fputc ('\n', asm_out_file);
33804 if (write_symbols != NO_DEBUG)
33805 switch_to_section (private_data_section);
33806 switch_to_section (toc_section);
33807 switch_to_section (text_section);
33808 if (profile_flag)
33809 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33810 rs6000_file_start ();
33813 /* Output at end of assembler file.
33814 On the RS/6000, referencing data should automatically pull in text. */
33816 static void
33817 rs6000_xcoff_file_end (void)
33819 switch_to_section (text_section);
33820 fputs ("_section_.text:\n", asm_out_file);
33821 switch_to_section (data_section);
33822 fputs (TARGET_32BIT
33823 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33824 asm_out_file);
33827 struct declare_alias_data
33829 FILE *file;
33830 bool function_descriptor;
33833 /* Declare alias N. A callback for call_for_symbol_and_aliases. */
33835 static bool
33836 rs6000_declare_alias (struct symtab_node *n, void *d)
33838 struct declare_alias_data *data = (struct declare_alias_data *)d;
33839 /* Main symbol is output specially, because varasm machinery does part of
33840 the job for us - we do not need to declare .globl/lglobs and such. */
33841 if (!n->alias || n->weakref)
33842 return false;
33844 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33845 return false;
33847 /* Prevent assemble_alias from trying to use the .set pseudo-op, which
33848 does not behave as the middle-end expects. */
33849 TREE_ASM_WRITTEN (n->decl) = true;
33851 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33852 char *buffer = (char *) alloca (strlen (name) + 2);
33853 char *p;
33854 int dollar_inside = 0;
33856 strcpy (buffer, name);
33857 p = strchr (buffer, '$');
33858 while (p) {
33859 *p = '_';
33860 dollar_inside++;
33861 p = strchr (p + 1, '$');
33863 if (TREE_PUBLIC (n->decl))
33865 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33867 if (dollar_inside) {
33868 if (data->function_descriptor)
33869 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33870 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33872 if (data->function_descriptor)
33874 fputs ("\t.globl .", data->file);
33875 RS6000_OUTPUT_BASENAME (data->file, buffer);
33876 putc ('\n', data->file);
33878 fputs ("\t.globl ", data->file);
33879 RS6000_OUTPUT_BASENAME (data->file, buffer);
33880 putc ('\n', data->file);
33882 #ifdef ASM_WEAKEN_DECL
33883 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33884 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33885 #endif
33887 else
33889 if (dollar_inside)
33891 if (data->function_descriptor)
33892 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33893 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33895 if (data->function_descriptor)
33897 fputs ("\t.lglobl .", data->file);
33898 RS6000_OUTPUT_BASENAME (data->file, buffer);
33899 putc ('\n', data->file);
33901 fputs ("\t.lglobl ", data->file);
33902 RS6000_OUTPUT_BASENAME (data->file, buffer);
33903 putc ('\n', data->file);
33905 if (data->function_descriptor)
33906 fputs (".", data->file);
33907 RS6000_OUTPUT_BASENAME (data->file, buffer);
33908 fputs (":\n", data->file);
33909 return false;
33913 #ifdef HAVE_GAS_HIDDEN
33914 /* Helper function to calculate visibility of a DECL
33915 and return the value as a const string. */
33917 static const char *
33918 rs6000_xcoff_visibility (tree decl)
33920 static const char * const visibility_types[] = {
33921 "", ",protected", ",hidden", ",internal"
33924 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33926 if (TREE_CODE (decl) == FUNCTION_DECL
33927 && cgraph_node::get (decl)
33928 && cgraph_node::get (decl)->instrumentation_clone
33929 && cgraph_node::get (decl)->instrumented_version)
33930 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
33932 return visibility_types[vis];
33934 #endif
33937 /* This macro produces the initial definition of a function name.
33938 On the RS/6000, we need to place an extra '.' in the function name and
33939 output the function descriptor.
33940 Dollar signs are converted to underscores.
33942 The csect for the function will have already been created when
33943 text_section was selected. We do have to go back to that csect, however.
33945 The third and fourth parameters to the .function pseudo-op (16 and 044)
33946 are placeholders which no longer have any use.
33948 Because AIX assembler's .set command has unexpected semantics, we output
33949 all aliases as alternative labels in front of the definition. */
33951 void
33952 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33954 char *buffer = (char *) alloca (strlen (name) + 1);
33955 char *p;
33956 int dollar_inside = 0;
33957 struct declare_alias_data data = {file, false};
33959 strcpy (buffer, name);
33960 p = strchr (buffer, '$');
33961 while (p) {
33962 *p = '_';
33963 dollar_inside++;
33964 p = strchr (p + 1, '$');
33966 if (TREE_PUBLIC (decl))
33968 if (!RS6000_WEAK || !DECL_WEAK (decl))
33970 if (dollar_inside) {
33971 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33972 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33974 fputs ("\t.globl .", file);
33975 RS6000_OUTPUT_BASENAME (file, buffer);
33976 #ifdef HAVE_GAS_HIDDEN
33977 fputs (rs6000_xcoff_visibility (decl), file);
33978 #endif
33979 putc ('\n', file);
33982 else
33984 if (dollar_inside) {
33985 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33986 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33988 fputs ("\t.lglobl .", file);
33989 RS6000_OUTPUT_BASENAME (file, buffer);
33990 putc ('\n', file);
33992 fputs ("\t.csect ", file);
33993 RS6000_OUTPUT_BASENAME (file, buffer);
33994 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33995 RS6000_OUTPUT_BASENAME (file, buffer);
33996 fputs (":\n", file);
33997 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33998 &data, true);
33999 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34000 RS6000_OUTPUT_BASENAME (file, buffer);
34001 fputs (", TOC[tc0], 0\n", file);
34002 in_section = NULL;
34003 switch_to_section (function_section (decl));
34004 putc ('.', file);
34005 RS6000_OUTPUT_BASENAME (file, buffer);
34006 fputs (":\n", file);
34007 data.function_descriptor = true;
34008 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34009 &data, true);
34010 if (!DECL_IGNORED_P (decl))
34012 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34013 xcoffout_declare_function (file, decl, buffer);
34014 else if (write_symbols == DWARF2_DEBUG)
34016 name = (*targetm.strip_name_encoding) (name);
34017 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34020 return;
34024 /* Output assembly language to globalize a symbol from a DECL,
34025 possibly with visibility. */
34027 void
34028 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34030 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34031 fputs (GLOBAL_ASM_OP, stream);
34032 RS6000_OUTPUT_BASENAME (stream, name);
34033 #ifdef HAVE_GAS_HIDDEN
34034 fputs (rs6000_xcoff_visibility (decl), stream);
34035 #endif
34036 putc ('\n', stream);
34039 /* Output assembly language to define a symbol as COMMON from a DECL,
34040 possibly with visibility. */
34042 void
34043 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34044 tree decl ATTRIBUTE_UNUSED,
34045 const char *name,
34046 unsigned HOST_WIDE_INT size,
34047 unsigned HOST_WIDE_INT align)
34049 unsigned HOST_WIDE_INT align2 = 2;
34051 if (align > 32)
34052 align2 = floor_log2 (align / BITS_PER_UNIT);
34053 else if (size > 4)
34054 align2 = 3;
34056 fputs (COMMON_ASM_OP, stream);
34057 RS6000_OUTPUT_BASENAME (stream, name);
34059 fprintf (stream,
34060 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34061 size, align2);
34063 #ifdef HAVE_GAS_HIDDEN
34064 fputs (rs6000_xcoff_visibility (decl), stream);
34065 #endif
34066 putc ('\n', stream);
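/* A worked example (hypothetical values, not from the original source):
   SIZE 16 and ALIGN 128 (in bits) give align2 = floor_log2 (128/8) = 4,
   so the directive has the form ".comm NAME,16,4"; SIZE 8 with the
   default ALIGN takes the size > 4 branch and emits ",8,3".  */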
34069 /* This macro produces the initial definition of an object (variable) name.
34070 Because AIX assembler's .set command has unexpected semantics, we output
34071 all aliases as alternative labels in front of the definition. */
34073 void
34074 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34076 struct declare_alias_data data = {file, false};
34077 RS6000_OUTPUT_BASENAME (file, name);
34078 fputs (":\n", file);
34079 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34080 &data, true);
34083 /* Override the default 'SYMBOL-.' syntax with AIX-compatible 'SYMBOL-$'. */
34085 void
34086 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34088 fputs (integer_asm_op (size, FALSE), file);
34089 assemble_name (file, label);
34090 fputs ("-$", file);
34093 /* Output a symbol offset relative to the dbase for the current object.
34094 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34095 signed offsets.
34097 __gcc_unwind_dbase is embedded in all executables/libraries through
34098 libgcc/config/rs6000/crtdbase.S. */
34100 void
34101 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34103 fputs (integer_asm_op (size, FALSE), file);
34104 assemble_name (file, label);
34105 fputs("-__gcc_unwind_dbase", file);
34108 #ifdef HAVE_AS_TLS
34109 static void
34110 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34112 rtx symbol;
34113 int flags;
34114 const char *symname;
34116 default_encode_section_info (decl, rtl, first);
34118 /* Careful not to prod global register variables. */
34119 if (!MEM_P (rtl))
34120 return;
34121 symbol = XEXP (rtl, 0);
34122 if (GET_CODE (symbol) != SYMBOL_REF)
34123 return;
34125 flags = SYMBOL_REF_FLAGS (symbol);
34127 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34128 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34130 SYMBOL_REF_FLAGS (symbol) = flags;
34132 /* Append mapping class to extern decls. */
34133 symname = XSTR (symbol, 0);
34134 if (decl /* sync condition with assemble_external () */
34135 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34136 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34137 || TREE_CODE (decl) == FUNCTION_DECL)
34138 && symname[strlen (symname) - 1] != ']')
34140 char *newname = (char *) alloca (strlen (symname) + 5);
34141 strcpy (newname, symname);
34142 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34143 ? "[DS]" : "[UA]"));
34144 XSTR (symbol, 0) = ggc_strdup (newname);
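/* As an illustration of the renaming above (not from the original
   source): an extern function symbol "foo" becomes "foo[DS]" and an
   extern non-TLS variable "bar" becomes "bar[UA]", matching the XCOFF
   mapping classes the AIX linker expects.  */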
34147 #endif /* HAVE_AS_TLS */
34148 #endif /* TARGET_XCOFF */
34150 void
34151 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34152 const char *name, const char *val)
34154 fputs ("\t.weak\t", stream);
34155 RS6000_OUTPUT_BASENAME (stream, name);
34156 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34157 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34159 if (TARGET_XCOFF)
34160 fputs ("[DS]", stream);
34161 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34162 if (TARGET_XCOFF)
34163 fputs (rs6000_xcoff_visibility (decl), stream);
34164 #endif
34165 fputs ("\n\t.weak\t.", stream);
34166 RS6000_OUTPUT_BASENAME (stream, name);
34168 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34169 if (TARGET_XCOFF)
34170 fputs (rs6000_xcoff_visibility (decl), stream);
34171 #endif
34172 fputc ('\n', stream);
34173 if (val)
34175 #ifdef ASM_OUTPUT_DEF
34176 ASM_OUTPUT_DEF (stream, name, val);
34177 #endif
34178 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34179 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34181 fputs ("\t.set\t.", stream);
34182 RS6000_OUTPUT_BASENAME (stream, name);
34183 fputs (",.", stream);
34184 RS6000_OUTPUT_BASENAME (stream, val);
34185 fputc ('\n', stream);
34191 /* Return true if INSN should not be copied. */
34193 static bool
34194 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34196 return recog_memoized (insn) >= 0
34197 && get_attr_cannot_copy (insn);
34200 /* Compute a (partial) cost for rtx X. Return true if the complete
34201 cost has been computed, and false if subexpressions should be
34202 scanned. In either case, *TOTAL contains the cost result. */
34204 static bool
34205 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34206 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34208 int code = GET_CODE (x);
34210 switch (code)
34212 /* On the RS/6000, if it is valid in the insn, it is free. */
34213 case CONST_INT:
34214 if (((outer_code == SET
34215 || outer_code == PLUS
34216 || outer_code == MINUS)
34217 && (satisfies_constraint_I (x)
34218 || satisfies_constraint_L (x)))
34219 || (outer_code == AND
34220 && (satisfies_constraint_K (x)
34221 || (mode == SImode
34222 ? satisfies_constraint_L (x)
34223 : satisfies_constraint_J (x))))
34224 || ((outer_code == IOR || outer_code == XOR)
34225 && (satisfies_constraint_K (x)
34226 || (mode == SImode
34227 ? satisfies_constraint_L (x)
34228 : satisfies_constraint_J (x))))
34229 || outer_code == ASHIFT
34230 || outer_code == ASHIFTRT
34231 || outer_code == LSHIFTRT
34232 || outer_code == ROTATE
34233 || outer_code == ROTATERT
34234 || outer_code == ZERO_EXTRACT
34235 || (outer_code == MULT
34236 && satisfies_constraint_I (x))
34237 || ((outer_code == DIV || outer_code == UDIV
34238 || outer_code == MOD || outer_code == UMOD)
34239 && exact_log2 (INTVAL (x)) >= 0)
34240 || (outer_code == COMPARE
34241 && (satisfies_constraint_I (x)
34242 || satisfies_constraint_K (x)))
34243 || ((outer_code == EQ || outer_code == NE)
34244 && (satisfies_constraint_I (x)
34245 || satisfies_constraint_K (x)
34246 || (mode == SImode
34247 ? satisfies_constraint_L (x)
34248 : satisfies_constraint_J (x))))
34249 || (outer_code == GTU
34250 && satisfies_constraint_I (x))
34251 || (outer_code == LTU
34252 && satisfies_constraint_P (x)))
34254 *total = 0;
34255 return true;
34257 else if ((outer_code == PLUS
34258 && reg_or_add_cint_operand (x, VOIDmode))
34259 || (outer_code == MINUS
34260 && reg_or_sub_cint_operand (x, VOIDmode))
34261 || ((outer_code == SET
34262 || outer_code == IOR
34263 || outer_code == XOR)
34264 && (INTVAL (x)
34265 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34267 *total = COSTS_N_INSNS (1);
34268 return true;
34270 /* FALLTHRU */
34272 case CONST_DOUBLE:
34273 case CONST_WIDE_INT:
34274 case CONST:
34275 case HIGH:
34276 case SYMBOL_REF:
34277 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34278 return true;
34280 case MEM:
34281 /* When optimizing for size, MEM should be slightly more expensive
34282 than generating the address, e.g., (plus (reg) (const)).
34283 L1 cache latency is about two instructions. */
34284 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34285 if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
34286 *total += COSTS_N_INSNS (100);
34287 return true;
34289 case LABEL_REF:
34290 *total = 0;
34291 return true;
34293 case PLUS:
34294 case MINUS:
34295 if (FLOAT_MODE_P (mode))
34296 *total = rs6000_cost->fp;
34297 else
34298 *total = COSTS_N_INSNS (1);
34299 return false;
34301 case MULT:
34302 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34303 && satisfies_constraint_I (XEXP (x, 1)))
34305 if (INTVAL (XEXP (x, 1)) >= -256
34306 && INTVAL (XEXP (x, 1)) <= 255)
34307 *total = rs6000_cost->mulsi_const9;
34308 else
34309 *total = rs6000_cost->mulsi_const;
34311 else if (mode == SFmode)
34312 *total = rs6000_cost->fp;
34313 else if (FLOAT_MODE_P (mode))
34314 *total = rs6000_cost->dmul;
34315 else if (mode == DImode)
34316 *total = rs6000_cost->muldi;
34317 else
34318 *total = rs6000_cost->mulsi;
34319 return false;
34321 case FMA:
34322 if (mode == SFmode)
34323 *total = rs6000_cost->fp;
34324 else
34325 *total = rs6000_cost->dmul;
34326 break;
34328 case DIV:
34329 case MOD:
34330 if (FLOAT_MODE_P (mode))
34332 *total = mode == DFmode ? rs6000_cost->ddiv
34333 : rs6000_cost->sdiv;
34334 return false;
34336 /* FALLTHRU */
34338 case UDIV:
34339 case UMOD:
34340 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34341 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34343 if (code == DIV || code == MOD)
34344 /* Shift, addze */
34345 *total = COSTS_N_INSNS (2);
34346 else
34347 /* Shift */
34348 *total = COSTS_N_INSNS (1);
34350 else
34352 if (GET_MODE (XEXP (x, 1)) == DImode)
34353 *total = rs6000_cost->divdi;
34354 else
34355 *total = rs6000_cost->divsi;
34357 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34358 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34359 *total += COSTS_N_INSNS (2);
34360 return false;
34362 case CTZ:
34363 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34364 return false;
34366 case FFS:
34367 *total = COSTS_N_INSNS (4);
34368 return false;
34370 case POPCOUNT:
34371 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34372 return false;
34374 case PARITY:
34375 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34376 return false;
34378 case NOT:
34379 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34380 *total = 0;
34381 else
34382 *total = COSTS_N_INSNS (1);
34383 return false;
34385 case AND:
34386 if (CONST_INT_P (XEXP (x, 1)))
34388 rtx left = XEXP (x, 0);
34389 rtx_code left_code = GET_CODE (left);
34391 /* rotate-and-mask: 1 insn. */
34392 if ((left_code == ROTATE
34393 || left_code == ASHIFT
34394 || left_code == LSHIFTRT)
34395 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34397 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34398 if (!CONST_INT_P (XEXP (left, 1)))
34399 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34400 *total += COSTS_N_INSNS (1);
34401 return true;
34404 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34405 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34406 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34407 || (val & 0xffff) == val
34408 || (val & 0xffff0000) == val
34409 || ((val & 0xffff) == 0 && mode == SImode))
34411 *total = rtx_cost (left, mode, AND, 0, speed);
34412 *total += COSTS_N_INSNS (1);
34413 return true;
34416 /* 2 insns. */
34417 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34419 *total = rtx_cost (left, mode, AND, 0, speed);
34420 *total += COSTS_N_INSNS (2);
34421 return true;
34425 *total = COSTS_N_INSNS (1);
34426 return false;
34428 case IOR:
34429 /* FIXME */
34430 *total = COSTS_N_INSNS (1);
34431 return true;
34433 case CLZ:
34434 case XOR:
34435 case ZERO_EXTRACT:
34436 *total = COSTS_N_INSNS (1);
34437 return false;
34439 case ASHIFT:
34440 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34441 the sign extend and shift separately within the insn. */
34442 if (TARGET_EXTSWSLI && mode == DImode
34443 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34444 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34446 *total = 0;
34447 return false;
34449 /* fall through */
34451 case ASHIFTRT:
34452 case LSHIFTRT:
34453 case ROTATE:
34454 case ROTATERT:
34455 /* Handle mul_highpart. */
34456 if (outer_code == TRUNCATE
34457 && GET_CODE (XEXP (x, 0)) == MULT)
34459 if (mode == DImode)
34460 *total = rs6000_cost->muldi;
34461 else
34462 *total = rs6000_cost->mulsi;
34463 return true;
34465 else if (outer_code == AND)
34466 *total = 0;
34467 else
34468 *total = COSTS_N_INSNS (1);
34469 return false;
34471 case SIGN_EXTEND:
34472 case ZERO_EXTEND:
34473 if (GET_CODE (XEXP (x, 0)) == MEM)
34474 *total = 0;
34475 else
34476 *total = COSTS_N_INSNS (1);
34477 return false;
34479 case COMPARE:
34480 case NEG:
34481 case ABS:
34482 if (!FLOAT_MODE_P (mode))
34484 *total = COSTS_N_INSNS (1);
34485 return false;
34487 /* FALLTHRU */
34489 case FLOAT:
34490 case UNSIGNED_FLOAT:
34491 case FIX:
34492 case UNSIGNED_FIX:
34493 case FLOAT_TRUNCATE:
34494 *total = rs6000_cost->fp;
34495 return false;
34497 case FLOAT_EXTEND:
34498 if (mode == DFmode)
34499 *total = rs6000_cost->sfdf_convert;
34500 else
34501 *total = rs6000_cost->fp;
34502 return false;
34504 case UNSPEC:
34505 switch (XINT (x, 1))
34507 case UNSPEC_FRSP:
34508 *total = rs6000_cost->fp;
34509 return true;
34511 default:
34512 break;
34514 break;
34516 case CALL:
34517 case IF_THEN_ELSE:
34518 if (!speed)
34520 *total = COSTS_N_INSNS (1);
34521 return true;
34523 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34525 *total = rs6000_cost->fp;
34526 return false;
34528 break;
34530 case NE:
34531 case EQ:
34532 case GTU:
34533 case LTU:
34534 /* Carry bit requires mode == Pmode.
34535 NEG or PLUS already counted so only add one. */
34536 if (mode == Pmode
34537 && (outer_code == NEG || outer_code == PLUS))
34539 *total = COSTS_N_INSNS (1);
34540 return true;
34542 if (outer_code == SET)
34544 if (XEXP (x, 1) == const0_rtx)
34546 if (TARGET_ISEL && !TARGET_MFCRF)
34547 *total = COSTS_N_INSNS (8);
34548 else
34549 *total = COSTS_N_INSNS (2);
34550 return true;
34552 else
34554 *total = COSTS_N_INSNS (3);
34555 return false;
34558 /* FALLTHRU */
34560 case GT:
34561 case LT:
34562 case UNORDERED:
34563 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
34565 if (TARGET_ISEL && !TARGET_MFCRF)
34566 *total = COSTS_N_INSNS (8);
34567 else
34568 *total = COSTS_N_INSNS (2);
34569 return true;
34571 /* CC COMPARE. */
34572 if (outer_code == COMPARE)
34574 *total = 0;
34575 return true;
34577 break;
34579 default:
34580 break;
34583 return false;
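/* A worked example of the CONST_INT case above (illustrative, not from
   the original source): for (plus (reg) (const_int 5)), the constant
   satisfies the signed 16-bit "I" constraint, so *total is 0; the
   addend is folded into the addi for free.  */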
34586 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34588 static bool
34589 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34590 int opno, int *total, bool speed)
34592 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34594 fprintf (stderr,
34595 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34596 "opno = %d, total = %d, speed = %s, x:\n",
34597 ret ? "complete" : "scan inner",
34598 GET_MODE_NAME (mode),
34599 GET_RTX_NAME (outer_code),
34600 opno,
34601 *total,
34602 speed ? "true" : "false");
34604 debug_rtx (x);
34606 return ret;
34609 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34611 static int
34612 rs6000_debug_address_cost (rtx x, machine_mode mode,
34613 addr_space_t as, bool speed)
34615 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34617 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34618 ret, speed ? "true" : "false");
34619 debug_rtx (x);
34621 return ret;
34625 /* A C expression returning the cost of moving data from a register of
34626 class FROM to one of class TO. */
34628 static int
34629 rs6000_register_move_cost (machine_mode mode,
34630 reg_class_t from, reg_class_t to)
34632 int ret;
34634 if (TARGET_DEBUG_COST)
34635 dbg_cost_ctrl++;
34637 /* Moves from/to GENERAL_REGS. */
34638 if (reg_classes_intersect_p (to, GENERAL_REGS)
34639 || reg_classes_intersect_p (from, GENERAL_REGS))
34641 reg_class_t rclass = from;
34643 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34644 rclass = to;
34646 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34647 ret = (rs6000_memory_move_cost (mode, rclass, false)
34648 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34650 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34651 shift. */
34652 else if (rclass == CR_REGS)
34653 ret = 4;
34655 /* For those processors that have slow LR/CTR moves, make them more
34656 expensive than memory in order to bias spills to memory. */
34657 else if ((rs6000_cpu == PROCESSOR_POWER6
34658 || rs6000_cpu == PROCESSOR_POWER7
34659 || rs6000_cpu == PROCESSOR_POWER8
34660 || rs6000_cpu == PROCESSOR_POWER9)
34661 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34662 ret = 6 * hard_regno_nregs[0][mode];
34664 else
34665 /* A move will cost one instruction per GPR moved. */
34666 ret = 2 * hard_regno_nregs[0][mode];
34669 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34670 else if (VECTOR_MEM_VSX_P (mode)
34671 && reg_classes_intersect_p (to, VSX_REGS)
34672 && reg_classes_intersect_p (from, VSX_REGS))
34673 ret = 2 * hard_regno_nregs[FIRST_FPR_REGNO][mode];
34675 /* Moving between two similar registers is just one instruction. */
34676 else if (reg_classes_intersect_p (to, from))
34677 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34679 /* Everything else has to go through GENERAL_REGS. */
34680 else
34681 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34682 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34684 if (TARGET_DEBUG_COST)
34686 if (dbg_cost_ctrl == 1)
34687 fprintf (stderr,
34688 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34689 ret, GET_MODE_NAME (mode), reg_class_names[from],
34690 reg_class_names[to]);
34691 dbg_cost_ctrl--;
34694 return ret;
34697 /* A C expression returning the cost of moving data of MODE from a register to
34698 or from memory. */
34700 static int
34701 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34702 bool in ATTRIBUTE_UNUSED)
34704 int ret;
34706 if (TARGET_DEBUG_COST)
34707 dbg_cost_ctrl++;
34709 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34710 ret = 4 * hard_regno_nregs[0][mode];
34711 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34712 || reg_classes_intersect_p (rclass, VSX_REGS)))
34713 ret = 4 * hard_regno_nregs[32][mode];
34714 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34715 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
34716 else
34717 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34719 if (TARGET_DEBUG_COST)
34721 if (dbg_cost_ctrl == 1)
34722 fprintf (stderr,
34723 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34724 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34725 dbg_cost_ctrl--;
34728 return ret;
34731 /* Returns a code for a target-specific builtin that implements
34732 reciprocal of the function, or NULL_TREE if not available. */
34734 static tree
34735 rs6000_builtin_reciprocal (tree fndecl)
34737 switch (DECL_FUNCTION_CODE (fndecl))
34739 case VSX_BUILTIN_XVSQRTDP:
34740 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34741 return NULL_TREE;
34743 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34745 case VSX_BUILTIN_XVSQRTSP:
34746 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34747 return NULL_TREE;
34749 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34751 default:
34752 return NULL_TREE;
34756 /* Load up a constant. If the mode is a vector mode, splat the value across
34757 all of the vector elements. */
34759 static rtx
34760 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34762 rtx reg;
34764 if (mode == SFmode || mode == DFmode)
34766 rtx d = const_double_from_real_value (dconst, mode);
34767 reg = force_reg (mode, d);
34769 else if (mode == V4SFmode)
34771 rtx d = const_double_from_real_value (dconst, SFmode);
34772 rtvec v = gen_rtvec (4, d, d, d, d);
34773 reg = gen_reg_rtx (mode);
34774 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34776 else if (mode == V2DFmode)
34778 rtx d = const_double_from_real_value (dconst, DFmode);
34779 rtvec v = gen_rtvec (2, d, d);
34780 reg = gen_reg_rtx (mode);
34781 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34783 else
34784 gcc_unreachable ();
34786 return reg;
34789 /* Generate an FMA instruction. */
34791 static void
34792 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34794 machine_mode mode = GET_MODE (target);
34795 rtx dst;
34797 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34798 gcc_assert (dst != NULL);
34800 if (dst != target)
34801 emit_move_insn (target, dst);
34804 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34806 static void
34807 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34809 machine_mode mode = GET_MODE (dst);
34810 rtx r;
34812 /* This is a tad more complicated, since the fnma_optab is for
34813 a different expression: fma(-m1, m2, a), which is the same
34814 thing except in the case of signed zeros.
34816 Fortunately we know that if FMA is supported that FNMSUB is
34817 also supported in the ISA. Just expand it directly. */
34819 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34821 r = gen_rtx_NEG (mode, a);
34822 r = gen_rtx_FMA (mode, m1, m2, r);
34823 r = gen_rtx_NEG (mode, r);
34824 emit_insn (gen_rtx_SET (dst, r));
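/* Equivalently (a sketch of the identity, not additional source code):
   FNMSUB computes dst = -(m1*m2 - a), which equals a - m1*m2 and so
   matches fma (-m1, m2, a) everywhere except for the sign of a zero
   result.  */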
34827 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34828 add a reg_note saying that this was a division. Support both scalar and
34829 vector divide. Assumes no trapping math and finite arguments. */
34831 void
34832 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34834 machine_mode mode = GET_MODE (dst);
34835 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34836 int i;
34838 /* Low precision estimates guarantee 5 bits of accuracy. High
34839 precision estimates guarantee 14 bits of accuracy. SFmode
34840 requires 23 bits of accuracy. DFmode requires 52 bits of
34841 accuracy. Each pass at least doubles the accuracy, leading
34842 to the following. */
34843 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34844 if (mode == DFmode || mode == V2DFmode)
34845 passes++;
34847 enum insn_code code = optab_handler (smul_optab, mode);
34848 insn_gen_fn gen_mul = GEN_FCN (code);
34850 gcc_assert (code != CODE_FOR_nothing);
34852 one = rs6000_load_constant_and_splat (mode, dconst1);
34854 /* x0 = 1./d estimate */
34855 x0 = gen_reg_rtx (mode);
34856 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34857 UNSPEC_FRES)));
34859 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34860 if (passes > 1) {
34862 /* e0 = 1. - d * x0 */
34863 e0 = gen_reg_rtx (mode);
34864 rs6000_emit_nmsub (e0, d, x0, one);
34866 /* x1 = x0 + e0 * x0 */
34867 x1 = gen_reg_rtx (mode);
34868 rs6000_emit_madd (x1, e0, x0, x0);
34870 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34871 ++i, xprev = xnext, eprev = enext) {
34873 /* enext = eprev * eprev */
34874 enext = gen_reg_rtx (mode);
34875 emit_insn (gen_mul (enext, eprev, eprev));
34877 /* xnext = xprev + enext * xprev */
34878 xnext = gen_reg_rtx (mode);
34879 rs6000_emit_madd (xnext, enext, xprev, xprev);
34882 } else
34883 xprev = x0;
34885 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34887 /* u = n * xprev */
34888 u = gen_reg_rtx (mode);
34889 emit_insn (gen_mul (u, n, xprev));
34891 /* v = n - (d * u) */
34892 v = gen_reg_rtx (mode);
34893 rs6000_emit_nmsub (v, d, u, n);
34895 /* dst = (v * xprev) + u */
34896 rs6000_emit_madd (dst, v, xprev, u);
34898 if (note_p)
34899 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
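/* A worked pass count (an illustrative reading of the code above): with
   high-precision estimates (14 bits) DFmode uses passes == 2, since
   14 -> 28 -> 56 bits covers the 52-bit mantissa; without them SFmode
   needs passes == 3, since 5 -> 10 -> 20 -> 40 exceeds 23 bits only
   after the third doubling.  */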
34902 /* Goldschmidt's Algorithm for single/double-precision floating point
34903 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34905 void
34906 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34908 machine_mode mode = GET_MODE (src);
34909 rtx e = gen_reg_rtx (mode);
34910 rtx g = gen_reg_rtx (mode);
34911 rtx h = gen_reg_rtx (mode);
34913 /* Low precision estimates guarantee 5 bits of accuracy. High
34914 precision estimates guarantee 14 bits of accuracy. SFmode
34915 requires 23 bits of accuracy. DFmode requires 52 bits of
34916 accuracy. Each pass at least doubles the accuracy, leading
34917 to the following. */
34918 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34919 if (mode == DFmode || mode == V2DFmode)
34920 passes++;
34922 int i;
34923 rtx mhalf;
34924 enum insn_code code = optab_handler (smul_optab, mode);
34925 insn_gen_fn gen_mul = GEN_FCN (code);
34927 gcc_assert (code != CODE_FOR_nothing);
34929 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34931 /* e = rsqrt estimate */
34932 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34933 UNSPEC_RSQRT)));
34935 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34936 if (!recip)
34938 rtx zero = force_reg (mode, CONST0_RTX (mode));
34940 if (mode == SFmode)
34942 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34943 e, zero, mode, 0);
34944 if (target != e)
34945 emit_move_insn (e, target);
34947 else
34949 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34950 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34954 /* g = sqrt estimate. */
34955 emit_insn (gen_mul (g, e, src));
34956 /* h = 1/(2*sqrt) estimate. */
34957 emit_insn (gen_mul (h, e, mhalf));
34959 if (recip)
34961 if (passes == 1)
34963 rtx t = gen_reg_rtx (mode);
34964 rs6000_emit_nmsub (t, g, h, mhalf);
34965 /* Apply correction directly to 1/rsqrt estimate. */
34966 rs6000_emit_madd (dst, e, t, e);
34968 else
34970 for (i = 0; i < passes; i++)
34972 rtx t1 = gen_reg_rtx (mode);
34973 rtx g1 = gen_reg_rtx (mode);
34974 rtx h1 = gen_reg_rtx (mode);
34976 rs6000_emit_nmsub (t1, g, h, mhalf);
34977 rs6000_emit_madd (g1, g, t1, g);
34978 rs6000_emit_madd (h1, h, t1, h);
34980 g = g1;
34981 h = h1;
34983 /* Multiply by 2 for 1/rsqrt. */
34984 emit_insn (gen_add3_insn (dst, h, h));
34987 else
34989 rtx t = gen_reg_rtx (mode);
34990 rs6000_emit_nmsub (t, g, h, mhalf);
34991 rs6000_emit_madd (dst, g, t, g);
34994 return;
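/* A plain-C model of the Goldschmidt recurrence above for the
   non-reciprocal case, assuming sqrtf from <math.h> as a stand-in for
   the hardware rsqrt seed; g converges to sqrt(src) and h to
   1/(2*sqrt(src)).  Illustrative only.  */
#if 0
#include <math.h>

static double
swsqrt_model (double src, int passes)
{
  double e = (double) (1.0f / sqrtf ((float) src)); /* rsqrt estimate */
  double g = e * src;			/* g = sqrt estimate            */
  double h = e * 0.5;			/* h = 1/(2*sqrt) estimate      */
  for (int i = 0; i < passes; i++)
    {
      double t = 0.5 - g * h;		/* t  = 1/2 - g*h      (nmsub)  */
      g = g + g * t;			/* g1 = g + g*t        (madd)   */
      h = h + h * t;			/* h1 = h + h*t        (madd)   */
    }
  return g;				/* for recip, return h + h      */
}
#endif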
34997 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34998 (Power7) targets. DST is the target, and SRC is the argument operand. */
35000 void
35001 rs6000_emit_popcount (rtx dst, rtx src)
35003 machine_mode mode = GET_MODE (dst);
35004 rtx tmp1, tmp2;
35006 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35007 if (TARGET_POPCNTD)
35009 if (mode == SImode)
35010 emit_insn (gen_popcntdsi2 (dst, src));
35011 else
35012 emit_insn (gen_popcntddi2 (dst, src));
35013 return;
35016 tmp1 = gen_reg_rtx (mode);
35018 if (mode == SImode)
35020 emit_insn (gen_popcntbsi2 (tmp1, src));
35021 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35022 NULL_RTX, 0);
35023 tmp2 = force_reg (SImode, tmp2);
35024 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35026 else
35028 emit_insn (gen_popcntbdi2 (tmp1, src));
35029 tmp2 = expand_mult (DImode, tmp1,
35030 GEN_INT ((HOST_WIDE_INT)
35031 0x01010101 << 32 | 0x01010101),
35032 NULL_RTX, 0);
35033 tmp2 = force_reg (DImode, tmp2);
35034 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
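/* A plain-C model of the popcntb fallback above: popcntb leaves each
   byte holding the population count of the corresponding source byte;
   multiplying by 0x01010101 then accumulates every byte count into the
   top byte, which the final right shift extracts.  Illustrative
   only.  */
#if 0
static unsigned int
popcount_model (unsigned int src)
{
  unsigned int bytes = 0;
  for (int i = 0; i < 32; i++)		/* model of popcntb */
    bytes += ((src >> i) & 1u) << (i & ~7);
  return (bytes * 0x01010101u) >> 24;	/* sum bytes, take top byte */
}
#endif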
35039 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35040 target, and SRC is the argument operand. */
35042 void
35043 rs6000_emit_parity (rtx dst, rtx src)
35045 machine_mode mode = GET_MODE (dst);
35046 rtx tmp;
35048 tmp = gen_reg_rtx (mode);
35050 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35051 if (TARGET_CMPB)
35053 if (mode == SImode)
35055 emit_insn (gen_popcntbsi2 (tmp, src));
35056 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35058 else
35060 emit_insn (gen_popcntbdi2 (tmp, src));
35061 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35063 return;
35066 if (mode == SImode)
35068 /* Is mult+shift >= shift+xor+shift+xor? */
35069 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35071 rtx tmp1, tmp2, tmp3, tmp4;
35073 tmp1 = gen_reg_rtx (SImode);
35074 emit_insn (gen_popcntbsi2 (tmp1, src));
35076 tmp2 = gen_reg_rtx (SImode);
35077 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35078 tmp3 = gen_reg_rtx (SImode);
35079 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35081 tmp4 = gen_reg_rtx (SImode);
35082 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35083 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35085 else
35086 rs6000_emit_popcount (tmp, src);
35087 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35089 else
35091 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35092 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35094 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35096 tmp1 = gen_reg_rtx (DImode);
35097 emit_insn (gen_popcntbdi2 (tmp1, src));
35099 tmp2 = gen_reg_rtx (DImode);
35100 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35101 tmp3 = gen_reg_rtx (DImode);
35102 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35104 tmp4 = gen_reg_rtx (DImode);
35105 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35106 tmp5 = gen_reg_rtx (DImode);
35107 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35109 tmp6 = gen_reg_rtx (DImode);
35110 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35111 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35113 else
35114 rs6000_emit_popcount (tmp, src);
35115 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
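/* A plain-C model of the shift/xor fallback above: starting from the
   per-byte counts that popcntb produces, xor-folding the halves and
   masking with 1 yields the overall parity, since the xor of the byte
   parities equals the parity of the whole word.  Illustrative only.  */
#if 0
static unsigned int
parity_model (unsigned int src)
{
  unsigned int t = 0;
  for (int i = 0; i < 32; i++)		/* model of popcntb */
    t += ((src >> i) & 1u) << (i & ~7);
  t ^= t >> 16;				/* fold halfword counts together */
  t ^= t >> 8;				/* fold byte counts together     */
  return t & 1;				/* low bit is the parity         */
}
#endif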
35119 /* Expand an Altivec constant permutation for little endian mode.
35120 There are two issues: First, the two input operands must be
35121 swapped so that together they form a double-wide array in LE
35122 order. Second, the vperm instruction has surprising behavior
35123 in LE mode: it interprets the elements of the source vectors
35124 in BE mode ("left to right") and interprets the elements of
35125 the destination vector in LE mode ("right to left"). To
35126 correct for this, we must subtract each element of the permute
35127 control vector from 31.
35129 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35130 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35131 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35132 serve as the permute control vector. Then, in BE mode,
35134 vperm 9,10,11,12
35136 places the desired result in vr9. However, in LE mode the
35137 vector contents will be
35139 vr10 = 00000003 00000002 00000001 00000000
35140 vr11 = 00000007 00000006 00000005 00000004
35142 The result of the vperm using the same permute control vector is
35144 vr9 = 05000000 07000000 01000000 03000000
35146 That is, the leftmost 4 bytes of vr10 are interpreted as the
35147 source for the rightmost 4 bytes of vr9, and so on.
35149 If we change the permute control vector to
35151 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35153 and issue
35155 vperm 9,11,10,12
35157 we get the desired
35159 vr9 = 00000006 00000004 00000002 00000000. */
35161 void
35162 altivec_expand_vec_perm_const_le (rtx operands[4])
35164 unsigned int i;
35165 rtx perm[16];
35166 rtx constv, unspec;
35167 rtx target = operands[0];
35168 rtx op0 = operands[1];
35169 rtx op1 = operands[2];
35170 rtx sel = operands[3];
35172 /* Unpack and adjust the constant selector. */
35173 for (i = 0; i < 16; ++i)
35175 rtx e = XVECEXP (sel, 0, i);
35176 unsigned int elt = 31 - (INTVAL (e) & 31);
35177 perm[i] = GEN_INT (elt);
35180 /* Expand to a permute, swapping the inputs and using the
35181 adjusted selector. */
35182 if (!REG_P (op0))
35183 op0 = force_reg (V16QImode, op0);
35184 if (!REG_P (op1))
35185 op1 = force_reg (V16QImode, op1);
35187 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35188 constv = force_reg (V16QImode, constv);
35189 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35190 UNSPEC_VPERM);
35191 if (!REG_P (target))
35193 rtx tmp = gen_reg_rtx (V16QImode);
35194 emit_move_insn (tmp, unspec);
35195 unspec = tmp;
35198 emit_move_insn (target, unspec);
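/* A minimal model of the selector adjustment performed above: once the
   operands are swapped, BE element k of the double-wide pair is LE
   element 31 - k, so each selector byte e is rewritten to
   31 - (e & 31).  Illustrative only.  */
#if 0
static void
adjust_selector_le_model (unsigned char sel[16])
{
  for (int i = 0; i < 16; i++)
    sel[i] = 31 - (sel[i] & 31);
}
#endif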
35201 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35202 permute control vector. But here it's not a constant, so we must
35203 generate a vector NAND or NOR to do the adjustment. */
35205 void
35206 altivec_expand_vec_perm_le (rtx operands[4])
35208 rtx notx, iorx, unspec;
35209 rtx target = operands[0];
35210 rtx op0 = operands[1];
35211 rtx op1 = operands[2];
35212 rtx sel = operands[3];
35213 rtx tmp = target;
35214 rtx norreg = gen_reg_rtx (V16QImode);
35215 machine_mode mode = GET_MODE (target);
35217 /* Get everything in regs so the pattern matches. */
35218 if (!REG_P (op0))
35219 op0 = force_reg (mode, op0);
35220 if (!REG_P (op1))
35221 op1 = force_reg (mode, op1);
35222 if (!REG_P (sel))
35223 sel = force_reg (V16QImode, sel);
35224 if (!REG_P (target))
35225 tmp = gen_reg_rtx (mode);
35227 if (TARGET_P9_VECTOR)
35229 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
35230 UNSPEC_VPERMR);
35232 else
35234 /* Invert the selector with a VNAND if available, else a VNOR.
35235 The VNAND is preferred for future fusion opportunities. */
35236 notx = gen_rtx_NOT (V16QImode, sel);
35237 iorx = (TARGET_P8_VECTOR
35238 ? gen_rtx_IOR (V16QImode, notx, notx)
35239 : gen_rtx_AND (V16QImode, notx, notx));
35240 emit_insn (gen_rtx_SET (norreg, iorx));
35242 /* Permute with operands reversed and adjusted selector. */
35243 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35244 UNSPEC_VPERM);
35247 /* Copy into target, possibly by way of a register. */
35248 if (!REG_P (target))
35250 emit_move_insn (tmp, unspec);
35251 unspec = tmp;
35254 emit_move_insn (target, unspec);
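/* The NAND/NOR trick above works because vperm reads only the low five
   bits of each selector byte, and on those bits a bitwise complement
   is exactly the 31 - e adjustment used in the constant case.  A quick
   self-check, illustrative only.  */
#if 0
#include <assert.h>

static void
check_invert_is_adjust (void)
{
  for (unsigned int e = 0; e < 256; e++)
    assert ((~e & 31) == 31 - (e & 31));
}
#endif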
35257 /* Expand an Altivec constant permutation. Return true if we match
35258 an efficient implementation; false to fall back to VPERM. */
35260 bool
35261 altivec_expand_vec_perm_const (rtx operands[4])
35263 struct altivec_perm_insn {
35264 HOST_WIDE_INT mask;
35265 enum insn_code impl;
35266 unsigned char perm[16];
35268 static const struct altivec_perm_insn patterns[] = {
35269 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35270 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35271 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35272 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35273 { OPTION_MASK_ALTIVEC,
35274 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35275 : CODE_FOR_altivec_vmrglb_direct),
35276 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35277 { OPTION_MASK_ALTIVEC,
35278 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35279 : CODE_FOR_altivec_vmrglh_direct),
35280 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35281 { OPTION_MASK_ALTIVEC,
35282 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35283 : CODE_FOR_altivec_vmrglw_direct),
35284 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35285 { OPTION_MASK_ALTIVEC,
35286 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35287 : CODE_FOR_altivec_vmrghb_direct),
35288 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35289 { OPTION_MASK_ALTIVEC,
35290 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35291 : CODE_FOR_altivec_vmrghh_direct),
35292 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35293 { OPTION_MASK_ALTIVEC,
35294 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35295 : CODE_FOR_altivec_vmrghw_direct),
35296 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35297 { OPTION_MASK_P8_VECTOR,
35298 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35299 : CODE_FOR_p8_vmrgow_v4sf_direct),
35300 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35301 { OPTION_MASK_P8_VECTOR,
35302 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35303 : CODE_FOR_p8_vmrgew_v4sf_direct),
35304 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35307 unsigned int i, j, elt, which;
35308 unsigned char perm[16];
35309 rtx target, op0, op1, sel, x;
35310 bool one_vec;
35312 target = operands[0];
35313 op0 = operands[1];
35314 op1 = operands[2];
35315 sel = operands[3];
35317 /* Unpack the constant selector. */
35318 for (i = which = 0; i < 16; ++i)
35320 rtx e = XVECEXP (sel, 0, i);
35321 elt = INTVAL (e) & 31;
35322 which |= (elt < 16 ? 1 : 2);
35323 perm[i] = elt;
35326 /* Simplify the constant selector based on operands. */
35327 switch (which)
35329 default:
35330 gcc_unreachable ();
35332 case 3:
35333 one_vec = false;
35334 if (!rtx_equal_p (op0, op1))
35335 break;
35336 /* FALLTHRU */
35338 case 2:
35339 for (i = 0; i < 16; ++i)
35340 perm[i] &= 15;
35341 op0 = op1;
35342 one_vec = true;
35343 break;
35345 case 1:
35346 op1 = op0;
35347 one_vec = true;
35348 break;
35351 /* Look for splat patterns. */
35352 if (one_vec)
35354 elt = perm[0];
35356 for (i = 0; i < 16; ++i)
35357 if (perm[i] != elt)
35358 break;
35359 if (i == 16)
35361 if (!BYTES_BIG_ENDIAN)
35362 elt = 15 - elt;
35363 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35364 return true;
35367 if (elt % 2 == 0)
35369 for (i = 0; i < 16; i += 2)
35370 if (perm[i] != elt || perm[i + 1] != elt + 1)
35371 break;
35372 if (i == 16)
35374 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35375 x = gen_reg_rtx (V8HImode);
35376 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35377 GEN_INT (field)));
35378 emit_move_insn (target, gen_lowpart (V16QImode, x));
35379 return true;
35383 if (elt % 4 == 0)
35385 for (i = 0; i < 16; i += 4)
35386 if (perm[i] != elt
35387 || perm[i + 1] != elt + 1
35388 || perm[i + 2] != elt + 2
35389 || perm[i + 3] != elt + 3)
35390 break;
35391 if (i == 16)
35393 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35394 x = gen_reg_rtx (V4SImode);
35395 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35396 GEN_INT (field)));
35397 emit_move_insn (target, gen_lowpart (V16QImode, x));
35398 return true;
35403 /* Look for merge and pack patterns. */
35404 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35406 bool swapped;
35408 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35409 continue;
35411 elt = patterns[j].perm[0];
35412 if (perm[0] == elt)
35413 swapped = false;
35414 else if (perm[0] == elt + 16)
35415 swapped = true;
35416 else
35417 continue;
35418 for (i = 1; i < 16; ++i)
35420 elt = patterns[j].perm[i];
35421 if (swapped)
35422 elt = (elt >= 16 ? elt - 16 : elt + 16);
35423 else if (one_vec && elt >= 16)
35424 elt -= 16;
35425 if (perm[i] != elt)
35426 break;
35428 if (i == 16)
35430 enum insn_code icode = patterns[j].impl;
35431 machine_mode omode = insn_data[icode].operand[0].mode;
35432 machine_mode imode = insn_data[icode].operand[1].mode;
35434 /* For little-endian, don't use vpkuwum and vpkuhum if the
35435 underlying vector type is not V4SI and V8HI, respectively.
35436 For example, using vpkuwum with a V8HI picks up the even
35437 halfwords (BE numbering) when the even halfwords (LE
35438 numbering) are what we need. */
35439 if (!BYTES_BIG_ENDIAN
35440 && icode == CODE_FOR_altivec_vpkuwum_direct
35441 && ((GET_CODE (op0) == REG
35442 && GET_MODE (op0) != V4SImode)
35443 || (GET_CODE (op0) == SUBREG
35444 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35445 continue;
35446 if (!BYTES_BIG_ENDIAN
35447 && icode == CODE_FOR_altivec_vpkuhum_direct
35448 && ((GET_CODE (op0) == REG
35449 && GET_MODE (op0) != V8HImode)
35450 || (GET_CODE (op0) == SUBREG
35451 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35452 continue;
35454 /* For little-endian, the two input operands must be swapped
35455 (or swapped back) to ensure proper right-to-left numbering
35456 from 0 to 2N-1. */
35457 if (swapped ^ !BYTES_BIG_ENDIAN)
35458 std::swap (op0, op1);
35459 if (imode != V16QImode)
35461 op0 = gen_lowpart (imode, op0);
35462 op1 = gen_lowpart (imode, op1);
35464 if (omode == V16QImode)
35465 x = target;
35466 else
35467 x = gen_reg_rtx (omode);
35468 emit_insn (GEN_FCN (icode) (x, op0, op1));
35469 if (omode != V16QImode)
35470 emit_move_insn (target, gen_lowpart (V16QImode, x));
35471 return true;
35475 if (!BYTES_BIG_ENDIAN)
35477 altivec_expand_vec_perm_const_le (operands);
35478 return true;
35481 return false;
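/* A simplified model of the matching loop above, ignoring the
   one-vector fold and the LE vpkuhum/vpkuwum restrictions: returns the
   patterns[] index that PERM realizes, setting *SWAPPED when the two
   inputs must be exchanged, or -1 when only vperm will do.
   Illustrative only.  */
#if 0
static int
match_perm_model (const unsigned char perm[16],
		  const unsigned char pat[][16], int npat, int *swapped)
{
  for (int j = 0; j < npat; j++)
    {
      int swap = (perm[0] == pat[j][0] ? 0
		  : perm[0] == pat[j][0] + 16 ? 1 : -1);
      if (swap < 0)
	continue;
      int i;
      for (i = 1; i < 16; i++)
	{
	  unsigned int elt = pat[j][i];
	  if (swap)
	    elt = elt >= 16 ? elt - 16 : elt + 16;
	  if (perm[i] != elt)
	    break;
	}
      if (i == 16)
	{
	  *swapped = swap;
	  return j;
	}
    }
  return -1;
}
#endif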
35484 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35485 Return true if we match an efficient implementation. */
35487 static bool
35488 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35489 unsigned char perm0, unsigned char perm1)
35491 rtx x;
35493 /* If both selectors come from the same operand, fold to single op. */
35494 if ((perm0 & 2) == (perm1 & 2))
35496 if (perm0 & 2)
35497 op0 = op1;
35498 else
35499 op1 = op0;
35501 /* If both operands are equal, fold to simpler permutation. */
35502 if (rtx_equal_p (op0, op1))
35504 perm0 = perm0 & 1;
35505 perm1 = (perm1 & 1) + 2;
35507 /* If the first selector comes from the second operand, swap. */
35508 else if (perm0 & 2)
35510 if (perm1 & 2)
35511 return false;
35512 perm0 -= 2;
35513 perm1 += 2;
35514 std::swap (op0, op1);
35516 /* If the second selector does not come from the second operand, fail. */
35517 else if ((perm1 & 2) == 0)
35518 return false;
35520 /* Success! */
35521 if (target != NULL)
35523 machine_mode vmode, dmode;
35524 rtvec v;
35526 vmode = GET_MODE (target);
35527 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35528 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35529 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35530 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35531 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35532 emit_insn (gen_rtx_SET (target, x));
35534 return true;
35537 bool
35538 rs6000_expand_vec_perm_const (rtx operands[4])
35540 rtx target, op0, op1, sel;
35541 unsigned char perm0, perm1;
35543 target = operands[0];
35544 op0 = operands[1];
35545 op1 = operands[2];
35546 sel = operands[3];
35548 /* Unpack the constant selector. */
35549 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
35550 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
35552 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
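/* The semantics the doubleword expander above implements, written as
   plain C over two-element arrays: each selector indexes the four
   doublewords of the concatenated pair.  Illustrative only.  */
#if 0
static void
vec_perm2_model (double dst[2], const double op0[2], const double op1[2],
		 unsigned int sel0, unsigned int sel1)
{
  const double cat[4] = { op0[0], op0[1], op1[0], op1[1] };
  dst[0] = cat[sel0 & 3];
  dst[1] = cat[sel1 & 3];
}
#endif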
35555 /* Test whether a constant permutation is supported. */
35557 static bool
35558 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
35559 const unsigned char *sel)
35561 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35562 if (TARGET_ALTIVEC)
35563 return true;
35565 /* Check for ps_merge* or evmerge* insns. */
35566 if (TARGET_PAIRED_FLOAT && vmode == V2SFmode)
35568 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35569 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35570 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
35573 return false;
35576 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35578 static void
35579 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35580 machine_mode vmode, unsigned nelt, rtx perm[])
35582 machine_mode imode;
35583 rtx x;
35585 imode = vmode;
35586 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
35587 imode = mode_for_int_vector (vmode).require ();
35589 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
35590 x = expand_vec_perm (vmode, op0, op1, x, target);
35591 if (x != target)
35592 emit_move_insn (target, x);
35595 /* Expand an extract even operation. */
35597 void
35598 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35600 machine_mode vmode = GET_MODE (target);
35601 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35602 rtx perm[16];
35604 for (i = 0; i < nelt; i++)
35605 perm[i] = GEN_INT (i * 2);
35607 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
35610 /* Expand a vector interleave operation. */
35612 void
35613 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35615 machine_mode vmode = GET_MODE (target);
35616 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35617 rtx perm[16];
35619 high = (highp ? 0 : nelt / 2);
35620 for (i = 0; i < nelt / 2; i++)
35622 perm[i * 2] = GEN_INT (i + high);
35623 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
35626 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
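/* A model of the selector built above: for nelt == 4, HIGHP yields
   {0, 4, 1, 5} (interleave the high halves of op0 and op1) and !HIGHP
   yields {2, 6, 3, 7}.  Illustrative only.  */
#if 0
static void
interleave_sel_model (unsigned int nelt, int highp, unsigned int sel[])
{
  unsigned int high = highp ? 0 : nelt / 2;
  for (unsigned int i = 0; i < nelt / 2; i++)
    {
      sel[i * 2] = i + high;		/* element of op0 */
      sel[i * 2 + 1] = i + nelt + high;	/* element of op1 */
    }
}
#endif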
35629 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
35630 void
35631 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35633 HOST_WIDE_INT hwi_scale (scale);
35634 REAL_VALUE_TYPE r_pow;
35635 rtvec v = rtvec_alloc (2);
35636 rtx elt;
35637 rtx scale_vec = gen_reg_rtx (V2DFmode);
35638 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35639 elt = const_double_from_real_value (r_pow, DFmode);
35640 RTVEC_ELT (v, 0) = elt;
35641 RTVEC_ELT (v, 1) = elt;
35642 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35643 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
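/* A scalar model of the operation above: both lanes are multiplied by
   the exact constant 2**scale that real_powi builds, which (barring
   overflow or underflow of the constant) behaves like ldexp on each
   element.  Illustrative only.  */
#if 0
#include <math.h>

static void
scale_v2df_model (double tgt[2], const double src[2], int scale)
{
  tgt[0] = ldexp (src[0], scale);	/* src[0] * 2**scale */
  tgt[1] = ldexp (src[1], scale);	/* src[1] * 2**scale */
}
#endif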
35646 /* Return an RTX representing where to find the function value of a
35647 function returning MODE. */
35648 static rtx
35649 rs6000_complex_function_value (machine_mode mode)
35651 unsigned int regno;
35652 rtx r1, r2;
35653 machine_mode inner = GET_MODE_INNER (mode);
35654 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35656 if (TARGET_FLOAT128_TYPE
35657 && (mode == KCmode
35658 || (mode == TCmode && TARGET_IEEEQUAD)))
35659 regno = ALTIVEC_ARG_RETURN;
35661 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35662 regno = FP_ARG_RETURN;
35664 else
35666 regno = GP_ARG_RETURN;
35668 /* 32-bit is OK since it'll go in r3/r4. */
35669 if (TARGET_32BIT && inner_bytes >= 4)
35670 return gen_rtx_REG (mode, regno);
35673 if (inner_bytes >= 8)
35674 return gen_rtx_REG (mode, regno);
35676 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35677 const0_rtx);
35678 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35679 GEN_INT (inner_bytes));
35680 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35683 /* Return an rtx describing a return value of MODE as a PARALLEL
35684 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35685 stride REG_STRIDE. */
35687 static rtx
35688 rs6000_parallel_return (machine_mode mode,
35689 int n_elts, machine_mode elt_mode,
35690 unsigned int regno, unsigned int reg_stride)
35692 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35694 int i;
35695 for (i = 0; i < n_elts; i++)
35697 rtx r = gen_rtx_REG (elt_mode, regno);
35698 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35699 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35700 regno += reg_stride;
35703 return par;
35706 /* Target hook for TARGET_FUNCTION_VALUE.
35708 An integer value is in r3 and a floating-point value is in fp1,
35709 unless -msoft-float. */
35711 static rtx
35712 rs6000_function_value (const_tree valtype,
35713 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35714 bool outgoing ATTRIBUTE_UNUSED)
35716 machine_mode mode;
35717 unsigned int regno;
35718 machine_mode elt_mode;
35719 int n_elts;
35721 /* Special handling for structs in darwin64. */
35722 if (TARGET_MACHO
35723 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35725 CUMULATIVE_ARGS valcum;
35726 rtx valret;
35728 valcum.words = 0;
35729 valcum.fregno = FP_ARG_MIN_REG;
35730 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35731 /* Do a trial code generation as if this were going to be passed as
35732 an argument; if any part goes in memory, we return NULL. */
35733 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35734 if (valret)
35735 return valret;
35736 /* Otherwise fall through to standard ABI rules. */
35739 mode = TYPE_MODE (valtype);
35741 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35742 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35744 int first_reg, n_regs;
35746 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35748 /* _Decimal128 must use even/odd register pairs. */
35749 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35750 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35752 else
35754 first_reg = ALTIVEC_ARG_RETURN;
35755 n_regs = 1;
35758 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35761 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
35762 if (TARGET_32BIT && TARGET_POWERPC64)
35763 switch (mode)
35765 default:
35766 break;
35767 case E_DImode:
35768 case E_SCmode:
35769 case E_DCmode:
35770 case E_TCmode:
35771 int count = GET_MODE_SIZE (mode) / 4;
35772 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35775 if ((INTEGRAL_TYPE_P (valtype)
35776 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35777 || POINTER_TYPE_P (valtype))
35778 mode = TARGET_32BIT ? SImode : DImode;
35780 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35781 /* _Decimal128 must use an even/odd register pair. */
35782 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35783 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35784 && !FLOAT128_VECTOR_P (mode)
35785 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
35786 regno = FP_ARG_RETURN;
35787 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35788 && targetm.calls.split_complex_arg)
35789 return rs6000_complex_function_value (mode);
35790 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35791 return register is used in both cases, and we won't see V2DImode/V2DFmode
35792 for pure altivec, combine the two cases. */
35793 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35794 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35795 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35796 regno = ALTIVEC_ARG_RETURN;
35797 else
35798 regno = GP_ARG_RETURN;
35800 return gen_rtx_REG (mode, regno);
35803 /* Define how to find the value returned by a library function
35804 assuming the value has mode MODE. */
35805 static rtx
35806 rs6000_libcall_value (machine_mode mode)
35808 unsigned int regno;
35810 /* Long long return values need to be split in the -mpowerpc64, 32-bit ABI. */
35811 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35812 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35814 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35815 /* _Decimal128 must use an even/odd register pair. */
35816 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35817 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
35818 && TARGET_HARD_FLOAT
35819 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
35820 regno = FP_ARG_RETURN;
35821 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35822 return register is used in both cases, and we won't see V2DImode/V2DFmode
35823 for pure altivec, combine the two cases. */
35824 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35825 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35826 regno = ALTIVEC_ARG_RETURN;
35827 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35828 return rs6000_complex_function_value (mode);
35829 else
35830 regno = GP_ARG_RETURN;
35832 return gen_rtx_REG (mode, regno);
35835 /* Compute register pressure classes. We implement the target hook to avoid
35836 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35837 lead to incorrect estimates of the number of available registers and therefore
35838 increased register pressure/spill. */
35839 static int
35840 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35842 int n;
35844 n = 0;
35845 pressure_classes[n++] = GENERAL_REGS;
35846 if (TARGET_VSX)
35847 pressure_classes[n++] = VSX_REGS;
35848 else
35850 if (TARGET_ALTIVEC)
35851 pressure_classes[n++] = ALTIVEC_REGS;
35852 if (TARGET_HARD_FLOAT)
35853 pressure_classes[n++] = FLOAT_REGS;
35855 pressure_classes[n++] = CR_REGS;
35856 pressure_classes[n++] = SPECIAL_REGS;
35858 return n;
35861 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35862 Frame pointer elimination is automatically handled.
35864 For the RS/6000, if frame pointer elimination is being done, we would like
35865 to convert ap into fp, not sp.
35867 We need r30 if -mminimal-toc was specified, and there are constant pool
35868 references. */
35870 static bool
35871 rs6000_can_eliminate (const int from, const int to)
35873 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35874 ? ! frame_pointer_needed
35875 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35876 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35877 || constant_pool_empty_p ()
35878 : true);
35881 /* Define the offset between two registers, FROM to be eliminated and its
35882 replacement TO, at the start of a routine. */
35883 HOST_WIDE_INT
35884 rs6000_initial_elimination_offset (int from, int to)
35886 rs6000_stack_t *info = rs6000_stack_info ();
35887 HOST_WIDE_INT offset;
35889 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35890 offset = info->push_p ? 0 : -info->total_size;
35891 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35893 offset = info->push_p ? 0 : -info->total_size;
35894 if (FRAME_GROWS_DOWNWARD)
35895 offset += info->fixed_size + info->vars_size + info->parm_size;
35897 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35898 offset = FRAME_GROWS_DOWNWARD
35899 ? info->fixed_size + info->vars_size + info->parm_size
35900 : 0;
35901 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35902 offset = info->total_size;
35903 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35904 offset = info->push_p ? info->total_size : 0;
35905 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35906 offset = 0;
35907 else
35908 gcc_unreachable ();
35910 return offset;
35913 /* Fill in sizes of registers used by unwinder. */
35915 static void
35916 rs6000_init_dwarf_reg_sizes_extra (tree address)
35918 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35920 int i;
35921 machine_mode mode = TYPE_MODE (char_type_node);
35922 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35923 rtx mem = gen_rtx_MEM (BLKmode, addr);
35924 rtx value = gen_int_mode (16, mode);
35926 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35927 The unwinder still needs to know the size of Altivec registers. */
35929 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35931 int column = DWARF_REG_TO_UNWIND_COLUMN
35932 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35933 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35935 emit_move_insn (adjust_address (mem, mode, offset), value);
35940 /* Map internal gcc register numbers to debug format register numbers.
35941 FORMAT specifies the type of debug register number to use:
35942 0 -- debug information, except for frame-related sections
35943 1 -- DWARF .debug_frame section
35944 2 -- DWARF .eh_frame section */
35946 unsigned int
35947 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35949 /* Except for the above, we use the internal number for non-DWARF
35950 debug information, and also for .eh_frame. */
35951 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35952 return regno;
35954 /* On some platforms, we use the standard DWARF register
35955 numbering for .debug_info and .debug_frame. */
35956 #ifdef RS6000_USE_DWARF_NUMBERING
35957 if (regno <= 63)
35958 return regno;
35959 if (regno == LR_REGNO)
35960 return 108;
35961 if (regno == CTR_REGNO)
35962 return 109;
35963 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35964 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35965 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35966 to the DWARF reg for CR. */
35967 if (format == 1 && regno == CR2_REGNO)
35968 return 64;
35969 if (CR_REGNO_P (regno))
35970 return regno - CR0_REGNO + 86;
35971 if (regno == CA_REGNO)
35972 return 101; /* XER */
35973 if (ALTIVEC_REGNO_P (regno))
35974 return regno - FIRST_ALTIVEC_REGNO + 1124;
35975 if (regno == VRSAVE_REGNO)
35976 return 356;
35977 if (regno == VSCR_REGNO)
35978 return 67;
35979 #endif
35980 return regno;
35983 /* Target hook eh_return_filter_mode. */
35984 static scalar_int_mode
35985 rs6000_eh_return_filter_mode (void)
35987 return TARGET_32BIT ? SImode : word_mode;
35990 /* Target hook for scalar_mode_supported_p. */
35991 static bool
35992 rs6000_scalar_mode_supported_p (scalar_mode mode)
35994 /* -m32 does not support TImode. This is the default, from
35995 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35996 same ABI as for -m32. But default_scalar_mode_supported_p allows
35997 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35998 for -mpowerpc64. */
35999 if (TARGET_32BIT && mode == TImode)
36000 return false;
36002 if (DECIMAL_FLOAT_MODE_P (mode))
36003 return default_decimal_float_supported_p ();
36004 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36005 return true;
36006 else
36007 return default_scalar_mode_supported_p (mode);
36010 /* Target hook for vector_mode_supported_p. */
36011 static bool
36012 rs6000_vector_mode_supported_p (machine_mode mode)
36015 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
36016 return true;
36018 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36019 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36020 double-double. */
36021 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36022 return true;
36024 else
36025 return false;
36028 /* Target hook for floatn_mode. */
36029 static opt_scalar_float_mode
36030 rs6000_floatn_mode (int n, bool extended)
36032 if (extended)
36034 switch (n)
36036 case 32:
36037 return DFmode;
36039 case 64:
36040 if (TARGET_FLOAT128_KEYWORD)
36041 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36042 else
36043 return opt_scalar_float_mode ();
36045 case 128:
36046 return opt_scalar_float_mode ();
36048 default:
36049 /* Those are the only valid _FloatNx types. */
36050 gcc_unreachable ();
36053 else
36055 switch (n)
36057 case 32:
36058 return SFmode;
36060 case 64:
36061 return DFmode;
36063 case 128:
36064 if (TARGET_FLOAT128_KEYWORD)
36065 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36066 else
36067 return opt_scalar_float_mode ();
36069 default:
36070 return opt_scalar_float_mode ();
36076 /* Target hook for c_mode_for_suffix. */
36077 static machine_mode
36078 rs6000_c_mode_for_suffix (char suffix)
36080 if (TARGET_FLOAT128_TYPE)
36082 if (suffix == 'q' || suffix == 'Q')
36083 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36085 /* At the moment, we are not defining a suffix for IBM extended double.
36086 If/when the default for -mabi=ieeelongdouble is changed, and we want
36087 to support __ibm128 constants in legacy library code, we may need to
36088 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36089 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36090 __float80 constants. */
36093 return VOIDmode;
36096 /* Target hook for invalid_arg_for_unprototyped_fn. */
36097 static const char *
36098 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36100 return (!rs6000_darwin64_abi
36101 && typelist == 0
36102 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36103 && (funcdecl == NULL_TREE
36104 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36105 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36106 ? N_("AltiVec argument passed to unprototyped function")
36107 : NULL;
36110 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36111 setup by using __stack_chk_fail_local hidden function instead of
36112 calling __stack_chk_fail directly. Otherwise it is better to call
36113 __stack_chk_fail directly. */
36115 static tree ATTRIBUTE_UNUSED
36116 rs6000_stack_protect_fail (void)
36118 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36119 ? default_hidden_stack_protect_fail ()
36120 : default_external_stack_protect_fail ();
36123 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36125 #if TARGET_ELF
36126 static unsigned HOST_WIDE_INT
36127 rs6000_asan_shadow_offset (void)
36129 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36131 #endif
36133 /* Mask options that we want to support inside of attribute((target)) and
36134 #pragma GCC target operations. Note, we do not include things like
36135 64/32-bit, endianness, hard/soft floating point, etc. that would have
36136 different calling sequences. */
36138 struct rs6000_opt_mask {
36139 const char *name; /* option name */
36140 HOST_WIDE_INT mask; /* mask to set */
36141 bool invert; /* invert sense of mask */
36142 bool valid_target; /* option is a target option */
36145 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36147 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36148 { "cmpb", OPTION_MASK_CMPB, false, true },
36149 { "crypto", OPTION_MASK_CRYPTO, false, true },
36150 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36151 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36152 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36153 false, true },
36154 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, false },
36155 { "float128-type", OPTION_MASK_FLOAT128_TYPE, false, false },
36156 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, false },
36157 { "fprnd", OPTION_MASK_FPRND, false, true },
36158 { "hard-dfp", OPTION_MASK_DFP, false, true },
36159 { "htm", OPTION_MASK_HTM, false, true },
36160 { "isel", OPTION_MASK_ISEL, false, true },
36161 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36162 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36163 { "modulo", OPTION_MASK_MODULO, false, true },
36164 { "mulhw", OPTION_MASK_MULHW, false, true },
36165 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36166 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36167 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36168 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36169 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36170 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36171 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36172 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36173 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36174 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36175 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36176 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36177 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36178 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36179 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36180 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36181 { "string", OPTION_MASK_STRING, false, true },
36182 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36183 { "update", OPTION_MASK_NO_UPDATE, true , true },
36184 { "vsx", OPTION_MASK_VSX, false, true },
36185 #ifdef OPTION_MASK_64BIT
36186 #if TARGET_AIX_OS
36187 { "aix64", OPTION_MASK_64BIT, false, false },
36188 { "aix32", OPTION_MASK_64BIT, true, false },
36189 #else
36190 { "64", OPTION_MASK_64BIT, false, false },
36191 { "32", OPTION_MASK_64BIT, true, false },
36192 #endif
36193 #endif
36194 #ifdef OPTION_MASK_EABI
36195 { "eabi", OPTION_MASK_EABI, false, false },
36196 #endif
36197 #ifdef OPTION_MASK_LITTLE_ENDIAN
36198 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36199 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36200 #endif
36201 #ifdef OPTION_MASK_RELOCATABLE
36202 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36203 #endif
36204 #ifdef OPTION_MASK_STRICT_ALIGN
36205 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36206 #endif
36207 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36208 { "string", OPTION_MASK_STRING, false, false },
36211 /* Builtin mask mapping for printing the flags. */
36212 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36214 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36215 { "vsx", RS6000_BTM_VSX, false, false },
36216 { "paired", RS6000_BTM_PAIRED, false, false },
36217 { "fre", RS6000_BTM_FRE, false, false },
36218 { "fres", RS6000_BTM_FRES, false, false },
36219 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36220 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36221 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36222 { "cell", RS6000_BTM_CELL, false, false },
36223 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36224 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36225 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36226 { "crypto", RS6000_BTM_CRYPTO, false, false },
36227 { "htm", RS6000_BTM_HTM, false, false },
36228 { "hard-dfp", RS6000_BTM_DFP, false, false },
36229 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36230 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36231 { "float128", RS6000_BTM_FLOAT128, false, false },
36234 /* Option variables that we want to support inside attribute((target)) and
36235 #pragma GCC target operations. */
36237 struct rs6000_opt_var {
36238 const char *name; /* option name */
36239 size_t global_offset; /* offset of the option in global_options. */
36240 size_t target_offset; /* offset of the option in target options. */
36243 static struct rs6000_opt_var const rs6000_opt_vars[] =
36245 { "friz",
36246 offsetof (struct gcc_options, x_TARGET_FRIZ),
36247 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36248 { "avoid-indexed-addresses",
36249 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36250 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36251 { "paired",
36252 offsetof (struct gcc_options, x_rs6000_paired_float),
36253 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36254 { "longcall",
36255 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36256 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36257 { "optimize-swaps",
36258 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36259 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36260 { "allow-movmisalign",
36261 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36262 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36263 { "sched-groups",
36264 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36265 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36266 { "always-hint",
36267 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36268 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36269 { "align-branch-targets",
36270 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36271 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36272 { "tls-markers",
36273 offsetof (struct gcc_options, x_tls_markers),
36274 offsetof (struct cl_target_option, x_tls_markers), },
36275 { "sched-prolog",
36276 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36277 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36278 { "sched-epilog",
36279 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36280 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36283 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36284 parsing. Return true if there were no errors. */
36286 static bool
36287 rs6000_inner_target_options (tree args, bool attr_p)
36289 bool ret = true;
36291 if (args == NULL_TREE)
36294 else if (TREE_CODE (args) == STRING_CST)
36296 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36297 char *q;
36299 while ((q = strtok (p, ",")) != NULL)
36301 bool error_p = false;
36302 bool not_valid_p = false;
36303 const char *cpu_opt = NULL;
36305 p = NULL;
36306 if (strncmp (q, "cpu=", 4) == 0)
36308 int cpu_index = rs6000_cpu_name_lookup (q+4);
36309 if (cpu_index >= 0)
36310 rs6000_cpu_index = cpu_index;
36311 else
36313 error_p = true;
36314 cpu_opt = q+4;
36317 else if (strncmp (q, "tune=", 5) == 0)
36319 int tune_index = rs6000_cpu_name_lookup (q+5);
36320 if (tune_index >= 0)
36321 rs6000_tune_index = tune_index;
36322 else
36324 error_p = true;
36325 cpu_opt = q+5;
36328 else
36330 size_t i;
36331 bool invert = false;
36332 char *r = q;
36334 error_p = true;
36335 if (strncmp (r, "no-", 3) == 0)
36337 invert = true;
36338 r += 3;
36341 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36342 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36344 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36346 if (!rs6000_opt_masks[i].valid_target)
36347 not_valid_p = true;
36348 else
36350 error_p = false;
36351 rs6000_isa_flags_explicit |= mask;
36353 /* VSX needs altivec, so -mvsx automagically sets
36354 altivec and disables -mavoid-indexed-addresses. */
36355 if (!invert)
36357 if (mask == OPTION_MASK_VSX)
36359 mask |= OPTION_MASK_ALTIVEC;
36360 TARGET_AVOID_XFORM = 0;
36364 if (rs6000_opt_masks[i].invert)
36365 invert = !invert;
36367 if (invert)
36368 rs6000_isa_flags &= ~mask;
36369 else
36370 rs6000_isa_flags |= mask;
36372 break;
36375 if (error_p && !not_valid_p)
36377 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36378 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36380 size_t j = rs6000_opt_vars[i].global_offset;
36381 *((int *) ((char *)&global_options + j)) = !invert;
36382 error_p = false;
36383 not_valid_p = false;
36384 break;
36389 if (error_p)
36391 const char *eprefix, *esuffix;
36393 ret = false;
36394 if (attr_p)
36396 eprefix = "__attribute__((__target__(";
36397 esuffix = ")))";
36399 else
36401 eprefix = "#pragma GCC target ";
36402 esuffix = "";
36405 if (cpu_opt)
36406 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36407 q, esuffix);
36408 else if (not_valid_p)
36409 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36410 else
36411 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36416 else if (TREE_CODE (args) == TREE_LIST)
36420 tree value = TREE_VALUE (args);
36421 if (value)
36423 bool ret2 = rs6000_inner_target_options (value, attr_p);
36424 if (!ret2)
36425 ret = false;
36427 args = TREE_CHAIN (args);
36429 while (args != NULL_TREE);
36432 else
36434 error ("attribute %<target%> argument not a string");
36435 return false;
36438 return ret;
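/* Examples of strings the parser above accepts, as they would appear
   in user code; each comma-separated token is cpu=/tune=, an entry in
   rs6000_opt_masks (optionally prefixed with "no-"), or an entry in
   rs6000_opt_vars.  Illustrative only.  */
#if 0
#pragma GCC target ("cpu=power9,tune=power9")

__attribute__((target ("vsx,no-avoid-indexed-addresses")))
void f (void);
#endif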
36441 /* Print out the target options as a list for -mdebug=target. */
36443 static void
36444 rs6000_debug_target_options (tree args, const char *prefix)
36446 if (args == NULL_TREE)
36447 fprintf (stderr, "%s<NULL>", prefix);
36449 else if (TREE_CODE (args) == STRING_CST)
36451 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36452 char *q;
36454 while ((q = strtok (p, ",")) != NULL)
36456 p = NULL;
36457 fprintf (stderr, "%s\"%s\"", prefix, q);
36458 prefix = ", ";
36462 else if (TREE_CODE (args) == TREE_LIST)
36466 tree value = TREE_VALUE (args);
36467 if (value)
36469 rs6000_debug_target_options (value, prefix);
36470 prefix = ", ";
36472 args = TREE_CHAIN (args);
36474 while (args != NULL_TREE);
36477 else
36478 gcc_unreachable ();
36480 return;
36484 /* Hook to validate attribute((target("..."))). */
36486 static bool
36487 rs6000_valid_attribute_p (tree fndecl,
36488 tree ARG_UNUSED (name),
36489 tree args,
36490 int flags)
36492 struct cl_target_option cur_target;
36493 bool ret;
36494 tree old_optimize = build_optimization_node (&global_options);
36495 tree new_target, new_optimize;
36496 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36498 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36500 if (TARGET_DEBUG_TARGET)
36502 tree tname = DECL_NAME (fndecl);
36503 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36504 if (tname)
36505 fprintf (stderr, "function: %.*s\n",
36506 (int) IDENTIFIER_LENGTH (tname),
36507 IDENTIFIER_POINTER (tname));
36508 else
36509 fprintf (stderr, "function: unknown\n");
36511 fprintf (stderr, "args:");
36512 rs6000_debug_target_options (args, " ");
36513 fprintf (stderr, "\n");
36515 if (flags)
36516 fprintf (stderr, "flags: 0x%x\n", flags);
36518 fprintf (stderr, "--------------------\n");
36521 /* attribute((target("default"))) does nothing, beyond
36522 affecting multi-versioning. */
36523 if (TREE_VALUE (args)
36524 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36525 && TREE_CHAIN (args) == NULL_TREE
36526 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36527 return true;
36529 old_optimize = build_optimization_node (&global_options);
36530 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36532 /* If the function changed the optimization levels as well as setting target
36533 options, start with the optimizations specified. */
36534 if (func_optimize && func_optimize != old_optimize)
36535 cl_optimization_restore (&global_options,
36536 TREE_OPTIMIZATION (func_optimize));
36538 /* The target attributes may also change some optimization flags, so update
36539 the optimization options if necessary. */
36540 cl_target_option_save (&cur_target, &global_options);
36541 rs6000_cpu_index = rs6000_tune_index = -1;
36542 ret = rs6000_inner_target_options (args, true);
36544 /* Set up any additional state. */
36545 if (ret)
36547 ret = rs6000_option_override_internal (false);
36548 new_target = build_target_option_node (&global_options);
36550 else
36551 new_target = NULL;
36553 new_optimize = build_optimization_node (&global_options);
36555 if (!new_target)
36556 ret = false;
36558 else if (fndecl)
36560 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36562 if (old_optimize != new_optimize)
36563 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36566 cl_target_option_restore (&global_options, &cur_target);
36568 if (old_optimize != new_optimize)
36569 cl_optimization_restore (&global_options,
36570 TREE_OPTIMIZATION (old_optimize));
36572 return ret;
36576 /* Hook to validate the current #pragma GCC target and set the state, and
36577 update the macros based on what was changed. If ARGS is NULL, then
36578 POP_TARGET is used to reset the options. */
36580 bool
36581 rs6000_pragma_target_parse (tree args, tree pop_target)
36583 tree prev_tree = build_target_option_node (&global_options);
36584 tree cur_tree;
36585 struct cl_target_option *prev_opt, *cur_opt;
36586 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36587 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36589 if (TARGET_DEBUG_TARGET)
36591 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36592 fprintf (stderr, "args:");
36593 rs6000_debug_target_options (args, " ");
36594 fprintf (stderr, "\n");
36596 if (pop_target)
36598 fprintf (stderr, "pop_target:\n");
36599 debug_tree (pop_target);
36601 else
36602 fprintf (stderr, "pop_target: <NULL>\n");
36604 fprintf (stderr, "--------------------\n");
36607 if (! args)
36609 cur_tree = ((pop_target)
36610 ? pop_target
36611 : target_option_default_node);
36612 cl_target_option_restore (&global_options,
36613 TREE_TARGET_OPTION (cur_tree));
36615 else
36617 rs6000_cpu_index = rs6000_tune_index = -1;
36618 if (!rs6000_inner_target_options (args, false)
36619 || !rs6000_option_override_internal (false)
36620 || (cur_tree = build_target_option_node (&global_options))
36621 == NULL_TREE)
36623 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36624 fprintf (stderr, "invalid pragma\n");
36626 return false;
36630 target_option_current_node = cur_tree;
36632 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36633 change the macros that are defined. */
36634 if (rs6000_target_modify_macros_ptr)
36636 prev_opt = TREE_TARGET_OPTION (prev_tree);
36637 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36638 prev_flags = prev_opt->x_rs6000_isa_flags;
36640 cur_opt = TREE_TARGET_OPTION (cur_tree);
36641 cur_flags = cur_opt->x_rs6000_isa_flags;
36642 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36644 diff_bumask = (prev_bumask ^ cur_bumask);
36645 diff_flags = (prev_flags ^ cur_flags);
36647 if ((diff_flags != 0) || (diff_bumask != 0))
36649 /* Delete old macros. */
36650 rs6000_target_modify_macros_ptr (false,
36651 prev_flags & diff_flags,
36652 prev_bumask & diff_bumask);
36654 /* Define new macros. */
36655 rs6000_target_modify_macros_ptr (true,
36656 cur_flags & diff_flags,
36657 cur_bumask & diff_bumask);
36661 return true;
36665 /* Remember the last target of rs6000_set_current_function. */
36666 static GTY(()) tree rs6000_previous_fndecl;
36668 /* Restore target's globals from NEW_TREE and invalidate the
36669 rs6000_previous_fndecl cache. */
36671 static void
36672 rs6000_activate_target_options (tree new_tree)
36674 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36675 if (TREE_TARGET_GLOBALS (new_tree))
36676 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36677 else if (new_tree == target_option_default_node)
36678 restore_target_globals (&default_target_globals);
36679 else
36680 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36681 rs6000_previous_fndecl = NULL_TREE;
36684 /* Establish appropriate back-end context for processing the function
36685 FNDECL. The argument might be NULL to indicate processing at top
36686 level, outside of any function scope. */
36687 static void
36688 rs6000_set_current_function (tree fndecl)
36690 if (TARGET_DEBUG_TARGET)
36692 fprintf (stderr, "\n==================== rs6000_set_current_function");
36694 if (fndecl)
36695 fprintf (stderr, ", fndecl %s (%p)",
36696 (DECL_NAME (fndecl)
36697 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36698 : "<unknown>"), (void *)fndecl);
36700 if (rs6000_previous_fndecl)
36701 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36703 fprintf (stderr, "\n");
36706 /* Only change the context if the function changes. This hook is called
36707 several times in the course of compiling a function, and we don't want to
36708 slow things down too much or call target_reinit when it isn't safe. */
36709 if (fndecl == rs6000_previous_fndecl)
36710 return;
36712 tree old_tree;
36713 if (rs6000_previous_fndecl == NULL_TREE)
36714 old_tree = target_option_current_node;
36715 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36716 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36717 else
36718 old_tree = target_option_default_node;
36720 tree new_tree;
36721 if (fndecl == NULL_TREE)
36723 if (old_tree != target_option_current_node)
36724 new_tree = target_option_current_node;
36725 else
36726 new_tree = NULL_TREE;
36728 else
36730 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36731 if (new_tree == NULL_TREE)
36732 new_tree = target_option_default_node;
36735 if (TARGET_DEBUG_TARGET)
36737 if (new_tree)
36739 fprintf (stderr, "\nnew fndecl target specific options:\n");
36740 debug_tree (new_tree);
36743 if (old_tree)
36745 fprintf (stderr, "\nold fndecl target specific options:\n");
36746 debug_tree (old_tree);
36749 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36750 fprintf (stderr, "--------------------\n");
36753 if (new_tree && old_tree != new_tree)
36754 rs6000_activate_target_options (new_tree);
36756 if (fndecl)
36757 rs6000_previous_fndecl = fndecl;
36761 /* Save the current options */
36763 static void
36764 rs6000_function_specific_save (struct cl_target_option *ptr,
36765 struct gcc_options *opts)
36767 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36768 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36771 /* Restore the current options */
36773 static void
36774 rs6000_function_specific_restore (struct gcc_options *opts,
36775 struct cl_target_option *ptr)
36778 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36779 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36780 (void) rs6000_option_override_internal (false);
36783 /* Print the current options */
36785 static void
36786 rs6000_function_specific_print (FILE *file, int indent,
36787 struct cl_target_option *ptr)
36789 rs6000_print_isa_options (file, indent, "Isa options set",
36790 ptr->x_rs6000_isa_flags);
36792 rs6000_print_isa_options (file, indent, "Isa options explicit",
36793 ptr->x_rs6000_isa_flags_explicit);
36796 /* Helper function to print the current isa or misc options on a line. */
36798 static void
36799 rs6000_print_options_internal (FILE *file,
36800 int indent,
36801 const char *string,
36802 HOST_WIDE_INT flags,
36803 const char *prefix,
36804 const struct rs6000_opt_mask *opts,
36805 size_t num_elements)
36807 size_t i;
36808 size_t start_column = 0;
36809 size_t cur_column;
36810 size_t max_column = 120;
36811 size_t prefix_len = strlen (prefix);
36812 size_t comma_len = 0;
36813 const char *comma = "";
36815 if (indent)
36816 start_column += fprintf (file, "%*s", indent, "");
36818 if (!flags)
36820 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
36821 return;
36824 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
36826 /* Print the various mask options. */
36827 cur_column = start_column;
36828 for (i = 0; i < num_elements; i++)
36830 bool invert = opts[i].invert;
36831 const char *name = opts[i].name;
36832 const char *no_str = "";
36833 HOST_WIDE_INT mask = opts[i].mask;
36834 size_t len = comma_len + prefix_len + strlen (name);
36836 if (!invert)
36838 if ((flags & mask) == 0)
36840 no_str = "no-";
36841 len += sizeof ("no-") - 1;
36844 flags &= ~mask;
36847 else
36849 if ((flags & mask) != 0)
36851 no_str = "no-";
36852 len += sizeof ("no-") - 1;
36855 flags |= mask;
36858 cur_column += len;
36859 if (cur_column > max_column)
36861 fprintf (file, ", \\\n%*s", (int)start_column, "");
36862 cur_column = start_column + len;
36863 comma = "";
36866 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36867 comma = ", ";
36868 comma_len = sizeof (", ") - 1;
36871 fputs ("\n", file);
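/* Illustration (not part of the original source): with the "-m" prefix,
   the output of the helper above looks roughly like

       Isa options set = 0x...: -maltivec, -mvsx, -mno-power9-vector, \
                                -mno-direct-move, ...

   where a line that would pass column 120 is continued with a trailing
   backslash and re-indented to the starting column.  */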
36874 /* Helper function to print the current isa options on a line. */
36876 static void
36877 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36878 HOST_WIDE_INT flags)
36880 rs6000_print_options_internal (file, indent, string, flags, "-m",
36881 &rs6000_opt_masks[0],
36882 ARRAY_SIZE (rs6000_opt_masks));
36885 static void
36886 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36887 HOST_WIDE_INT flags)
36889 rs6000_print_options_internal (file, indent, string, flags, "",
36890 &rs6000_builtin_mask_names[0],
36891 ARRAY_SIZE (rs6000_builtin_mask_names));
36894 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36895 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36896 -mupper-regs-df, etc.).
36898 If the user used -mno-power8-vector, we need to turn off all of the implicit
36899 ISA 2.07 and 3.0 options that relate to the vector unit.
36901 If the user used -mno-power9-vector, we need to turn off all of the implicit
36902 ISA 3.0 options that relate to the vector unit.
36904 This function does not handle explicit options such as the user specifying
36905 -mdirect-move. These are handled in rs6000_option_override_internal, and
36906 the appropriate error is given if needed.
36908 We return a mask of all of the implicit options that should not be enabled
36909 by default. */
36911 static HOST_WIDE_INT
36912 rs6000_disable_incompatible_switches (void)
36914 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36915 size_t i, j;
36917 static const struct {
36918 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36919 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36920 const char *const name; /* name of the switch. */
36921 } flags[] = {
36922 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36923 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36924 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36927 for (i = 0; i < ARRAY_SIZE (flags); i++)
36929 HOST_WIDE_INT no_flag = flags[i].no_flag;
36931 if ((rs6000_isa_flags & no_flag) == 0
36932 && (rs6000_isa_flags_explicit & no_flag) != 0)
36934 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36935 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36936 & rs6000_isa_flags
36937 & dep_flags);
36939 if (set_flags)
36941 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36942 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36944 set_flags &= ~rs6000_opt_masks[j].mask;
36945 error ("%<-mno-%s%> turns off %<-m%s%>",
36946 flags[i].name,
36947 rs6000_opt_masks[j].name);
36950 gcc_assert (!set_flags);
36953 rs6000_isa_flags &= ~dep_flags;
36954 ignore_masks |= no_flag | dep_flags;
36958 return ignore_masks;
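/* Minimal sketch (not part of the original source) of the masking rule
   implemented above; the names below are placeholders, not real rs6000
   masks.  */
static unsigned long long
example_disable_dependents (unsigned long long isa_flags,
                            unsigned long long explicit_flags,
                            unsigned long long parent_mask,
                            unsigned long long dep_masks)
{
  /* If the parent option was explicitly negated, every option that
     depends on it is cleared as well.  */
  if ((explicit_flags & parent_mask) != 0
      && (isa_flags & parent_mask) == 0)
    isa_flags &= ~dep_masks;
  return isa_flags;
}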
36962 /* Helper function for printing the function name when debugging. */
36964 static const char *
36965 get_decl_name (tree fn)
36967 tree name;
36969 if (!fn)
36970 return "<null>";
36972 name = DECL_NAME (fn);
36973 if (!name)
36974 return "<no-name>";
36976 return IDENTIFIER_POINTER (name);
36979 /* Return the clone id of the target we are compiling code for in a target
36980 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
36981 the priority list for the target clones (ordered from lowest to
36982 highest). */
36984 static int
36985 rs6000_clone_priority (tree fndecl)
36987 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36988 HOST_WIDE_INT isa_masks;
36989 int ret = CLONE_DEFAULT;
36990 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
36991 const char *attrs_str = NULL;
36993 attrs = TREE_VALUE (TREE_VALUE (attrs));
36994 attrs_str = TREE_STRING_POINTER (attrs);
36996 /* Return priority zero for the default function. Return the ISA needed
36997 for the function if it is not the default. */
36998 if (strcmp (attrs_str, "default") != 0)
37000 if (fn_opts == NULL_TREE)
37001 fn_opts = target_option_default_node;
37003 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37004 isa_masks = rs6000_isa_flags;
37005 else
37006 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37008 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37009 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37010 break;
37013 if (TARGET_DEBUG_TARGET)
37014 fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
37015 get_decl_name (fndecl), ret);
37017 return ret;
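/* Usage sketch (illustrative, not part of this file): the kind of
   user-level declaration whose clones this function prioritizes.  The
   function name and body are hypothetical.  */
__attribute__ ((target_clones ("cpu=power9,default")))
static int
example_cloned_fn (int x)
{
  return x + 1;   /* compiled once per target in the clone list  */
}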
37020 /* This compares the priority of target features in function DECL1 and DECL2.
37021 It returns positive value if DECL1 is higher priority, negative value if
37022 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37023 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37025 static int
37026 rs6000_compare_version_priority (tree decl1, tree decl2)
37028 int priority1 = rs6000_clone_priority (decl1);
37029 int priority2 = rs6000_clone_priority (decl2);
37030 int ret = priority1 - priority2;
37032 if (TARGET_DEBUG_TARGET)
37033 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37034 get_decl_name (decl1), get_decl_name (decl2), ret);
37036 return ret;
37039 /* Make a dispatcher declaration for the multi-versioned function DECL.
37040 Calls to DECL function will be replaced with calls to the dispatcher
37041 by the front-end. Returns the decl of the dispatcher function. */
37043 static tree
37044 rs6000_get_function_versions_dispatcher (void *decl)
37046 tree fn = (tree) decl;
37047 struct cgraph_node *node = NULL;
37048 struct cgraph_node *default_node = NULL;
37049 struct cgraph_function_version_info *node_v = NULL;
37050 struct cgraph_function_version_info *first_v = NULL;
37052 tree dispatch_decl = NULL;
37054 struct cgraph_function_version_info *default_version_info = NULL;
37055 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37057 if (TARGET_DEBUG_TARGET)
37058 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37059 get_decl_name (fn));
37061 node = cgraph_node::get (fn);
37062 gcc_assert (node != NULL);
37064 node_v = node->function_version ();
37065 gcc_assert (node_v != NULL);
37067 if (node_v->dispatcher_resolver != NULL)
37068 return node_v->dispatcher_resolver;
37070 /* Find the default version and make it the first node. */
37071 first_v = node_v;
37072 /* Go to the beginning of the chain. */
37073 while (first_v->prev != NULL)
37074 first_v = first_v->prev;
37076 default_version_info = first_v;
37077 while (default_version_info != NULL)
37079 const tree decl2 = default_version_info->this_node->decl;
37080 if (is_function_default_version (decl2))
37081 break;
37082 default_version_info = default_version_info->next;
37085 /* If there is no default node, just return NULL. */
37086 if (default_version_info == NULL)
37087 return NULL;
37089 /* Make default info the first node. */
37090 if (first_v != default_version_info)
37092 default_version_info->prev->next = default_version_info->next;
37093 if (default_version_info->next)
37094 default_version_info->next->prev = default_version_info->prev;
37095 first_v->prev = default_version_info;
37096 default_version_info->next = first_v;
37097 default_version_info->prev = NULL;
37100 default_node = default_version_info->this_node;
37102 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37103 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37104 "target_clones attribute needs GLIBC (2.23 and newer) that "
37105 "exports hardware capability bits");
37106 #else
37108 if (targetm.has_ifunc_p ())
37110 struct cgraph_function_version_info *it_v = NULL;
37111 struct cgraph_node *dispatcher_node = NULL;
37112 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37114 /* Right now, the dispatching is done via ifunc. */
37115 dispatch_decl = make_dispatcher_decl (default_node->decl);
37117 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37118 gcc_assert (dispatcher_node != NULL);
37119 dispatcher_node->dispatcher_function = 1;
37120 dispatcher_version_info
37121 = dispatcher_node->insert_new_function_version ();
37122 dispatcher_version_info->next = default_version_info;
37123 dispatcher_node->definition = 1;
37125 /* Set the dispatcher for all the versions. */
37126 it_v = default_version_info;
37127 while (it_v != NULL)
37129 it_v->dispatcher_resolver = dispatch_decl;
37130 it_v = it_v->next;
37133 else
37135 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37136 "multiversioning needs ifunc which is not supported "
37137 "on this target");
37139 #endif
37141 return dispatch_decl;
37144 /* Make the resolver function decl to dispatch the versions of a multi-
37145 versioned function, DEFAULT_DECL. Create an empty basic block in the
37146 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37147 function. */
37149 static tree
37150 make_resolver_func (const tree default_decl,
37151 const tree dispatch_decl,
37152 basic_block *empty_bb)
37154 /* Make the resolver function static. The resolver function returns
37155 void *. */
37156 tree decl_name = clone_function_name (default_decl, "resolver");
37157 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37158 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37159 tree decl = build_fn_decl (resolver_name, type);
37160 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37162 DECL_NAME (decl) = decl_name;
37163 TREE_USED (decl) = 1;
37164 DECL_ARTIFICIAL (decl) = 1;
37165 DECL_IGNORED_P (decl) = 0;
37166 TREE_PUBLIC (decl) = 0;
37167 DECL_UNINLINABLE (decl) = 1;
37169 /* Resolver is not external, body is generated. */
37170 DECL_EXTERNAL (decl) = 0;
37171 DECL_EXTERNAL (dispatch_decl) = 0;
37173 DECL_CONTEXT (decl) = NULL_TREE;
37174 DECL_INITIAL (decl) = make_node (BLOCK);
37175 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37177 /* Build result decl and add to function_decl. */
37178 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37179 DECL_ARTIFICIAL (t) = 1;
37180 DECL_IGNORED_P (t) = 1;
37181 DECL_RESULT (decl) = t;
37183 gimplify_function_tree (decl);
37184 push_cfun (DECL_STRUCT_FUNCTION (decl));
37185 *empty_bb = init_lowered_empty_function (decl, false,
37186 profile_count::uninitialized ());
37188 cgraph_node::add_new_function (decl, true);
37189 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37191 pop_cfun ();
37193 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37194 DECL_ATTRIBUTES (dispatch_decl)
37195 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37197 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37199 return decl;
37202 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37203 return a pointer to VERSION_DECL if we are running on a machine that
37204 supports the hardware architecture bits indexed by CLONE_ISA. This function will
37205 be called during version dispatch to decide which function version to
37206 execute. It returns the basic block at the end, to which more conditions
37207 can be added. */
37209 static basic_block
37210 add_condition_to_bb (tree function_decl, tree version_decl,
37211 int clone_isa, basic_block new_bb)
37213 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37215 gcc_assert (new_bb != NULL);
37216 gimple_seq gseq = bb_seq (new_bb);
37219 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37220 build_fold_addr_expr (version_decl));
37221 tree result_var = create_tmp_var (ptr_type_node);
37222 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37223 gimple *return_stmt = gimple_build_return (result_var);
37225 if (clone_isa == CLONE_DEFAULT)
37227 gimple_seq_add_stmt (&gseq, convert_stmt);
37228 gimple_seq_add_stmt (&gseq, return_stmt);
37229 set_bb_seq (new_bb, gseq);
37230 gimple_set_bb (convert_stmt, new_bb);
37231 gimple_set_bb (return_stmt, new_bb);
37232 pop_cfun ();
37233 return new_bb;
37236 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37237 tree cond_var = create_tmp_var (bool_int_type_node);
37238 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37239 const char *arg_str = rs6000_clone_map[clone_isa].name;
37240 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37241 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37242 gimple_call_set_lhs (call_cond_stmt, cond_var);
37244 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37245 gimple_set_bb (call_cond_stmt, new_bb);
37246 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37248 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37249 NULL_TREE, NULL_TREE);
37250 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37251 gimple_set_bb (if_else_stmt, new_bb);
37252 gimple_seq_add_stmt (&gseq, if_else_stmt);
37254 gimple_seq_add_stmt (&gseq, convert_stmt);
37255 gimple_seq_add_stmt (&gseq, return_stmt);
37256 set_bb_seq (new_bb, gseq);
37258 basic_block bb1 = new_bb;
37259 edge e12 = split_block (bb1, if_else_stmt);
37260 basic_block bb2 = e12->dest;
37261 e12->flags &= ~EDGE_FALLTHRU;
37262 e12->flags |= EDGE_TRUE_VALUE;
37264 edge e23 = split_block (bb2, return_stmt);
37265 gimple_set_bb (convert_stmt, bb2);
37266 gimple_set_bb (return_stmt, bb2);
37268 basic_block bb3 = e23->dest;
37269 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37271 remove_edge (e23);
37272 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37274 pop_cfun ();
37275 return bb3;
37278 /* This function generates the dispatch function for multi-versioned functions.
37279 DISPATCH_DECL is the function which will contain the dispatch logic.
37280 FNDECLS_P points to a vector of the candidate function decls for dispatch.
37281 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37282 code is generated. */
37284 static int
37285 dispatch_function_versions (tree dispatch_decl,
37286 void *fndecls_p,
37287 basic_block *empty_bb)
37289 int ix;
37290 tree ele;
37291 vec<tree> *fndecls;
37292 tree clones[CLONE_MAX];
37294 if (TARGET_DEBUG_TARGET)
37295 fputs ("dispatch_function_versions, top\n", stderr);
37297 gcc_assert (dispatch_decl != NULL
37298 && fndecls_p != NULL
37299 && empty_bb != NULL);
37301 /* fndecls_p is actually a vector. */
37302 fndecls = static_cast<vec<tree> *> (fndecls_p);
37304 /* At least one more version other than the default. */
37305 gcc_assert (fndecls->length () >= 2);
37307 /* The first version in the vector is the default decl. */
37308 memset ((void *) clones, '\0', sizeof (clones));
37309 clones[CLONE_DEFAULT] = (*fndecls)[0];
37311 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37312 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37313 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37314 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37315 to insert the code here to do the call. */
37317 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37319 int priority = rs6000_clone_priority (ele);
37320 if (!clones[priority])
37321 clones[priority] = ele;
37324 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37325 if (clones[ix])
37327 if (TARGET_DEBUG_TARGET)
37328 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37329 ix, get_decl_name (clones[ix]));
37331 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37332 *empty_bb);
37335 return 0;
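/* Illustrative sketch (not part of the original source): the dispatch
   body built above behaves roughly like this hand-written ifunc
   resolver, testing the highest-priority clone first.  The fn_* names
   are hypothetical.  */
extern int fn_power9 (int);
extern int fn_power8 (int);
extern int fn_default (int);

static void *
example_resolver (void)
{
  if (__builtin_cpu_supports ("arch_3_00"))   /* ISA 3.0 (power9)  */
    return (void *) &fn_power9;
  if (__builtin_cpu_supports ("arch_2_07"))   /* ISA 2.07 (power8)  */
    return (void *) &fn_power8;
  return (void *) &fn_default;
}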
37338 /* Generate the dispatching code body to dispatch multi-versioned function
37339 DECL. The target hook is called to process the "target" attributes and
37340 provide the code to dispatch the right function at run-time. NODE points
37341 to the dispatcher decl whose body will be created. */
37343 static tree
37344 rs6000_generate_version_dispatcher_body (void *node_p)
37346 tree resolver;
37347 basic_block empty_bb;
37348 struct cgraph_node *node = (cgraph_node *) node_p;
37349 struct cgraph_function_version_info *ninfo = node->function_version ();
37351 if (ninfo->dispatcher_resolver)
37352 return ninfo->dispatcher_resolver;
37354 /* node is going to be an alias, so remove the finalized bit. */
37355 node->definition = false;
37357 /* The first version in the chain corresponds to the default version. */
37358 ninfo->dispatcher_resolver = resolver
37359 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37361 if (TARGET_DEBUG_TARGET)
37362 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37363 get_decl_name (resolver));
37365 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37366 auto_vec<tree, 2> fn_ver_vec;
37368 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37369 vinfo;
37370 vinfo = vinfo->next)
37372 struct cgraph_node *version = vinfo->this_node;
37373 /* Check for virtual functions here again, as by this time it should
37374 have been determined if this function needs a vtable index or
37375 not. This happens for methods in derived classes that override
37376 virtual methods in base classes but are not explicitly marked as
37377 virtual. */
37378 if (DECL_VINDEX (version->decl))
37379 sorry ("Virtual function multiversioning not supported");
37381 fn_ver_vec.safe_push (version->decl);
37384 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37385 cgraph_edge::rebuild_edges ();
37386 pop_cfun ();
37387 return resolver;
37391 /* Hook to determine if one function can safely inline another. */
37393 static bool
37394 rs6000_can_inline_p (tree caller, tree callee)
37396 bool ret = false;
37397 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37398 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37400 /* If the callee has no option attributes, then it is OK to inline. */
37401 if (!callee_tree)
37402 ret = true;
37404 /* If the caller has no option attributes but the callee does, then it is
37405 not OK to inline. */
37406 else if (!caller_tree)
37407 ret = false;
37409 else
37411 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37412 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37414 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37415 can inline an altivec function but a non-vsx function can't inline a
37416 vsx function. */
37417 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37418 == callee_opts->x_rs6000_isa_flags)
37419 ret = true;
37422 if (TARGET_DEBUG_TARGET)
37423 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37424 get_decl_name (caller), get_decl_name (callee),
37425 (ret ? "can" : "cannot"));
37427 return ret;
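/* Minimal sketch (not part of the original source) of the subset test
   above: inlining is allowed only when every ISA bit the callee needs
   is also enabled in the caller.  */
static int
example_isa_subset_p (unsigned long long caller_isa,
                      unsigned long long callee_isa)
{
  return (caller_isa & callee_isa) == callee_isa;
}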
37430 /* Allocate a stack temp and fix up the address so it meets the particular
37431 memory requirements (either offsettable or REG+REG addressing). */
37433 rtx
37434 rs6000_allocate_stack_temp (machine_mode mode,
37435 bool offsettable_p,
37436 bool reg_reg_p)
37438 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37439 rtx addr = XEXP (stack, 0);
37440 int strict_p = reload_completed;
37442 if (!legitimate_indirect_address_p (addr, strict_p))
37444 if (offsettable_p
37445 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37446 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37448 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37449 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37452 return stack;
37455 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37456 to such a form to deal with memory reference instructions like STFIWX that
37457 only take reg+reg addressing. */
37459 rtx
37460 rs6000_address_for_fpconvert (rtx x)
37462 rtx addr;
37464 gcc_assert (MEM_P (x));
37465 addr = XEXP (x, 0);
37466 if (! legitimate_indirect_address_p (addr, reload_completed)
37467 && ! legitimate_indexed_address_p (addr, reload_completed))
37469 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37471 rtx reg = XEXP (addr, 0);
37472 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37473 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37474 gcc_assert (REG_P (reg));
37475 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37476 addr = reg;
37478 else if (GET_CODE (addr) == PRE_MODIFY)
37480 rtx reg = XEXP (addr, 0);
37481 rtx expr = XEXP (addr, 1);
37482 gcc_assert (REG_P (reg));
37483 gcc_assert (GET_CODE (expr) == PLUS);
37484 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37485 addr = reg;
37488 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37491 return x;
37494 /* Given a memory reference, if it is not in the form for altivec memory
37495 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
37496 convert to the altivec format. */
37498 rtx
37499 rs6000_address_for_altivec (rtx x)
37501 gcc_assert (MEM_P (x));
37502 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
37504 rtx addr = XEXP (x, 0);
37506 if (!legitimate_indexed_address_p (addr, reload_completed)
37507 && !legitimate_indirect_address_p (addr, reload_completed))
37508 addr = copy_to_mode_reg (Pmode, addr);
37510 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
37511 x = change_address (x, GET_MODE (x), addr);
37514 return x;
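/* Worked example (not part of the original source): masking with -16
   clears the low four address bits, e.g. 0x1003 & -16 == 0x1000,
   matching the 16-byte truncation that the AltiVec lvx/stvx
   instructions apply to their effective address.  */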
37517 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37519 On the RS/6000, all integer constants are acceptable, most won't be valid
37520 for particular insns, though. Only easy FP constants are acceptable. */
37522 static bool
37523 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37525 if (TARGET_ELF && tls_referenced_p (x))
37526 return false;
37528 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37529 || GET_MODE (x) == VOIDmode
37530 || (TARGET_POWERPC64 && mode == DImode)
37531 || easy_fp_constant (x, mode)
37532 || easy_vector_constant (x, mode));
37536 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37538 static bool
37539 chain_already_loaded (rtx_insn *last)
37541 for (; last != NULL; last = PREV_INSN (last))
37543 if (NONJUMP_INSN_P (last))
37545 rtx patt = PATTERN (last);
37547 if (GET_CODE (patt) == SET)
37549 rtx lhs = XEXP (patt, 0);
37551 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37552 return true;
37556 return false;
37559 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37561 void
37562 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37564 const bool direct_call_p
37565 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37566 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37567 rtx toc_load = NULL_RTX;
37568 rtx toc_restore = NULL_RTX;
37569 rtx func_addr;
37570 rtx abi_reg = NULL_RTX;
37571 rtx call[4];
37572 int n_call;
37573 rtx insn;
37575 /* Handle longcall attributes. */
37576 if (INTVAL (cookie) & CALL_LONG)
37577 func_desc = rs6000_longcall_ref (func_desc);
37579 /* Handle indirect calls. */
37580 if (GET_CODE (func_desc) != SYMBOL_REF
37581 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37583 /* Save the TOC into its reserved slot before the call,
37584 and prepare to restore it after the call. */
37585 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37586 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37587 rtx stack_toc_mem = gen_frame_mem (Pmode,
37588 gen_rtx_PLUS (Pmode, stack_ptr,
37589 stack_toc_offset));
37590 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37591 gen_rtvec (1, stack_toc_offset),
37592 UNSPEC_TOCSLOT);
37593 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37595 /* Can we optimize saving the TOC in the prologue or
37596 do we need to do it at every call? */
37597 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37598 cfun->machine->save_toc_in_prologue = true;
37599 else
37601 MEM_VOLATILE_P (stack_toc_mem) = 1;
37602 emit_move_insn (stack_toc_mem, toc_reg);
37605 if (DEFAULT_ABI == ABI_ELFv2)
37607 /* A function pointer in the ELFv2 ABI is just a plain address, but
37608 the ABI requires it to be loaded into r12 before the call. */
37609 func_addr = gen_rtx_REG (Pmode, 12);
37610 emit_move_insn (func_addr, func_desc);
37611 abi_reg = func_addr;
37613 else
37615 /* A function pointer under AIX is a pointer to a data area whose
37616 first word contains the actual address of the function, whose
37617 second word contains a pointer to its TOC, and whose third word
37618 contains a value to place in the static chain register (r11).
37619 Note that if we load the static chain, our "trampoline" need
37620 not have any executable code. */
37622 /* Load up address of the actual function. */
37623 func_desc = force_reg (Pmode, func_desc);
37624 func_addr = gen_reg_rtx (Pmode);
37625 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37627 /* Prepare to load the TOC of the called function. Note that the
37628 TOC load must happen immediately before the actual call so
37629 that unwinding the TOC registers works correctly. See the
37630 comment in frob_update_context. */
37631 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37632 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37633 gen_rtx_PLUS (Pmode, func_desc,
37634 func_toc_offset));
37635 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37637 /* If we have a static chain, load it up. But, if the call was
37638 originally direct, the 3rd word has not been written since no
37639 trampoline has been built, so we ought not to load it, lest we
37640 overwrite a static chain value. */
37641 if (!direct_call_p
37642 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37643 && !chain_already_loaded (get_current_sequence ()->next->last))
37645 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37646 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37647 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37648 gen_rtx_PLUS (Pmode, func_desc,
37649 func_sc_offset));
37650 emit_move_insn (sc_reg, func_sc_mem);
37651 abi_reg = sc_reg;
37655 else
37657 /* Direct calls use the TOC: for local calls, the callee will
37658 assume the TOC register is set; for non-local calls, the
37659 PLT stub needs the TOC register. */
37660 abi_reg = toc_reg;
37661 func_addr = func_desc;
37664 /* Create the call. */
37665 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37666 if (value != NULL_RTX)
37667 call[0] = gen_rtx_SET (value, call[0]);
37668 n_call = 1;
37670 if (toc_load)
37671 call[n_call++] = toc_load;
37672 if (toc_restore)
37673 call[n_call++] = toc_restore;
37675 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37677 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37678 insn = emit_call_insn (insn);
37680 /* Mention all registers defined by the ABI to hold information
37681 as uses in CALL_INSN_FUNCTION_USAGE. */
37682 if (abi_reg)
37683 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
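/* Illustrative sketch (not part of the original source): the AIX
   function descriptor layout described above, written as a C struct.  */
struct example_aix_func_desc
{
  void *entry;          /* word 0: address of the function's code.  */
  void *toc;            /* word 1: TOC pointer for the callee.  */
  void *static_chain;   /* word 2: value to load into r11, if any.  */
};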
37686 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37688 void
37689 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37691 rtx call[2];
37692 rtx insn;
37694 gcc_assert (INTVAL (cookie) == 0);
37696 /* Create the call. */
37697 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37698 if (value != NULL_RTX)
37699 call[0] = gen_rtx_SET (value, call[0]);
37701 call[1] = simple_return_rtx;
37703 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37704 insn = emit_call_insn (insn);
37706 /* Note use of the TOC register. */
37707 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37710 /* Return whether we need to always update the saved TOC pointer when we update
37711 the stack pointer. */
37713 static bool
37714 rs6000_save_toc_in_prologue_p (void)
37716 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37719 #ifdef HAVE_GAS_HIDDEN
37720 # define USE_HIDDEN_LINKONCE 1
37721 #else
37722 # define USE_HIDDEN_LINKONCE 0
37723 #endif
37725 /* Fills in the label name that should be used for a 476 link stack thunk. */
37727 void
37728 get_ppc476_thunk_name (char name[32])
37730 gcc_assert (TARGET_LINK_STACK);
37732 if (USE_HIDDEN_LINKONCE)
37733 sprintf (name, "__ppc476.get_thunk");
37734 else
37735 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37738 /* This function emits the simple thunk routine that is used to preserve
37739 the link stack on the 476 cpu. */
37741 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37742 static void
37743 rs6000_code_end (void)
37745 char name[32];
37746 tree decl;
37748 if (!TARGET_LINK_STACK)
37749 return;
37751 get_ppc476_thunk_name (name);
37753 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37754 build_function_type_list (void_type_node, NULL_TREE));
37755 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37756 NULL_TREE, void_type_node);
37757 TREE_PUBLIC (decl) = 1;
37758 TREE_STATIC (decl) = 1;
37760 #if RS6000_WEAK
37761 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37763 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37764 targetm.asm_out.unique_section (decl, 0);
37765 switch_to_section (get_named_section (decl, NULL, 0));
37766 DECL_WEAK (decl) = 1;
37767 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37768 targetm.asm_out.globalize_label (asm_out_file, name);
37769 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37770 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37772 else
37773 #endif
37775 switch_to_section (text_section);
37776 ASM_OUTPUT_LABEL (asm_out_file, name);
37779 DECL_INITIAL (decl) = make_node (BLOCK);
37780 current_function_decl = decl;
37781 allocate_struct_function (decl, false);
37782 init_function_start (decl);
37783 first_function_block_is_cold = false;
37784 /* Make sure unwind info is emitted for the thunk if needed. */
37785 final_start_function (emit_barrier (), asm_out_file, 1);
37787 fputs ("\tblr\n", asm_out_file);
37789 final_end_function ();
37790 init_insn_lengths ();
37791 free_after_compilation (cfun);
37792 set_cfun (NULL);
37793 current_function_decl = NULL;
37796 /* Add r30 to hard reg set if the prologue sets it up and it is not
37797 pic_offset_table_rtx. */
37799 static void
37800 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37802 if (!TARGET_SINGLE_PIC_BASE
37803 && TARGET_TOC
37804 && TARGET_MINIMAL_TOC
37805 && !constant_pool_empty_p ())
37806 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37807 if (cfun->machine->split_stack_argp_used)
37808 add_to_hard_reg_set (&set->set, Pmode, 12);
37812 /* Helper function for rs6000_split_logical to emit a logical instruction after
37813 splitting the operation to single GPR registers.
37815 DEST is the destination register.
37816 OP1 and OP2 are the input source registers.
37817 CODE is the base operation (AND, IOR, XOR, NOT).
37818 MODE is the machine mode.
37819 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37820 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37821 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37823 static void
37824 rs6000_split_logical_inner (rtx dest,
37825 rtx op1,
37826 rtx op2,
37827 enum rtx_code code,
37828 machine_mode mode,
37829 bool complement_final_p,
37830 bool complement_op1_p,
37831 bool complement_op2_p)
37833 rtx bool_rtx;
37835 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37836 if (op2 && GET_CODE (op2) == CONST_INT
37837 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37838 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37840 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37841 HOST_WIDE_INT value = INTVAL (op2) & mask;
37843 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37844 if (code == AND)
37846 if (value == 0)
37848 emit_insn (gen_rtx_SET (dest, const0_rtx));
37849 return;
37852 else if (value == mask)
37854 if (!rtx_equal_p (dest, op1))
37855 emit_insn (gen_rtx_SET (dest, op1));
37856 return;
37860 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37861 into separate ORI/ORIS or XORI/XORIS instructions. */
37862 else if (code == IOR || code == XOR)
37864 if (value == 0)
37866 if (!rtx_equal_p (dest, op1))
37867 emit_insn (gen_rtx_SET (dest, op1));
37868 return;
37873 if (code == AND && mode == SImode
37874 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37876 emit_insn (gen_andsi3 (dest, op1, op2));
37877 return;
37880 if (complement_op1_p)
37881 op1 = gen_rtx_NOT (mode, op1);
37883 if (complement_op2_p)
37884 op2 = gen_rtx_NOT (mode, op2);
37886 /* For canonical RTL, if only one arm is inverted it is the first. */
37887 if (!complement_op1_p && complement_op2_p)
37888 std::swap (op1, op2);
37890 bool_rtx = ((code == NOT)
37891 ? gen_rtx_NOT (mode, op1)
37892 : gen_rtx_fmt_ee (code, mode, op1, op2));
37894 if (complement_final_p)
37895 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37897 emit_insn (gen_rtx_SET (dest, bool_rtx));
37900 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37901 operations are split immediately during RTL generation to allow for more
37902 optimizations of the AND/IOR/XOR.
37904 OPERANDS is an array containing the destination and two input operands.
37905 CODE is the base operation (AND, IOR, XOR, NOT).
37906 MODE is the machine mode.
37907 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37908 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37909 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37913 static void
37914 rs6000_split_logical_di (rtx operands[3],
37915 enum rtx_code code,
37916 bool complement_final_p,
37917 bool complement_op1_p,
37918 bool complement_op2_p)
37920 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37921 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37922 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37923 enum hi_lo { hi = 0, lo = 1 };
37924 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37925 size_t i;
37927 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37928 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37929 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37930 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37932 if (code == NOT)
37933 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37934 else
37936 if (GET_CODE (operands[2]) != CONST_INT)
37938 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37939 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37941 else
37943 HOST_WIDE_INT value = INTVAL (operands[2]);
37944 HOST_WIDE_INT value_hi_lo[2];
37946 gcc_assert (!complement_final_p);
37947 gcc_assert (!complement_op1_p);
37948 gcc_assert (!complement_op2_p);
37950 value_hi_lo[hi] = value >> 32;
37951 value_hi_lo[lo] = value & lower_32bits;
37953 for (i = 0; i < 2; i++)
37955 HOST_WIDE_INT sub_value = value_hi_lo[i];
37957 if (sub_value & sign_bit)
37958 sub_value |= upper_32bits;
37960 op2_hi_lo[i] = GEN_INT (sub_value);
37962 /* If this is an AND instruction, check to see if we need to load
37963 the value in a register. */
37964 if (code == AND && sub_value != -1 && sub_value != 0
37965 && !and_operand (op2_hi_lo[i], SImode))
37966 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37971 for (i = 0; i < 2; i++)
37973 /* Split large IOR/XOR operations. */
37974 if ((code == IOR || code == XOR)
37975 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37976 && !complement_final_p
37977 && !complement_op1_p
37978 && !complement_op2_p
37979 && !logical_const_operand (op2_hi_lo[i], SImode))
37981 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37982 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37983 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37984 rtx tmp = gen_reg_rtx (SImode);
37986 /* Make sure the constant is sign extended. */
37987 if ((hi_16bits & sign_bit) != 0)
37988 hi_16bits |= upper_32bits;
37990 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37991 code, SImode, false, false, false);
37993 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37994 code, SImode, false, false, false);
37996 else
37997 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37998 code, SImode, complement_final_p,
37999 complement_op1_p, complement_op2_p);
38002 return;
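/* Worked example (not part of the original source): an SImode IOR with
   the non-logical constant 0x12345678 is split above into two
   instructions,

       oris rT,rA,0x1234      # high 16 bits, shifted left by 16
       ori  rD,rT,0x5678      # low 16 bits

   since no single ori/oris can encode a full 32-bit immediate.  */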
38005 /* Split the insns that make up boolean operations operating on multiple GPR
38006 registers. The boolean MD patterns ensure that the inputs either are
38007 exactly the same as the output registers, or there is no overlap.
38009 OPERANDS is an array containing the destination and two input operands.
38010 CODE is the base operation (AND, IOR, XOR, NOT).
38011 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38012 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38013 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38015 void
38016 rs6000_split_logical (rtx operands[3],
38017 enum rtx_code code,
38018 bool complement_final_p,
38019 bool complement_op1_p,
38020 bool complement_op2_p)
38022 machine_mode mode = GET_MODE (operands[0]);
38023 machine_mode sub_mode;
38024 rtx op0, op1, op2;
38025 int sub_size, regno0, regno1, nregs, i;
38027 /* If this is DImode, use the specialized version that can run before
38028 register allocation. */
38029 if (mode == DImode && !TARGET_POWERPC64)
38031 rs6000_split_logical_di (operands, code, complement_final_p,
38032 complement_op1_p, complement_op2_p);
38033 return;
38036 op0 = operands[0];
38037 op1 = operands[1];
38038 op2 = (code == NOT) ? NULL_RTX : operands[2];
38039 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38040 sub_size = GET_MODE_SIZE (sub_mode);
38041 regno0 = REGNO (op0);
38042 regno1 = REGNO (op1);
38044 gcc_assert (reload_completed);
38045 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38046 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38048 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38049 gcc_assert (nregs > 1);
38051 if (op2 && REG_P (op2))
38052 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38054 for (i = 0; i < nregs; i++)
38056 int offset = i * sub_size;
38057 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38058 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38059 rtx sub_op2 = ((code == NOT)
38060 ? NULL_RTX
38061 : simplify_subreg (sub_mode, op2, mode, offset));
38063 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38064 complement_final_p, complement_op1_p,
38065 complement_op2_p);
38068 return;
38072 /* Return true if the peephole2 can combine a load involving a combination of
38073 an addis instruction and a load with an offset that can be fused together on
38074 a power8. */
38076 bool
38077 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38078 rtx addis_value, /* addis value. */
38079 rtx target, /* target register that is loaded. */
38080 rtx mem) /* bottom part of the memory addr. */
38082 rtx addr;
38083 rtx base_reg;
38085 /* Validate arguments. */
38086 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38087 return false;
38089 if (!base_reg_operand (target, GET_MODE (target)))
38090 return false;
38092 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38093 return false;
38095 /* Allow sign/zero extension. */
38096 if (GET_CODE (mem) == ZERO_EXTEND
38097 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38098 mem = XEXP (mem, 0);
38100 if (!MEM_P (mem))
38101 return false;
38103 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38104 return false;
38106 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38107 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38108 return false;
38110 /* Validate that the register used to load the high value is either the
38111 register being loaded, or we can safely replace its use.
38113 This function is only called from the peephole2 pass and we assume that
38114 there are 2 instructions in the peephole (addis and load), so we want to
38115 check that the target register is not used in the memory address and that
38116 the register holding the addis result is dead after the peephole. */
38117 if (REGNO (addis_reg) != REGNO (target))
38119 if (reg_mentioned_p (target, mem))
38120 return false;
38122 if (!peep2_reg_dead_p (2, addis_reg))
38123 return false;
38125 /* If the target register being loaded is the stack pointer, we must
38126 avoid loading any other value into it, even temporarily. */
38127 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38128 return false;
38131 base_reg = XEXP (addr, 0);
38132 return REGNO (addis_reg) == REGNO (base_reg);
38135 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38136 sequence. We adjust the addis register to use the target register. If the
38137 load sign extends, we adjust the code to do a zero-extending load plus an
38138 explicit sign extension afterwards, since the fusion only covers
38139 zero-extending loads.
38141 The operands are:
38142 operands[0] register set with addis (to be replaced with target)
38143 operands[1] value set via addis
38144 operands[2] target register being loaded
38145 operands[3] D-form memory reference using operands[0]. */
38147 void
38148 expand_fusion_gpr_load (rtx *operands)
38150 rtx addis_value = operands[1];
38151 rtx target = operands[2];
38152 rtx orig_mem = operands[3];
38153 rtx new_addr, new_mem, orig_addr, offset;
38154 enum rtx_code plus_or_lo_sum;
38155 machine_mode target_mode = GET_MODE (target);
38156 machine_mode extend_mode = target_mode;
38157 machine_mode ptr_mode = Pmode;
38158 enum rtx_code extend = UNKNOWN;
38160 if (GET_CODE (orig_mem) == ZERO_EXTEND
38161 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38163 extend = GET_CODE (orig_mem);
38164 orig_mem = XEXP (orig_mem, 0);
38165 target_mode = GET_MODE (orig_mem);
38168 gcc_assert (MEM_P (orig_mem));
38170 orig_addr = XEXP (orig_mem, 0);
38171 plus_or_lo_sum = GET_CODE (orig_addr);
38172 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38174 offset = XEXP (orig_addr, 1);
38175 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38176 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38178 if (extend != UNKNOWN)
38179 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38181 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38182 UNSPEC_FUSION_GPR);
38183 emit_insn (gen_rtx_SET (target, new_mem));
38185 if (extend == SIGN_EXTEND)
38187 int sub_off = ((BYTES_BIG_ENDIAN)
38188 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38189 : 0);
38190 rtx sign_reg
38191 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38193 emit_insn (gen_rtx_SET (target,
38194 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38197 return;
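/* Illustration (not part of the original source): since power8 fusion
   only covers zero-extending loads, a fused sign-extending load of a
   SImode value into a DImode register ends up roughly as

       addis r9,r2,sym@toc@ha
       lwz   r9,sym@toc@l(r9)    # fused zero-extending load
       extsw r9,r9               # explicit sign extension

   which is what the SIGN_EXTEND fix-up above arranges.  */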
38200 /* Emit the addis instruction that will be part of a fused instruction
38201 sequence. */
38203 void
38204 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
38205 const char *mode_name)
38207 rtx fuse_ops[10];
38208 char insn_template[80];
38209 const char *addis_str = NULL;
38210 const char *comment_str = ASM_COMMENT_START;
38212 if (*comment_str == ' ')
38213 comment_str++;
38215 /* Emit the addis instruction. */
38216 fuse_ops[0] = target;
38217 if (satisfies_constraint_L (addis_value))
38219 fuse_ops[1] = addis_value;
38220 addis_str = "lis %0,%v1";
38223 else if (GET_CODE (addis_value) == PLUS)
38225 rtx op0 = XEXP (addis_value, 0);
38226 rtx op1 = XEXP (addis_value, 1);
38228 if (REG_P (op0) && CONST_INT_P (op1)
38229 && satisfies_constraint_L (op1))
38231 fuse_ops[1] = op0;
38232 fuse_ops[2] = op1;
38233 addis_str = "addis %0,%1,%v2";
38237 else if (GET_CODE (addis_value) == HIGH)
38239 rtx value = XEXP (addis_value, 0);
38240 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38242 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38243 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38244 if (TARGET_ELF)
38245 addis_str = "addis %0,%2,%1@toc@ha";
38247 else if (TARGET_XCOFF)
38248 addis_str = "addis %0,%1@u(%2)";
38250 else
38251 gcc_unreachable ();
38254 else if (GET_CODE (value) == PLUS)
38256 rtx op0 = XEXP (value, 0);
38257 rtx op1 = XEXP (value, 1);
38259 if (GET_CODE (op0) == UNSPEC
38260 && XINT (op0, 1) == UNSPEC_TOCREL
38261 && CONST_INT_P (op1))
38263 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38264 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38265 fuse_ops[3] = op1;
38266 if (TARGET_ELF)
38267 addis_str = "addis %0,%2,%1+%3@toc@ha";
38269 else if (TARGET_XCOFF)
38270 addis_str = "addis %0,%1+%3@u(%2)";
38272 else
38273 gcc_unreachable ();
38277 else if (satisfies_constraint_L (value))
38279 fuse_ops[1] = value;
38280 addis_str = "lis %0,%v1";
38283 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38285 fuse_ops[1] = value;
38286 addis_str = "lis %0,%1@ha";
38290 if (!addis_str)
38291 fatal_insn ("Could not generate addis value for fusion", addis_value);
38293 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
38294 comment, mode_name);
38295 output_asm_insn (insn_template, fuse_ops);
38298 /* Emit a D-form load or store instruction that is the second instruction
38299 of a fusion sequence. */
38301 void
38302 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38303 const char *insn_str)
38305 rtx fuse_ops[10];
38306 char insn_template[80];
38308 fuse_ops[0] = load_store_reg;
38309 fuse_ops[1] = addis_reg;
38311 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38313 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38314 fuse_ops[2] = offset;
38315 output_asm_insn (insn_template, fuse_ops);
38318 else if (GET_CODE (offset) == UNSPEC
38319 && XINT (offset, 1) == UNSPEC_TOCREL)
38321 if (TARGET_ELF)
38322 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38324 else if (TARGET_XCOFF)
38325 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38327 else
38328 gcc_unreachable ();
38330 fuse_ops[2] = XVECEXP (offset, 0, 0);
38331 output_asm_insn (insn_template, fuse_ops);
38334 else if (GET_CODE (offset) == PLUS
38335 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38336 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38337 && CONST_INT_P (XEXP (offset, 1)))
38339 rtx tocrel_unspec = XEXP (offset, 0);
38340 if (TARGET_ELF)
38341 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38343 else if (TARGET_XCOFF)
38344 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38346 else
38347 gcc_unreachable ();
38349 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38350 fuse_ops[3] = XEXP (offset, 1);
38351 output_asm_insn (insn_template, fuse_ops);
38354 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38356 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38358 fuse_ops[2] = offset;
38359 output_asm_insn (insn_template, fuse_ops);
38362 else
38363 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38365 return;
38368 /* Wrap a TOC address that can be fused to indicate that special fusion
38369 processing is needed. */
38371 rtx
38372 fusion_wrap_memory_address (rtx old_mem)
38374 rtx old_addr = XEXP (old_mem, 0);
38375 rtvec v = gen_rtvec (1, old_addr);
38376 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38377 return replace_equiv_address_nv (old_mem, new_addr, false);
38380 /* Given an address, convert it into the addis and load offset parts. Addresses
38381 created during the peephole2 process look like:
38382 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38383 (unspec [(...)] UNSPEC_TOCREL))
38385 Addresses created via toc fusion look like:
38386 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
38388 static void
38389 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38391 rtx hi, lo;
38393 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38395 lo = XVECEXP (addr, 0, 0);
38396 hi = gen_rtx_HIGH (Pmode, lo);
38398 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38400 hi = XEXP (addr, 0);
38401 lo = XEXP (addr, 1);
38403 else
38404 gcc_unreachable ();
38406 *p_hi = hi;
38407 *p_lo = lo;
38410 /* Return a string to fuse an addis instruction with a gpr load into the same
38411 register that the addis instruction set up. The address that is used
38412 is the logical address that was formed during peephole2:
38413 (lo_sum (high) (low-part))
38415 Or the address is the TOC address that is wrapped before register allocation:
38416 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38418 The code is complicated, so we call output_asm_insn directly, and just
38419 return "". */
38421 const char *
38422 emit_fusion_gpr_load (rtx target, rtx mem)
38424 rtx addis_value;
38425 rtx addr;
38426 rtx load_offset;
38427 const char *load_str = NULL;
38428 const char *mode_name = NULL;
38429 machine_mode mode;
38431 if (GET_CODE (mem) == ZERO_EXTEND)
38432 mem = XEXP (mem, 0);
38434 gcc_assert (REG_P (target) && MEM_P (mem));
38436 addr = XEXP (mem, 0);
38437 fusion_split_address (addr, &addis_value, &load_offset);
38439 /* Now emit the load instruction to the same register. */
38440 mode = GET_MODE (mem);
38441 switch (mode)
38443 case E_QImode:
38444 mode_name = "char";
38445 load_str = "lbz";
38446 break;
38448 case E_HImode:
38449 mode_name = "short";
38450 load_str = "lhz";
38451 break;
38453 case E_SImode:
38454 case E_SFmode:
38455 mode_name = (mode == SFmode) ? "float" : "int";
38456 load_str = "lwz";
38457 break;
38459 case E_DImode:
38460 case E_DFmode:
38461 gcc_assert (TARGET_POWERPC64);
38462 mode_name = (mode == DFmode) ? "double" : "long";
38463 load_str = "ld";
38464 break;
38466 default:
38467 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38470 /* Emit the addis instruction. */
38471 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
38473 /* Emit the D-form load instruction. */
38474 emit_fusion_load_store (target, target, load_offset, load_str);
38476 return "";
38480 /* Return true if the peephole2 can combine a load/store involving a
38481 combination of an addis instruction and the memory operation. This fusion
38482 was added in ISA 3.0 (power9) hardware. */
38484 bool
38485 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38486 rtx addis_value, /* addis value. */
38487 rtx dest, /* destination (memory or register). */
38488 rtx src) /* source (register or memory). */
38490 rtx addr, mem, offset;
38491 machine_mode mode = GET_MODE (src);
38493 /* Validate arguments. */
38494 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38495 return false;
38497 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38498 return false;
38500 /* Ignore extend operations that are part of the load. */
38501 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38502 src = XEXP (src, 0);
38504 /* Test for memory<-register or register<-memory. */
38505 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38507 if (!MEM_P (dest))
38508 return false;
38510 mem = dest;
38513 else if (MEM_P (src))
38515 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38516 return false;
38518 mem = src;
38521 else
38522 return false;
38524 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38525 if (GET_CODE (addr) == PLUS)
38527 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38528 return false;
38530 return satisfies_constraint_I (XEXP (addr, 1));
38533 else if (GET_CODE (addr) == LO_SUM)
38535 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38536 return false;
38538 offset = XEXP (addr, 1);
38539 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38540 return small_toc_ref (offset, GET_MODE (offset));
38542 else if (TARGET_ELF && !TARGET_POWERPC64)
38543 return CONSTANT_P (offset);
38546 return false;
38549 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38550 load sequence.
38552 The operands are:
38553 operands[0] register set with addis
38554 operands[1] value set via addis
38555 operands[2] target register being loaded
38556 operands[3] D-form memory reference using operands[0].
38558 This is similar to the fusion introduced with power8, except it scales to
38559 both loads/stores and does not require the result register to be the same as
38560 the base register. At the moment, we only do this if the register set with
38561 addis is dead. */
38563 void
38564 expand_fusion_p9_load (rtx *operands)
38566 rtx tmp_reg = operands[0];
38567 rtx addis_value = operands[1];
38568 rtx target = operands[2];
38569 rtx orig_mem = operands[3];
38570 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38571 enum rtx_code plus_or_lo_sum;
38572 machine_mode target_mode = GET_MODE (target);
38573 machine_mode extend_mode = target_mode;
38574 machine_mode ptr_mode = Pmode;
38575 enum rtx_code extend = UNKNOWN;
38577 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38579 extend = GET_CODE (orig_mem);
38580 orig_mem = XEXP (orig_mem, 0);
38581 target_mode = GET_MODE (orig_mem);
38584 gcc_assert (MEM_P (orig_mem));
38586 orig_addr = XEXP (orig_mem, 0);
38587 plus_or_lo_sum = GET_CODE (orig_addr);
38588 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38590 offset = XEXP (orig_addr, 1);
38591 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38592 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38594 if (extend != UNKNOWN)
38595 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38597 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38598 UNSPEC_FUSION_P9);
38600 set = gen_rtx_SET (target, new_mem);
38601 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38602 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38603 emit_insn (insn);
38605 return;
38608 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38609 store sequence.
38611 The operands are:
38612 operands[0] register set with addis
38613 operands[1] value set via addis
38614 operands[2] target D-form memory being stored to
38615 operands[3] register being stored
38617 This is similar to the fusion introduced with power8, except it scales to
38618 both loads/stores and does not require the result register to be the same as
38619 the base register. At the moment, we only do this if the register set with
38620 addis is dead. */
38622 void
38623 expand_fusion_p9_store (rtx *operands)
38625 rtx tmp_reg = operands[0];
38626 rtx addis_value = operands[1];
38627 rtx orig_mem = operands[2];
38628 rtx src = operands[3];
38629 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38630 enum rtx_code plus_or_lo_sum;
38631 machine_mode target_mode = GET_MODE (orig_mem);
38632 machine_mode ptr_mode = Pmode;
38634 gcc_assert (MEM_P (orig_mem));
38636 orig_addr = XEXP (orig_mem, 0);
38637 plus_or_lo_sum = GET_CODE (orig_addr);
38638 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38640 offset = XEXP (orig_addr, 1);
38641 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38642 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38644 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38645 UNSPEC_FUSION_P9);
38647 set = gen_rtx_SET (new_mem, new_src);
38648 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38649 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38650 emit_insn (insn);
38652 return;
38655 /* Return a string to fuse an addis instruction with a load using extended
38656 fusion. The address that is used is the logical address that was formed
38657 during peephole2: (lo_sum (high) (low-part))
38659 The code is complicated, so we call output_asm_insn directly, and just
38660 return "". */
38662 const char *
38663 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
38664 {
38665 machine_mode mode = GET_MODE (reg);
38666 rtx hi;
38667 rtx lo;
38668 rtx addr;
38669 const char *load_string;
38670 int r;
38672 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
38673 {
38674 mem = XEXP (mem, 0);
38675 mode = GET_MODE (mem);
38676 }
38678 if (GET_CODE (reg) == SUBREG)
38679 {
38680 gcc_assert (SUBREG_BYTE (reg) == 0);
38681 reg = SUBREG_REG (reg);
38682 }
38684 if (!REG_P (reg))
38685 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
38687 r = REGNO (reg);
38688 if (FP_REGNO_P (r))
38689 {
38690 if (mode == SFmode)
38691 load_string = "lfs";
38692 else if (mode == DFmode || mode == DImode)
38693 load_string = "lfd";
38694 else
38695 gcc_unreachable ();
38696 }
38697 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38698 {
38699 if (mode == SFmode)
38700 load_string = "lxssp";
38701 else if (mode == DFmode || mode == DImode)
38702 load_string = "lxsd";
38703 else
38704 gcc_unreachable ();
38705 }
38706 else if (INT_REGNO_P (r))
38707 {
38708 switch (mode)
38709 {
38710 case E_QImode:
38711 load_string = "lbz";
38712 break;
38713 case E_HImode:
38714 load_string = "lhz";
38715 break;
38716 case E_SImode:
38717 case E_SFmode:
38718 load_string = "lwz";
38719 break;
38720 case E_DImode:
38721 case E_DFmode:
38722 if (!TARGET_POWERPC64)
38723 gcc_unreachable ();
38724 load_string = "ld";
38725 break;
38726 default:
38727 gcc_unreachable ();
38728 }
38729 }
38730 else
38731 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38733 if (!MEM_P (mem))
38734 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38736 addr = XEXP (mem, 0);
38737 fusion_split_address (addr, &hi, &lo);
38739 /* Emit the addis instruction. */
38740 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
38742 /* Emit the D-form load instruction. */
38743 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38745 return "";
38746 }
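/* For illustration: for an SImode GPR load from a TOC-relative address, the
   two emit calls above produce an adjacent pair along these lines (register
   numbers and the symbol are hypothetical):

     addis 12,2,var@toc@ha     # emit_fusion_addis
     lwz 9,var@toc@l(12)       # emit_fusion_load_store with "lwz"

   Keeping the pair adjacent lets a power9 core fuse the addis with the
   dependent D-form load.  */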
38748 /* Return a string to fuse an addis instruction with a store using extended
38749 fusion. The address that is used is the logical address that was formed
38750 during peephole2: (lo_sum (high) (low-part))
38752 The code is complicated, so we call output_asm_insn directly, and just
38753 return "". */
38755 const char *
38756 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38757 {
38758 machine_mode mode = GET_MODE (reg);
38759 rtx hi;
38760 rtx lo;
38761 rtx addr;
38762 const char *store_string;
38763 int r;
38765 if (GET_CODE (reg) == SUBREG)
38766 {
38767 gcc_assert (SUBREG_BYTE (reg) == 0);
38768 reg = SUBREG_REG (reg);
38769 }
38771 if (!REG_P (reg))
38772 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38774 r = REGNO (reg);
38775 if (FP_REGNO_P (r))
38776 {
38777 if (mode == SFmode)
38778 store_string = "stfs";
38779 else if (mode == DFmode)
38780 store_string = "stfd";
38781 else
38782 gcc_unreachable ();
38783 }
38784 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38785 {
38786 if (mode == SFmode)
38787 store_string = "stxssp";
38788 else if (mode == DFmode || mode == DImode)
38789 store_string = "stxsd";
38790 else
38791 gcc_unreachable ();
38792 }
38793 else if (INT_REGNO_P (r))
38794 {
38795 switch (mode)
38796 {
38797 case E_QImode:
38798 store_string = "stb";
38799 break;
38800 case E_HImode:
38801 store_string = "sth";
38802 break;
38803 case E_SImode:
38804 case E_SFmode:
38805 store_string = "stw";
38806 break;
38807 case E_DImode:
38808 case E_DFmode:
38809 if (!TARGET_POWERPC64)
38810 gcc_unreachable ();
38811 store_string = "std";
38812 break;
38813 default:
38814 gcc_unreachable ();
38815 }
38816 }
38817 else
38818 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38820 if (!MEM_P (mem))
38821 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38823 addr = XEXP (mem, 0);
38824 fusion_split_address (addr, &hi, &lo);
38826 /* Emit the addis instruction. */
38827 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
38829 /* Emit the D-form store instruction. */
38830 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38832 return "";
38833 }
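/* For illustration, the store analogue of the sketch above (again with
   hypothetical operands):

     addis 12,2,var@toc@ha
     stw 9,var@toc@l(12)  */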
38835 #ifdef RS6000_GLIBC_ATOMIC_FENV
38836 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38837 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38838 #endif
38840 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38842 static void
38843 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38844 {
38845 if (!TARGET_HARD_FLOAT)
38846 {
38847 #ifdef RS6000_GLIBC_ATOMIC_FENV
38848 if (atomic_hold_decl == NULL_TREE)
38849 {
38850 atomic_hold_decl
38851 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38852 get_identifier ("__atomic_feholdexcept"),
38853 build_function_type_list (void_type_node,
38854 double_ptr_type_node,
38855 NULL_TREE));
38856 TREE_PUBLIC (atomic_hold_decl) = 1;
38857 DECL_EXTERNAL (atomic_hold_decl) = 1;
38858 }
38860 if (atomic_clear_decl == NULL_TREE)
38861 {
38862 atomic_clear_decl
38863 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38864 get_identifier ("__atomic_feclearexcept"),
38865 build_function_type_list (void_type_node,
38866 NULL_TREE));
38867 TREE_PUBLIC (atomic_clear_decl) = 1;
38868 DECL_EXTERNAL (atomic_clear_decl) = 1;
38869 }
38871 tree const_double = build_qualified_type (double_type_node,
38872 TYPE_QUAL_CONST);
38873 tree const_double_ptr = build_pointer_type (const_double);
38874 if (atomic_update_decl == NULL_TREE)
38875 {
38876 atomic_update_decl
38877 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38878 get_identifier ("__atomic_feupdateenv"),
38879 build_function_type_list (void_type_node,
38880 const_double_ptr,
38881 NULL_TREE));
38882 TREE_PUBLIC (atomic_update_decl) = 1;
38883 DECL_EXTERNAL (atomic_update_decl) = 1;
38884 }
38886 tree fenv_var = create_tmp_var_raw (double_type_node);
38887 TREE_ADDRESSABLE (fenv_var) = 1;
38888 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38890 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38891 *clear = build_call_expr (atomic_clear_decl, 0);
38892 *update = build_call_expr (atomic_update_decl, 1,
38893 fold_convert (const_double_ptr, fenv_addr));
38894 #endif
38895 return;
38896 }
38898 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38899 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38900 tree call_mffs = build_call_expr (mffs, 0);
38902 /* Generates the equivalent of feholdexcept (&fenv_var):
38904 *fenv_var = __builtin_mffs ();
38905 double fenv_hold;
38906 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
38907 __builtin_mtfsf (0xff, fenv_hold); */
38909 /* Mask to clear everything except for the rounding modes and non-IEEE
38910 arithmetic flag. */
38911 const unsigned HOST_WIDE_INT hold_exception_mask =
38912 HOST_WIDE_INT_C (0xffffffff00000007);
38914 tree fenv_var = create_tmp_var_raw (double_type_node);
38916 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38918 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38919 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38920 build_int_cst (uint64_type_node,
38921 hold_exception_mask));
38923 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38924 fenv_llu_and);
38926 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38927 build_int_cst (unsigned_type_node, 0xff),
38928 fenv_hold_mtfsf);
38930 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38932 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38934 double fenv_clear = __builtin_mffs ();
38935 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38936 __builtin_mtfsf (0xff, fenv_clear); */
38938 /* Mask to clear the entire lower word of the FPSCR, including the
38939 exception, status, and rounding-mode bits. */
38940 const unsigned HOST_WIDE_INT clear_exception_mask =
38941 HOST_WIDE_INT_C (0xffffffff00000000);
38943 tree fenv_clear = create_tmp_var_raw (double_type_node);
38945 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38947 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38948 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38949 fenv_clean_llu,
38950 build_int_cst (uint64_type_node,
38951 clear_exception_mask));
38953 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38954 fenv_clear_llu_and);
38956 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38957 build_int_cst (unsigned_type_node, 0xff),
38958 fenv_clear_mtfsf);
38960 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38962 /* Generates the equivalent of feupdateenv (&fenv_var):
38964 double old_fenv = __builtin_mffs ();
38965 double fenv_update;
38966 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
38967 (*(uint64_t*)fenv_var & 0x1ff80fff);
38968 __builtin_mtfsf (0xff, fenv_update); */
38970 const unsigned HOST_WIDE_INT update_exception_mask =
38971 HOST_WIDE_INT_C (0xffffffff1fffff00);
38972 const unsigned HOST_WIDE_INT new_exception_mask =
38973 HOST_WIDE_INT_C (0x1ff80fff);
38975 tree old_fenv = create_tmp_var_raw (double_type_node);
38976 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
38978 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
38979 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
38980 build_int_cst (uint64_type_node,
38981 update_exception_mask));
38983 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38984 build_int_cst (uint64_type_node,
38985 new_exception_mask));
38987 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
38988 old_llu_and, new_llu_and);
38990 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38991 new_llu_mask);
38993 tree update_mtfsf = build_call_expr (mtfsf, 2,
38994 build_int_cst (unsigned_type_node, 0xff),
38995 fenv_update_mtfsf);
38997 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
38998 }
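/* For illustration, the update sequence built just above corresponds to
   this hypothetical C, where bits_of/double_of stand for the bit-pattern
   reinterpretations done with VIEW_CONVERT_EXPR (they are not real
   builtins):

     double old_fenv = __builtin_mffs ();
     uint64_t merged = (bits_of (old_fenv) & 0xffffffff1fffff00ULL)
                       | (bits_of (fenv_var) & 0x1ff80fffULL);
     __builtin_mtfsf (0xff, double_of (merged));

   i.e. merge the environment saved by *hold back into the current one and
   write the result to the FPSCR with mtfsf.  */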
39000 void
39001 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39002 {
39003 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39005 rtx_tmp0 = gen_reg_rtx (V2DImode);
39006 rtx_tmp1 = gen_reg_rtx (V2DImode);
39008 /* The destination of the vmrgew instruction layout is:
39009 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39010 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39011 vmrgew instruction will be correct. */
39012 if (VECTOR_ELT_ORDER_BIG)
39013 {
39014 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39015 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39016 }
39017 else
39018 {
39019 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39020 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39021 }
39023 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39024 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39026 if (signed_convert)
39027 {
39028 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39029 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39030 }
39031 else
39032 {
39033 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39034 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39035 }
39037 if (VECTOR_ELT_ORDER_BIG)
39038 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39039 else
39040 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39041 }
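/* For illustration: ignoring endian details, the sequence above implements
   the hypothetical scalar equivalent

     dst[0] = (float) src1[0];
     dst[1] = (float) src1[1];
     dst[2] = (float) src2[0];
     dst[3] = (float) src2[1];

   xxpermdi pairs up the input doublewords, xvcvsxdsp/xvcvuxdsp converts
   each 64-bit element to single precision in the even word slots, and
   vmrgew interleaves the two partial results.  */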
39043 void
39044 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39045 rtx src2)
39046 {
39047 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39049 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39050 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39052 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39053 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39055 rtx_tmp2 = gen_reg_rtx (V4SImode);
39056 rtx_tmp3 = gen_reg_rtx (V4SImode);
39058 if (signed_convert)
39059 {
39060 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39061 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39062 }
39063 else
39064 {
39065 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39066 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39067 }
39069 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39070 }
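/* For illustration: as with float2 above, the net effect is the
   hypothetical scalar equivalent

     dst[0] = (int) src1[0];
     dst[1] = (int) src1[1];
     dst[2] = (int) src2[0];
     dst[3] = (int) src2[1];

   with xvcvdpsxws/xvcvdpuxws performing the double-to-word conversions.  */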
39072 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39074 static bool
39075 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39076 optimization_type opt_type)
39077 {
39078 switch (op)
39079 {
39080 case rsqrt_optab:
39081 return (opt_type == OPTIMIZE_FOR_SPEED
39082 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39084 default:
39085 return true;
39086 }
39087 }
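/* For illustration: under fast-math options with -mrecip, a source function
   such as

     double rsqrt (double x) { return 1.0 / __builtin_sqrt (x); }

   can go through the rsqrt optab and use the hardware reciprocal
   square-root estimate plus Newton-Raphson refinement; this hook allows
   that only when optimizing for speed, so at -Os the ordinary
   sqrt-and-divide sequence is kept.  */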
39089 struct gcc_target targetm = TARGET_INITIALIZER;
39091 #include "gt-rs6000.h"