/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
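
/* For example, -mrecip=divf,rsqrtd selects the "divf" and "rsqrtd" entries
   above, enabling the single-precision divide estimates (RECIP_SF_DIV
   | RECIP_V4SF_DIV) and the double-precision reciprocal square root
   estimates (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT).  */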
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
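
/* Illustrative use from user code (not part of this file): these tables
   back the GCC builtins __builtin_cpu_is and __builtin_cpu_supports, so a
   caller can pick a code path at run time, e.g.

     if (__builtin_cpu_is ("power9") || __builtin_cpu_supports ("vsx"))
       ... use the POWER9/VSX implementation ...

   The string argument must match one of the table entries above.  */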
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};
/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
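
/* Illustrative use from user code (not part of this file): this map backs
   function multi-versioning with the target_clones attribute, e.g.

     __attribute__ ((target_clones ("cpu=power9,cpu=power8,default")))
     long mod_func (long a, long b) { return a % b; }

   where the generated resolver tests the HWCAP names above (via
   __builtin_cpu_supports) to pick a clone at load time.  */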
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};
/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or legitimate address
   checking.  We only need to worry about GPR, FPR, and AltiVec registers
   here, along with an ANY field that is the OR of the 3 register classes.  */
enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};
/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
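
/* Illustrative reading of the bits above: a mode that is valid in GPRs with
   both reg+offset and reg+reg addressing would have
   (RELOAD_REG_VALID | RELOAD_REG_OFFSET | RELOAD_REG_INDEXED) set in
   reg_addr[mode].addr_mask[RELOAD_REG_GPR] (see struct rs6000_reg_addr
   below).  */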
/* Per-mode addressing information: the mask of valid addressing forms for
   each register type, plus the insns used for reloads and fusion.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	  /* INSN to reload for loading.  */
  enum insn_code reload_store;	  /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;  /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;  /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;  /* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;	  /* INSN for fusing gpr ADDIS/loads.  */
  /* INSNs for fusing addi with loads or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
  /* INSNs for fusing addis with loads or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;		  /* Scalar value can go in VMX.  */
  bool fused_toc;		  /* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
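
/* A minimal sketch of how a caller might consult these predicates when
   validating an auto-increment address (hypothetical code, not from this
   file):

     if (GET_CODE (addr) == PRE_INC && !mode_supports_pre_incdec_p (mode))
       return false;
     if (GET_CODE (addr) == PRE_MODIFY && !mode_supports_pre_modify_p (mode))
       return false;
*/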
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */
bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add) */
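
/* Illustrative note: COSTS_N_INSNS (n) expresses a cost of roughly n add
   instructions, so a table entry such as power7_cost's COSTS_N_INSNS (18)
   for divsi rates an SImode divide at about eighteen times the cost of an
   add.  */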
/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];	  /* return value + 3 arguments.  */
  unsigned char uns_p[4]; /* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
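
/* Worked example: ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is
   0x80000000 >> 0 == 0x80000000, so %v0 occupies the most significant bit of
   the VRSAVE mask, while the last AltiVec register gets
   0x80000000 >> 31 == 0x00000001.  */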
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1703 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1704 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1705 rs6000_builtin_vectorization_cost
1706 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1707 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1708 rs6000_preferred_simd_mode
1709 #undef TARGET_VECTORIZE_INIT_COST
1710 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1711 #undef TARGET_VECTORIZE_ADD_STMT_COST
1712 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1713 #undef TARGET_VECTORIZE_FINISH_COST
1714 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1715 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1716 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1718 #undef TARGET_INIT_BUILTINS
1719 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1720 #undef TARGET_BUILTIN_DECL
1721 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1723 #undef TARGET_FOLD_BUILTIN
1724 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1725 #undef TARGET_GIMPLE_FOLD_BUILTIN
1726 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1728 #undef TARGET_EXPAND_BUILTIN
1729 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1731 #undef TARGET_MANGLE_TYPE
1732 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1734 #undef TARGET_INIT_LIBFUNCS
1735 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1737 #if TARGET_MACHO
1738 #undef TARGET_BINDS_LOCAL_P
1739 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1740 #endif
1742 #undef TARGET_MS_BITFIELD_LAYOUT_P
1743 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1745 #undef TARGET_ASM_OUTPUT_MI_THUNK
1746 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1748 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1749 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1751 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1752 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1754 #undef TARGET_REGISTER_MOVE_COST
1755 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1756 #undef TARGET_MEMORY_MOVE_COST
1757 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1758 #undef TARGET_CANNOT_COPY_INSN_P
1759 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1760 #undef TARGET_RTX_COSTS
1761 #define TARGET_RTX_COSTS rs6000_rtx_costs
1762 #undef TARGET_ADDRESS_COST
1763 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1765 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1766 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1768 #undef TARGET_PROMOTE_FUNCTION_MODE
1769 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1771 #undef TARGET_RETURN_IN_MEMORY
1772 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1774 #undef TARGET_RETURN_IN_MSB
1775 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1777 #undef TARGET_SETUP_INCOMING_VARARGS
1778 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1780 /* Always strict argument naming on rs6000. */
1781 #undef TARGET_STRICT_ARGUMENT_NAMING
1782 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1783 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1784 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1785 #undef TARGET_SPLIT_COMPLEX_ARG
1786 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1787 #undef TARGET_MUST_PASS_IN_STACK
1788 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1789 #undef TARGET_PASS_BY_REFERENCE
1790 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1791 #undef TARGET_ARG_PARTIAL_BYTES
1792 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1793 #undef TARGET_FUNCTION_ARG_ADVANCE
1794 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1795 #undef TARGET_FUNCTION_ARG
1796 #define TARGET_FUNCTION_ARG rs6000_function_arg
1797 #undef TARGET_FUNCTION_ARG_BOUNDARY
1798 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1800 #undef TARGET_BUILD_BUILTIN_VA_LIST
1801 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1803 #undef TARGET_EXPAND_BUILTIN_VA_START
1804 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1806 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1807 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1809 #undef TARGET_EH_RETURN_FILTER_MODE
1810 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1812 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1813 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1815 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1816 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1818 #undef TARGET_FLOATN_MODE
1819 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1821 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1822 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1824 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1825 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1827 #undef TARGET_MD_ASM_ADJUST
1828 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1830 #undef TARGET_OPTION_OVERRIDE
1831 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1833 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1834 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1835 rs6000_builtin_vectorized_function
1837 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1838 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1839 rs6000_builtin_md_vectorized_function
1841 #undef TARGET_STACK_PROTECT_GUARD
1842 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1844 #if !TARGET_MACHO
1845 #undef TARGET_STACK_PROTECT_FAIL
1846 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1847 #endif
1849 #ifdef HAVE_AS_TLS
1850 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1851 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1852 #endif
1854 /* Use a 32-bit anchor range. This leads to sequences like:
1856 addis tmp,anchor,high
1857 add dest,tmp,low
1859 where tmp itself acts as an anchor, and can be shared between
1860 accesses to the same 64k page. */
1861 #undef TARGET_MIN_ANCHOR_OFFSET
1862 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1863 #undef TARGET_MAX_ANCHOR_OFFSET
1864 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
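/* Editorial example: with section anchors, two statics that land in the
   same 64k page share the ADDIS above, e.g.

     static int a, b;
     int sum (void) { return a + b; }

   compiles to a single addis plus two D-form lwz loads off the shared
   anchor.  */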
1865 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1866 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1867 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1868 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1870 #undef TARGET_BUILTIN_RECIPROCAL
1871 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1873 #undef TARGET_SECONDARY_RELOAD
1874 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1876 #undef TARGET_LEGITIMATE_ADDRESS_P
1877 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1879 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1880 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1882 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1883 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1885 #undef TARGET_CAN_ELIMINATE
1886 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1888 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1889 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1891 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1892 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1894 #undef TARGET_TRAMPOLINE_INIT
1895 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1897 #undef TARGET_FUNCTION_VALUE
1898 #define TARGET_FUNCTION_VALUE rs6000_function_value
1900 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1901 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1903 #undef TARGET_OPTION_SAVE
1904 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1906 #undef TARGET_OPTION_RESTORE
1907 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1909 #undef TARGET_OPTION_PRINT
1910 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1912 #undef TARGET_CAN_INLINE_P
1913 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1915 #undef TARGET_SET_CURRENT_FUNCTION
1916 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1918 #undef TARGET_LEGITIMATE_CONSTANT_P
1919 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1921 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1922 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1924 #undef TARGET_CAN_USE_DOLOOP_P
1925 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1927 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1928 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1930 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1931 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1932 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1933 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1934 #undef TARGET_UNWIND_WORD_MODE
1935 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1937 #undef TARGET_OFFLOAD_OPTIONS
1938 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1940 #undef TARGET_C_MODE_FOR_SUFFIX
1941 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1943 #undef TARGET_INVALID_BINARY_OP
1944 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1946 #undef TARGET_OPTAB_SUPPORTED_P
1947 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1949 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1950 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1952 #undef TARGET_COMPARE_VERSION_PRIORITY
1953 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1955 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1956 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1957 rs6000_generate_version_dispatcher_body
1959 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1960 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1961 rs6000_get_function_versions_dispatcher
1963 #undef TARGET_OPTION_FUNCTION_VERSIONS
1964 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1968 /* Processor table. */
1969 struct rs6000_ptt
1971 const char *const name; /* Canonical processor name. */
1972 const enum processor_type processor; /* Processor type enum value. */
1973 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1976 static struct rs6000_ptt const processor_target_table[] =
1978 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1979 #include "rs6000-cpus.def"
1980 #undef RS6000_CPU
1983 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1984 name is invalid. */
1986 static int
1987 rs6000_cpu_name_lookup (const char *name)
1989 size_t i;
1991 if (name != NULL)
1993 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1994 if (! strcmp (name, processor_target_table[i].name))
1995 return (int)i;
1998 return -1;
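/* Editorial sketch: the typical use of the lookup when -mcpu=/-mtune= is
   processed, here with "power9" as an example argument:

     int cpu_index = rs6000_cpu_name_lookup ("power9");
     if (cpu_index >= 0)
       rs6000_isa_flags |= processor_target_table[cpu_index].target_enable;  */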
2002 /* Return number of consecutive hard regs needed starting at reg REGNO
2003 to hold something of mode MODE.
2004 This is ordinarily the length in words of a value of mode MODE
2005 but can be less for certain modes in special long registers.
2007 POWER and PowerPC GPRs hold 32 bits worth;
2008 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2010 static int
2011 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2013 unsigned HOST_WIDE_INT reg_size;
2015 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2016 128-bit floating point that can go in vector registers, which has VSX
2017 memory addressing. */
2018 if (FP_REGNO_P (regno))
2019 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2020 ? UNITS_PER_VSX_WORD
2021 : UNITS_PER_FP_WORD);
2023 else if (ALTIVEC_REGNO_P (regno))
2024 reg_size = UNITS_PER_ALTIVEC_WORD;
2026 else
2027 reg_size = UNITS_PER_WORD;
2029 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
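/* Editorial examples of the ceiling division above, assuming 8-byte FPRs:
   a 16-byte IBM long double needs (16 + 8 - 1) / 8 == 2 registers, while a
   4-byte SFmode value needs (4 + 8 - 1) / 8 == 1.  */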
2032 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2033 MODE. */
2034 static int
2035 rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
2037 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2039 if (COMPLEX_MODE_P (mode))
2040 mode = GET_MODE_INNER (mode);
2042 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2043 register pairs, and we use PTImode where we need to deal with quad word
2044 memory operations. Don't allow quad words in the argument or frame
2045 pointer registers, just registers 0..31. */
2046 if (mode == PTImode)
2047 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2048 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2049 && ((regno & 1) == 0));
2051 /* The VSX registers that overlap the FPR registers are wider than plain
2052 FPRs on non-VSX implementations. Don't allow an item to be split between a
2053 FP register and an Altivec register. Allow TImode in all VSX registers if
2054 the user asked for it. */
2055 if (TARGET_VSX && VSX_REGNO_P (regno)
2056 && (VECTOR_MEM_VSX_P (mode)
2057 || FLOAT128_VECTOR_P (mode)
2058 || reg_addr[mode].scalar_in_vmx_p
2059 || (TARGET_VSX_TIMODE && mode == TImode)
2060 || (TARGET_VADDUQM && mode == V1TImode)))
2062 if (FP_REGNO_P (regno))
2063 return FP_REGNO_P (last_regno);
2065 if (ALTIVEC_REGNO_P (regno))
2067 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2068 return 0;
2070 return ALTIVEC_REGNO_P (last_regno);
2074 /* The GPRs can hold any mode, but values bigger than one register
2075 cannot go past R31. */
2076 if (INT_REGNO_P (regno))
2077 return INT_REGNO_P (last_regno);
2079 /* The float registers (except for VSX vector modes) can only hold floating
2080 modes and DImode. */
2081 if (FP_REGNO_P (regno))
2083 if (FLOAT128_VECTOR_P (mode))
2084 return false;
2086 if (SCALAR_FLOAT_MODE_P (mode)
2087 && (mode != TDmode || (regno % 2) == 0)
2088 && FP_REGNO_P (last_regno))
2089 return 1;
2091 if (GET_MODE_CLASS (mode) == MODE_INT)
2093 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2094 return 1;
2096 if (TARGET_P8_VECTOR && (mode == SImode))
2097 return 1;
2099 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2100 return 1;
2103 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2104 && PAIRED_VECTOR_MODE (mode))
2105 return 1;
2107 return 0;
2110 /* The CR register can only hold CC modes. */
2111 if (CR_REGNO_P (regno))
2112 return GET_MODE_CLASS (mode) == MODE_CC;
2114 if (CA_REGNO_P (regno))
2115 return mode == Pmode || mode == SImode;
2117 /* AltiVec only in AltiVec registers. */
2118 if (ALTIVEC_REGNO_P (regno))
2119 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2120 || mode == V1TImode);
2122 /* We cannot put non-VSX TImode or PTImode anywhere except general registers,
2123 and it must be able to fit within the register set. */
2125 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
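/* Editorial sketch: sample queries one would expect this predicate to
   answer on a 64-bit VSX target:

     rs6000_hard_regno_mode_ok (0, DImode)          -> 1  (any GPR)
     rs6000_hard_regno_mode_ok (CR0_REGNO, CCmode)  -> 1  (CRs hold CC only)
     rs6000_hard_regno_mode_ok (CR0_REGNO, SImode)  -> 0  */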
2128 /* Print interesting facts about registers. */
2129 static void
2130 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2132 int r, m;
2134 for (r = first_regno; r <= last_regno; ++r)
2136 const char *comma = "";
2137 int len;
2139 if (first_regno == last_regno)
2140 fprintf (stderr, "%s:\t", reg_name);
2141 else
2142 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2144 len = 8;
2145 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2146 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2148 if (len > 70)
2150 fprintf (stderr, ",\n\t");
2151 len = 8;
2152 comma = "";
2155 if (rs6000_hard_regno_nregs[m][r] > 1)
2156 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2157 rs6000_hard_regno_nregs[m][r]);
2158 else
2159 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2161 comma = ", ";
2164 if (call_used_regs[r])
2166 if (len > 70)
2168 fprintf (stderr, ",\n\t");
2169 len = 8;
2170 comma = "";
2173 len += fprintf (stderr, "%s%s", comma, "call-used");
2174 comma = ", ";
2177 if (fixed_regs[r])
2179 if (len > 70)
2181 fprintf (stderr, ",\n\t");
2182 len = 8;
2183 comma = "";
2186 len += fprintf (stderr, "%s%s", comma, "fixed");
2187 comma = ", ";
2190 if (len > 70)
2192 fprintf (stderr, ",\n\t");
2193 comma = "";
2196 len += fprintf (stderr, "%sreg-class = %s", comma,
2197 reg_class_names[(int)rs6000_regno_regclass[r]]);
2198 comma = ", ";
2200 if (len > 70)
2202 fprintf (stderr, ",\n\t");
2203 comma = "";
2206 fprintf (stderr, "%sregno = %d\n", comma, r);
2210 static const char *
2211 rs6000_debug_vector_unit (enum rs6000_vector v)
2213 const char *ret;
2215 switch (v)
2217 case VECTOR_NONE: ret = "none"; break;
2218 case VECTOR_ALTIVEC: ret = "altivec"; break;
2219 case VECTOR_VSX: ret = "vsx"; break;
2220 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2221 case VECTOR_PAIRED: ret = "paired"; break;
2222 case VECTOR_OTHER: ret = "other"; break;
2223 default: ret = "unknown"; break;
2226 return ret;
2229 /* Inner function printing just the address mask for a particular reload
2230 register class. */
2231 DEBUG_FUNCTION char *
2232 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2234 static char ret[8];
2235 char *p = ret;
2237 if ((mask & RELOAD_REG_VALID) != 0)
2238 *p++ = 'v';
2239 else if (keep_spaces)
2240 *p++ = ' ';
2242 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2243 *p++ = 'm';
2244 else if (keep_spaces)
2245 *p++ = ' ';
2247 if ((mask & RELOAD_REG_INDEXED) != 0)
2248 *p++ = 'i';
2249 else if (keep_spaces)
2250 *p++ = ' ';
2252 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2253 *p++ = 'O';
2254 else if ((mask & RELOAD_REG_OFFSET) != 0)
2255 *p++ = 'o';
2256 else if (keep_spaces)
2257 *p++ = ' ';
2259 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2260 *p++ = '+';
2261 else if (keep_spaces)
2262 *p++ = ' ';
2264 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2265 *p++ = '+';
2266 else if (keep_spaces)
2267 *p++ = ' ';
2269 if ((mask & RELOAD_REG_AND_M16) != 0)
2270 *p++ = '&';
2271 else if (keep_spaces)
2272 *p++ = ' ';
2274 *p = '\0';
2276 return ret;
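/* Editorial example: a mode that is valid, indexed, and offsettable with
   both update forms prints as "v io++ " when KEEP_SPACES is true (absent
   flags become spaces) and as "vio++" otherwise.  */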
2279 /* Print the address masks in a human readable fashion. */
2280 DEBUG_FUNCTION void
2281 rs6000_debug_print_mode (ssize_t m)
2283 ssize_t rc;
2284 int spaces = 0;
2285 bool fuse_extra_p;
2287 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2288 for (rc = 0; rc < N_RELOAD_REG; rc++)
2289 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2290 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2292 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2293 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2294 fprintf (stderr, " Reload=%c%c",
2295 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2296 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2297 else
2298 spaces += sizeof (" Reload=sl") - 1;
2300 if (reg_addr[m].scalar_in_vmx_p)
2302 fprintf (stderr, "%*s Upper=y", spaces, "");
2303 spaces = 0;
2305 else
2306 spaces += sizeof (" Upper=y") - 1;
2308 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2309 || reg_addr[m].fused_toc);
2310 if (!fuse_extra_p)
2312 for (rc = 0; rc < N_RELOAD_REG; rc++)
2314 if (rc != RELOAD_REG_ANY)
2316 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2318 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2319 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2320 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2322 fuse_extra_p = true;
2323 break;
2329 if (fuse_extra_p)
2331 fprintf (stderr, "%*s Fuse:", spaces, "");
2332 spaces = 0;
2334 for (rc = 0; rc < N_RELOAD_REG; rc++)
2336 if (rc != RELOAD_REG_ANY)
2338 char load, store;
2340 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2341 load = 'l';
2342 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2343 load = 'L';
2344 else
2345 load = '-';
2347 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2348 store = 's';
2349 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2350 store = 'S';
2351 else
2352 store = '-';
2354 if (load == '-' && store == '-')
2355 spaces += 5;
2356 else
2358 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2359 reload_reg_map[rc].name[0], load, store);
2360 spaces = 0;
2365 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2367 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2368 spaces = 0;
2370 else
2371 spaces += sizeof (" P8gpr") - 1;
2373 if (reg_addr[m].fused_toc)
2375 fprintf (stderr, "%*sToc", (spaces + 1), "");
2376 spaces = 0;
2378 else
2379 spaces += sizeof (" Toc") - 1;
2381 else
2382 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2384 if (rs6000_vector_unit[m] != VECTOR_NONE
2385 || rs6000_vector_mem[m] != VECTOR_NONE)
2387 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2388 spaces, "",
2389 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2390 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2393 fputs ("\n", stderr);
2396 #define DEBUG_FMT_ID "%-32s= "
2397 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2398 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2399 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
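/* Editorial example: DEBUG_FMT_ID left-justifies the key in a 32-column
   field, so fprintf (stderr, DEBUG_FMT_S, "abi", "ELFv2") prints "abi"
   padded to 32 columns followed by "= ELFv2".  */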
2401 /* Print various interesting information with -mdebug=reg. */
2402 static void
2403 rs6000_debug_reg_global (void)
2405 static const char *const tf[2] = { "false", "true" };
2406 const char *nl = (const char *)0;
2407 int m;
2408 size_t m1, m2, v;
2409 char costly_num[20];
2410 char nop_num[20];
2411 char flags_buffer[40];
2412 const char *costly_str;
2413 const char *nop_str;
2414 const char *trace_str;
2415 const char *abi_str;
2416 const char *cmodel_str;
2417 struct cl_target_option cl_opts;
2419 /* Modes we want tieable information on. */
2420 static const machine_mode print_tieable_modes[] = {
2421 QImode,
2422 HImode,
2423 SImode,
2424 DImode,
2425 TImode,
2426 PTImode,
2427 SFmode,
2428 DFmode,
2429 TFmode,
2430 IFmode,
2431 KFmode,
2432 SDmode,
2433 DDmode,
2434 TDmode,
2435 V2SImode,
2436 V16QImode,
2437 V8HImode,
2438 V4SImode,
2439 V2DImode,
2440 V1TImode,
2441 V32QImode,
2442 V16HImode,
2443 V8SImode,
2444 V4DImode,
2445 V2TImode,
2446 V2SFmode,
2447 V4SFmode,
2448 V2DFmode,
2449 V8SFmode,
2450 V4DFmode,
2451 CCmode,
2452 CCUNSmode,
2453 CCEQmode,
2456 /* Virtual regs we are interested in. */
2457 static const struct {
2458 int regno; /* register number. */
2459 const char *name; /* register name. */
2460 } virtual_regs[] = {
2461 { STACK_POINTER_REGNUM, "stack pointer:" },
2462 { TOC_REGNUM, "toc: " },
2463 { STATIC_CHAIN_REGNUM, "static chain: " },
2464 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2465 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2466 { ARG_POINTER_REGNUM, "arg pointer: " },
2467 { FRAME_POINTER_REGNUM, "frame pointer:" },
2468 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2469 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2470 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2471 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2472 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2473 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2474 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2475 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2476 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2479 fputs ("\nHard register information:\n", stderr);
2480 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2481 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2482 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2483 LAST_ALTIVEC_REGNO,
2484 "vs");
2485 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2486 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2487 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2488 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2489 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2490 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2492 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2493 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2494 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2496 fprintf (stderr,
2497 "\n"
2498 "d reg_class = %s\n"
2499 "f reg_class = %s\n"
2500 "v reg_class = %s\n"
2501 "wa reg_class = %s\n"
2502 "wb reg_class = %s\n"
2503 "wd reg_class = %s\n"
2504 "we reg_class = %s\n"
2505 "wf reg_class = %s\n"
2506 "wg reg_class = %s\n"
2507 "wh reg_class = %s\n"
2508 "wi reg_class = %s\n"
2509 "wj reg_class = %s\n"
2510 "wk reg_class = %s\n"
2511 "wl reg_class = %s\n"
2512 "wm reg_class = %s\n"
2513 "wo reg_class = %s\n"
2514 "wp reg_class = %s\n"
2515 "wq reg_class = %s\n"
2516 "wr reg_class = %s\n"
2517 "ws reg_class = %s\n"
2518 "wt reg_class = %s\n"
2519 "wu reg_class = %s\n"
2520 "wv reg_class = %s\n"
2521 "ww reg_class = %s\n"
2522 "wx reg_class = %s\n"
2523 "wy reg_class = %s\n"
2524 "wz reg_class = %s\n"
2525 "wA reg_class = %s\n"
2526 "wH reg_class = %s\n"
2527 "wI reg_class = %s\n"
2528 "wJ reg_class = %s\n"
2529 "wK reg_class = %s\n"
2530 "\n",
2531 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2532 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2533 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2534 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2535 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2536 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2537 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2538 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2539 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2540 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2541 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2564 nl = "\n";
2565 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2566 rs6000_debug_print_mode (m);
2568 fputs ("\n", stderr);
2570 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2572 machine_mode mode1 = print_tieable_modes[m1];
2573 bool first_time = true;
2575 nl = (const char *)0;
2576 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2578 machine_mode mode2 = print_tieable_modes[m2];
2579 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2581 if (first_time)
2583 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2584 nl = "\n";
2585 first_time = false;
2588 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2592 if (!first_time)
2593 fputs ("\n", stderr);
2596 if (nl)
2597 fputs (nl, stderr);
2599 if (rs6000_recip_control)
2601 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2603 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2604 if (rs6000_recip_bits[m])
2606 fprintf (stderr,
2607 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2608 GET_MODE_NAME (m),
2609 (RS6000_RECIP_AUTO_RE_P (m)
2610 ? "auto"
2611 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2612 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2613 ? "auto"
2614 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2617 fputs ("\n", stderr);
2620 if (rs6000_cpu_index >= 0)
2622 const char *name = processor_target_table[rs6000_cpu_index].name;
2623 HOST_WIDE_INT flags
2624 = processor_target_table[rs6000_cpu_index].target_enable;
2626 sprintf (flags_buffer, "-mcpu=%s flags", name);
2627 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2629 else
2630 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2632 if (rs6000_tune_index >= 0)
2634 const char *name = processor_target_table[rs6000_tune_index].name;
2635 HOST_WIDE_INT flags
2636 = processor_target_table[rs6000_tune_index].target_enable;
2638 sprintf (flags_buffer, "-mtune=%s flags", name);
2639 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2641 else
2642 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2644 cl_target_option_save (&cl_opts, &global_options);
2645 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2646 rs6000_isa_flags);
2648 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2649 rs6000_isa_flags_explicit);
2651 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2652 rs6000_builtin_mask);
2654 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2656 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2657 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2659 switch (rs6000_sched_costly_dep)
2661 case max_dep_latency:
2662 costly_str = "max_dep_latency";
2663 break;
2665 case no_dep_costly:
2666 costly_str = "no_dep_costly";
2667 break;
2669 case all_deps_costly:
2670 costly_str = "all_deps_costly";
2671 break;
2673 case true_store_to_load_dep_costly:
2674 costly_str = "true_store_to_load_dep_costly";
2675 break;
2677 case store_to_load_dep_costly:
2678 costly_str = "store_to_load_dep_costly";
2679 break;
2681 default:
2682 costly_str = costly_num;
2683 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2684 break;
2687 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2689 switch (rs6000_sched_insert_nops)
2691 case sched_finish_regroup_exact:
2692 nop_str = "sched_finish_regroup_exact";
2693 break;
2695 case sched_finish_pad_groups:
2696 nop_str = "sched_finish_pad_groups";
2697 break;
2699 case sched_finish_none:
2700 nop_str = "sched_finish_none";
2701 break;
2703 default:
2704 nop_str = nop_num;
2705 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2706 break;
2709 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2711 switch (rs6000_sdata)
2713 default:
2714 case SDATA_NONE:
2715 break;
2717 case SDATA_DATA:
2718 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2719 break;
2721 case SDATA_SYSV:
2722 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2723 break;
2725 case SDATA_EABI:
2726 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2727 break;
2731 switch (rs6000_traceback)
2733 case traceback_default: trace_str = "default"; break;
2734 case traceback_none: trace_str = "none"; break;
2735 case traceback_part: trace_str = "part"; break;
2736 case traceback_full: trace_str = "full"; break;
2737 default: trace_str = "unknown"; break;
2740 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2742 switch (rs6000_current_cmodel)
2744 case CMODEL_SMALL: cmodel_str = "small"; break;
2745 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2746 case CMODEL_LARGE: cmodel_str = "large"; break;
2747 default: cmodel_str = "unknown"; break;
2750 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2752 switch (rs6000_current_abi)
2754 case ABI_NONE: abi_str = "none"; break;
2755 case ABI_AIX: abi_str = "aix"; break;
2756 case ABI_ELFv2: abi_str = "ELFv2"; break;
2757 case ABI_V4: abi_str = "V4"; break;
2758 case ABI_DARWIN: abi_str = "darwin"; break;
2759 default: abi_str = "unknown"; break;
2762 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2764 if (rs6000_altivec_abi)
2765 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2767 if (rs6000_darwin64_abi)
2768 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2770 fprintf (stderr, DEBUG_FMT_S, "single_float",
2771 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2773 fprintf (stderr, DEBUG_FMT_S, "double_float",
2774 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2776 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2777 (TARGET_SOFT_FLOAT ? "true" : "false"));
2779 if (TARGET_LINK_STACK)
2780 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2782 if (TARGET_P8_FUSION)
2784 char options[80];
2786 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2787 if (TARGET_TOC_FUSION)
2788 strcat (options, ", toc");
2790 if (TARGET_P8_FUSION_SIGN)
2791 strcat (options, ", sign");
2793 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2796 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2797 TARGET_SECURE_PLT ? "secure" : "bss");
2798 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2799 aix_struct_return ? "aix" : "sysv");
2800 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2801 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2802 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2803 tf[!!rs6000_align_branch_targets]);
2804 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2805 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2806 rs6000_long_double_type_size);
2807 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2808 (int)rs6000_sched_restricted_insns_priority);
2809 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2810 (int)END_BUILTINS);
2811 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2812 (int)RS6000_BUILTIN_COUNT);
2814 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2815 (int)TARGET_FLOAT128_ENABLE_TYPE);
2817 if (TARGET_VSX)
2818 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2819 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2821 if (TARGET_DIRECT_MOVE_128)
2822 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2823 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2827 /* Update the addr mask bits in reg_addr to help secondary reload and the
2828 legitimate address support (GO_IF_LEGITIMATE_ADDRESS) figure out the
2829 appropriate addressing to use. */
2831 static void
2832 rs6000_setup_reg_addr_masks (void)
2834 ssize_t rc, reg, m, nregs;
2835 addr_mask_type any_addr_mask, addr_mask;
2837 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2839 machine_mode m2 = (machine_mode) m;
2840 bool complex_p = false;
2841 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2842 size_t msize;
2844 if (COMPLEX_MODE_P (m2))
2846 complex_p = true;
2847 m2 = GET_MODE_INNER (m2);
2850 msize = GET_MODE_SIZE (m2);
2852 /* SDmode is special in that we want to access it only via REG+REG
2853 addressing on power7 and above, since we want to use the LFIWZX and
2854 STFIWX instructions to load and store it. */
2855 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2857 any_addr_mask = 0;
2858 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2860 addr_mask = 0;
2861 reg = reload_reg_map[rc].reg;
2863 /* Can mode values go in the GPR/FPR/Altivec registers? */
2864 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2866 bool small_int_vsx_p = (small_int_p
2867 && (rc == RELOAD_REG_FPR
2868 || rc == RELOAD_REG_VMX));
2870 nregs = rs6000_hard_regno_nregs[m][reg];
2871 addr_mask |= RELOAD_REG_VALID;
2873 /* Indicate if the mode takes more than 1 physical register. If
2874 it takes a single register, indicate it can do REG+REG
2875 addressing. Small integers in VSX registers can only do
2876 REG+REG addressing. */
2877 if (small_int_vsx_p)
2878 addr_mask |= RELOAD_REG_INDEXED;
2879 else if (nregs > 1 || m == BLKmode || complex_p)
2880 addr_mask |= RELOAD_REG_MULTIPLE;
2881 else
2882 addr_mask |= RELOAD_REG_INDEXED;
2884 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2885 addressing. If we allow scalars into Altivec registers,
2886 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2888 if (TARGET_UPDATE
2889 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2890 && msize <= 8
2891 && !VECTOR_MODE_P (m2)
2892 && !FLOAT128_VECTOR_P (m2)
2893 && !complex_p
2894 && !small_int_vsx_p)
2896 addr_mask |= RELOAD_REG_PRE_INCDEC;
2898 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2899 we don't allow PRE_MODIFY for some multi-register
2900 operations. */
2901 switch (m)
2903 default:
2904 addr_mask |= RELOAD_REG_PRE_MODIFY;
2905 break;
2907 case DImode:
2908 if (TARGET_POWERPC64)
2909 addr_mask |= RELOAD_REG_PRE_MODIFY;
2910 break;
2912 case DFmode:
2913 case DDmode:
2914 if (TARGET_DF_INSN)
2915 addr_mask |= RELOAD_REG_PRE_MODIFY;
2916 break;
2921 /* GPR and FPR registers can do REG+OFFSET addressing, except
2922 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2923 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2924 if ((addr_mask != 0) && !indexed_only_p
2925 && msize <= 8
2926 && (rc == RELOAD_REG_GPR
2927 || ((msize == 8 || m2 == SFmode)
2928 && (rc == RELOAD_REG_FPR
2929 || (rc == RELOAD_REG_VMX
2930 && TARGET_P9_DFORM_SCALAR)))))
2931 addr_mask |= RELOAD_REG_OFFSET;
2933 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2934 instructions are enabled. The offset for 128-bit VSX registers is
2935 only 12 bits. While GPRs can handle the full offset range, VSX
2936 registers can only handle the restricted range. */
2937 else if ((addr_mask != 0) && !indexed_only_p
2938 && msize == 16 && TARGET_P9_DFORM_VECTOR
2939 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2940 || (m2 == TImode && TARGET_VSX_TIMODE)))
2942 addr_mask |= RELOAD_REG_OFFSET;
2943 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2944 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2947 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2948 addressing on 128-bit types. */
2949 if (rc == RELOAD_REG_VMX && msize == 16
2950 && (addr_mask & RELOAD_REG_VALID) != 0)
2951 addr_mask |= RELOAD_REG_AND_M16;
2953 reg_addr[m].addr_mask[rc] = addr_mask;
2954 any_addr_mask |= addr_mask;
2957 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
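/* Editorial sketch: a consumer of the precomputed masks, asking whether
   DFmode supports REG+OFFSET (D-form) addressing in GPRs:

     if ((reg_addr[DFmode].addr_mask[RELOAD_REG_GPR] & RELOAD_REG_OFFSET) != 0)
       ...a reg + 16-bit displacement address is usable...  */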
2962 /* Initialize the various global tables that are based on register size. */
2963 static void
2964 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2966 ssize_t r, m, c;
2967 int align64;
2968 int align32;
2970 /* Precalculate REGNO_REG_CLASS. */
2971 rs6000_regno_regclass[0] = GENERAL_REGS;
2972 for (r = 1; r < 32; ++r)
2973 rs6000_regno_regclass[r] = BASE_REGS;
2975 for (r = 32; r < 64; ++r)
2976 rs6000_regno_regclass[r] = FLOAT_REGS;
2978 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2979 rs6000_regno_regclass[r] = NO_REGS;
2981 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2982 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2984 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2985 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2986 rs6000_regno_regclass[r] = CR_REGS;
2988 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2989 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2990 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2991 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2992 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2993 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2994 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2995 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2996 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2997 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2999 /* Precalculate register class to simpler reload register class. We don't
3000 need all of the register classes that are combinations of different
3001 classes, just the simple ones that have constraint letters. */
3002 for (c = 0; c < N_REG_CLASSES; c++)
3003 reg_class_to_reg_type[c] = NO_REG_TYPE;
3005 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3006 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3007 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3008 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3009 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3010 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3011 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3012 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3013 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3014 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3016 if (TARGET_VSX)
3018 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3019 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3021 else
3023 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3024 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3027 /* Precalculate the valid memory formats as well as the vector information;
3028 this must be set up before the rs6000_hard_regno_nregs_internal calls
3029 below. */
3030 gcc_assert ((int)VECTOR_NONE == 0);
3031 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3032 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3034 gcc_assert ((int)CODE_FOR_nothing == 0);
3035 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3037 gcc_assert ((int)NO_REGS == 0);
3038 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3040 /* The VSX hardware allows native alignment for vectors, but we control whether
3041 the compiler believes it can use native alignment or must still use 128-bit alignment. */
3042 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3044 align64 = 64;
3045 align32 = 32;
3047 else
3049 align64 = 128;
3050 align32 = 128;
3053 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3054 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3055 if (TARGET_FLOAT128_TYPE)
3057 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3058 rs6000_vector_align[KFmode] = 128;
3060 if (FLOAT128_IEEE_P (TFmode))
3062 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3063 rs6000_vector_align[TFmode] = 128;
3067 /* V2DF mode, VSX only. */
3068 if (TARGET_VSX)
3070 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3071 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3072 rs6000_vector_align[V2DFmode] = align64;
3075 /* V4SF mode, either VSX or Altivec. */
3076 if (TARGET_VSX)
3078 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3079 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3080 rs6000_vector_align[V4SFmode] = align32;
3082 else if (TARGET_ALTIVEC)
3084 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3085 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3086 rs6000_vector_align[V4SFmode] = align32;
3089 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3090 and stores. */
3091 if (TARGET_ALTIVEC)
3093 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3094 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3095 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3096 rs6000_vector_align[V4SImode] = align32;
3097 rs6000_vector_align[V8HImode] = align32;
3098 rs6000_vector_align[V16QImode] = align32;
3100 if (TARGET_VSX)
3102 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3103 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3104 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3106 else
3108 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3109 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3110 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3114 /* V2DImode: full support depends on the ISA 2.07 vector mode. Allow it under
3115 VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3116 if (TARGET_VSX)
3118 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3119 rs6000_vector_unit[V2DImode]
3120 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3121 rs6000_vector_align[V2DImode] = align64;
3123 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3124 rs6000_vector_unit[V1TImode]
3125 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3126 rs6000_vector_align[V1TImode] = 128;
3129 /* DFmode, see if we want to use the VSX unit. Memory is handled
3130 differently, so don't set rs6000_vector_mem. */
3131 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
3133 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3134 rs6000_vector_align[DFmode] = 64;
3137 /* SFmode, see if we want to use the VSX unit. */
3138 if (TARGET_P8_VECTOR && TARGET_VSX_SCALAR_FLOAT)
3140 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3141 rs6000_vector_align[SFmode] = 32;
3144 /* Allow TImode in VSX registers and set the VSX memory macros. */
3145 if (TARGET_VSX && TARGET_VSX_TIMODE)
3147 rs6000_vector_mem[TImode] = VECTOR_VSX;
3148 rs6000_vector_align[TImode] = align64;
3151 /* TODO add paired floating point vector support. */
3153 /* Register class constraints for the constraints that depend on compile
3154 switches. When the VSX code was added, different constraints were added
3155 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3156 of the VSX registers are used. The register classes for scalar floating
3157 point types are set based on whether we allow that type into the upper
3158 (Altivec) registers. GCC has register classes to target the Altivec
3159 registers for load/store operations, to select using a VSX memory
3160 operation instead of the traditional floating point operation. The
3161 constraints are:
3163 d - Register class to use with traditional DFmode instructions.
3164 f - Register class to use with traditional SFmode instructions.
3165 v - Altivec register.
3166 wa - Any VSX register.
3167 wc - Reserved to represent individual CR bits (used in LLVM).
3168 wd - Preferred register class for V2DFmode.
3169 wf - Preferred register class for V4SFmode.
3170 wg - Float register for power6x move insns.
3171 wh - FP register for direct move instructions.
3172 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3173 wj - FP or VSX register to hold 64-bit integers for direct moves.
3174 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3175 wl - Float register if we can do 32-bit signed int loads.
3176 wm - VSX register for ISA 2.07 direct move operations.
3177 wn - always NO_REGS.
3178 wr - GPR if 64-bit mode is permitted.
3179 ws - Register class to do ISA 2.06 DF operations.
3180 wt - VSX register for TImode in VSX registers.
3181 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3182 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3183 ww - Register class to do SF conversions in with VSX operations.
3184 wx - Float register if we can do 32-bit int stores.
3185 wy - Register class to do ISA 2.07 SF operations.
3186 wz - Float register if we can do 32-bit unsigned int loads.
3187 wH - Altivec register if SImode is allowed in VSX registers.
3188 wI - VSX register if SImode is allowed in VSX registers.
3189 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3190 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
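/* Editorial example: one of these constraints in use from C via extended
   asm, assuming a VSX-enabled target ("wa" accepts any VSX register and
   %x prints the full VSX register number):

     __vector double v;
     __asm__ ("xvabsdp %x0,%x1" : "=wa" (v) : "wa" (v));  */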
3192 if (TARGET_HARD_FLOAT)
3193 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3195 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3196 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3198 if (TARGET_VSX)
3200 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3201 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3202 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3203 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3204 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3205 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3207 if (TARGET_VSX_TIMODE)
3208 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3211 /* Add conditional constraints based on various options, to allow us to
3212 collapse multiple insn patterns. */
3213 if (TARGET_ALTIVEC)
3214 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3216 if (TARGET_MFPGPR) /* DFmode */
3217 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3219 if (TARGET_LFIWAX)
3220 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3222 if (TARGET_DIRECT_MOVE)
3224 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3225 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3226 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3227 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3228 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3229 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3232 if (TARGET_POWERPC64)
3234 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3235 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3238 if (TARGET_P8_VECTOR) /* SFmode */
3240 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3241 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3242 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3249 else if (TARGET_VSX)
3250 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3252 if (TARGET_STFIWX)
3253 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3255 if (TARGET_LFIWZX)
3256 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3258 if (TARGET_FLOAT128_TYPE)
3260 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3261 if (FLOAT128_IEEE_P (TFmode))
3262 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3265 /* Support for new D-form instructions. */
3266 if (TARGET_P9_DFORM_SCALAR)
3267 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3269 /* Support for ISA 3.0 (power9) vectors. */
3270 if (TARGET_P9_VECTOR)
3271 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3273 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3274 if (TARGET_DIRECT_MOVE_128)
3275 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3277 /* Support small integers in VSX registers. */
3278 if (TARGET_P8_VECTOR)
3280 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3281 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3282 if (TARGET_P9_VECTOR)
3284 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3285 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3289 /* Set up the reload helper and direct move functions. */
3290 if (TARGET_VSX || TARGET_ALTIVEC)
3292 if (TARGET_64BIT)
3294 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3295 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3296 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3297 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3298 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3299 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3300 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3301 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3302 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3303 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3304 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3305 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3306 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3307 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3308 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3309 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3310 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3311 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3312 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3313 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3315 if (FLOAT128_VECTOR_P (KFmode))
3317 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3318 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3321 if (FLOAT128_VECTOR_P (TFmode))
3323 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3324 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3327 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3328 available. */
3329 if (TARGET_NO_SDMODE_STACK)
3331 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3332 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3335 if (TARGET_VSX_TIMODE)
3337 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3338 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3341 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3343 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3344 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3345 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3346 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3347 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3348 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3349 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3350 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3351 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3353 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3354 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3355 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3356 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3357 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3358 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3359 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3360 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3361 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3363 if (FLOAT128_VECTOR_P (KFmode))
3365 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3366 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3369 if (FLOAT128_VECTOR_P (TFmode))
3371 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3372 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3376 else
3378 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3379 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3380 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3381 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3382 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3383 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3384 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3385 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3386 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3387 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3388 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3389 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3390 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3391 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3392 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3393 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3394 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3395 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3396 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3397 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3399 if (FLOAT128_VECTOR_P (KFmode))
3401 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3402 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3405 if (FLOAT128_IEEE_P (TFmode))
3407 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3408 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3411 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3412 available. */
3413 if (TARGET_NO_SDMODE_STACK)
3415 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3416 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3419 if (TARGET_VSX_TIMODE)
3421 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3422 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3425 if (TARGET_DIRECT_MOVE)
3427 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3428 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3429 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3433 reg_addr[DFmode].scalar_in_vmx_p = true;
3434 reg_addr[DImode].scalar_in_vmx_p = true;
3436 if (TARGET_P8_VECTOR)
3438 reg_addr[SFmode].scalar_in_vmx_p = true;
3439 reg_addr[SImode].scalar_in_vmx_p = true;
3441 if (TARGET_P9_VECTOR)
3443 reg_addr[HImode].scalar_in_vmx_p = true;
3444 reg_addr[QImode].scalar_in_vmx_p = true;
3449 /* Set up the fusion operations. */
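/* Power8 can fuse an addis that builds the high part of an address with an
   immediately following dependent load; record the load patterns here so
   the fusion peepholes can recreate that addis/load pairing.  */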
3450 if (TARGET_P8_FUSION)
3452 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3453 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3454 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3455 if (TARGET_64BIT)
3456 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3459 if (TARGET_P9_FUSION)
3461 struct fuse_insns {
3462 enum machine_mode mode; /* mode of the fused type. */
3463 enum machine_mode pmode; /* pointer mode. */
3464 enum rs6000_reload_reg_type rtype; /* register type. */
3465 enum insn_code load; /* load insn. */
3466 enum insn_code store; /* store insn. */
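/* Table of addis-fused load/store patterns, keyed by the mode being fused,
   the pointer mode the pattern was generated for, and whether the value
   lives in FPR/VSX or GPR registers.  */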
3469 static const struct fuse_insns addis_insns[] = {
3470 { SFmode, DImode, RELOAD_REG_FPR,
3471 CODE_FOR_fusion_vsx_di_sf_load,
3472 CODE_FOR_fusion_vsx_di_sf_store },
3474 { SFmode, SImode, RELOAD_REG_FPR,
3475 CODE_FOR_fusion_vsx_si_sf_load,
3476 CODE_FOR_fusion_vsx_si_sf_store },
3478 { DFmode, DImode, RELOAD_REG_FPR,
3479 CODE_FOR_fusion_vsx_di_df_load,
3480 CODE_FOR_fusion_vsx_di_df_store },
3482 { DFmode, SImode, RELOAD_REG_FPR,
3483 CODE_FOR_fusion_vsx_si_df_load,
3484 CODE_FOR_fusion_vsx_si_df_store },
3486 { DImode, DImode, RELOAD_REG_FPR,
3487 CODE_FOR_fusion_vsx_di_di_load,
3488 CODE_FOR_fusion_vsx_di_di_store },
3490 { DImode, SImode, RELOAD_REG_FPR,
3491 CODE_FOR_fusion_vsx_si_di_load,
3492 CODE_FOR_fusion_vsx_si_di_store },
3494 { QImode, DImode, RELOAD_REG_GPR,
3495 CODE_FOR_fusion_gpr_di_qi_load,
3496 CODE_FOR_fusion_gpr_di_qi_store },
3498 { QImode, SImode, RELOAD_REG_GPR,
3499 CODE_FOR_fusion_gpr_si_qi_load,
3500 CODE_FOR_fusion_gpr_si_qi_store },
3502 { HImode, DImode, RELOAD_REG_GPR,
3503 CODE_FOR_fusion_gpr_di_hi_load,
3504 CODE_FOR_fusion_gpr_di_hi_store },
3506 { HImode, SImode, RELOAD_REG_GPR,
3507 CODE_FOR_fusion_gpr_si_hi_load,
3508 CODE_FOR_fusion_gpr_si_hi_store },
3510 { SImode, DImode, RELOAD_REG_GPR,
3511 CODE_FOR_fusion_gpr_di_si_load,
3512 CODE_FOR_fusion_gpr_di_si_store },
3514 { SImode, SImode, RELOAD_REG_GPR,
3515 CODE_FOR_fusion_gpr_si_si_load,
3516 CODE_FOR_fusion_gpr_si_si_store },
3518 { SFmode, DImode, RELOAD_REG_GPR,
3519 CODE_FOR_fusion_gpr_di_sf_load,
3520 CODE_FOR_fusion_gpr_di_sf_store },
3522 { SFmode, SImode, RELOAD_REG_GPR,
3523 CODE_FOR_fusion_gpr_si_sf_load,
3524 CODE_FOR_fusion_gpr_si_sf_store },
3526 { DImode, DImode, RELOAD_REG_GPR,
3527 CODE_FOR_fusion_gpr_di_di_load,
3528 CODE_FOR_fusion_gpr_di_di_store },
3530 { DFmode, DImode, RELOAD_REG_GPR,
3531 CODE_FOR_fusion_gpr_di_df_load,
3532 CODE_FOR_fusion_gpr_di_df_store },
3535 machine_mode cur_pmode = Pmode;
3536 size_t i;
3538 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3540 machine_mode xmode = addis_insns[i].mode;
3541 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3543 if (addis_insns[i].pmode != cur_pmode)
3544 continue;
3546 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3547 continue;
3549 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3550 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3552 if (rtype == RELOAD_REG_FPR && TARGET_P9_DFORM_SCALAR)
3554 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3555 = addis_insns[i].load;
3556 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3557 = addis_insns[i].store;
3562 /* Note which types support fusing a TOC setup with a memory insn. We only
3563 do fused TOCs for the medium/large code models. */
3564 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3565 && (TARGET_CMODEL != CMODEL_SMALL))
3567 reg_addr[QImode].fused_toc = true;
3568 reg_addr[HImode].fused_toc = true;
3569 reg_addr[SImode].fused_toc = true;
3570 reg_addr[DImode].fused_toc = true;
3571 if (TARGET_HARD_FLOAT)
3573 if (TARGET_SINGLE_FLOAT)
3574 reg_addr[SFmode].fused_toc = true;
3575 if (TARGET_DOUBLE_FLOAT)
3576 reg_addr[DFmode].fused_toc = true;
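/* The tables below cache per-register/per-mode answers up front so that the
   HARD_REGNO_NREGS, HARD_REGNO_MODE_OK and CLASS_MAX_NREGS queries reduce
   to simple array lookups on hot paths.  */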
3580 /* Precalculate HARD_REGNO_NREGS. */
3581 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3582 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3583 rs6000_hard_regno_nregs[m][r]
3584 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3586 /* Precalculate HARD_REGNO_MODE_OK. */
3587 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3588 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3589 if (rs6000_hard_regno_mode_ok (r, (machine_mode)m))
3590 rs6000_hard_regno_mode_ok_p[m][r] = true;
3592 /* Precalculate CLASS_MAX_NREGS sizes. */
3593 for (c = 0; c < LIM_REG_CLASSES; ++c)
3595 int reg_size;
3597 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3598 reg_size = UNITS_PER_VSX_WORD;
3600 else if (c == ALTIVEC_REGS)
3601 reg_size = UNITS_PER_ALTIVEC_WORD;
3603 else if (c == FLOAT_REGS)
3604 reg_size = UNITS_PER_FP_WORD;
3606 else
3607 reg_size = UNITS_PER_WORD;
3609 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3611 machine_mode m2 = (machine_mode)m;
3612 int reg_size2 = reg_size;
3614 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3615 in VSX. */
3616 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3617 reg_size2 = UNITS_PER_FP_WORD;
3619 rs6000_class_max_nregs[m][c]
3620 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
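/* For example, under VSX a 16-byte V2DFmode value fits in one 16-byte VSX
   register: (16 + 16 - 1) / 16 = 1.  IBM long double (FLOAT128_2REG_P)
   keeps the 8-byte FP word size, so a 16-byte TFmode value takes
   (16 + 8 - 1) / 8 = 2 registers.  */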
3624 /* Calculate for which modes to automatically generate code that uses the
3625 reciprocal divide and square root instructions. In the future, possibly
3626 automatically generate the instructions even if the user did not specify
3627 -mrecip. The double-precision reciprocal sqrt estimate on the older
3628 machines is not accurate enough. */
3629 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3630 if (TARGET_FRES)
3631 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3632 if (TARGET_FRE)
3633 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3634 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3635 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3636 if (VECTOR_UNIT_VSX_P (V2DFmode))
3637 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3639 if (TARGET_FRSQRTES)
3640 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3641 if (TARGET_FRSQRTE)
3642 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3643 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3644 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3645 if (VECTOR_UNIT_VSX_P (V2DFmode))
3646 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
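/* At this point rs6000_recip_bits only records which estimate instructions
   the target provides.  The block below adds the AUTO bits that actually
   enable their use, and only when -mrecip was given together with the
   fast-math style flags it depends on.  */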
3648 if (rs6000_recip_control)
3650 if (!flag_finite_math_only)
3651 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
3652 if (flag_trapping_math)
3653 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
3654 if (!flag_reciprocal_math)
3655 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
3656 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3658 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3659 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3660 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3662 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3663 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3664 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3666 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3667 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3668 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3670 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3671 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3672 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3674 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3675 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3676 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3678 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3679 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3680 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3682 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3683 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3684 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3686 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3687 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3688 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3692 /* Update the addr mask bits in reg_addr to help secondary reload and the
3693 legitimate address handling figure out the appropriate addressing to
3694 use. */
3695 rs6000_setup_reg_addr_masks ();
3697 if (global_init_p || TARGET_DEBUG_TARGET)
3699 if (TARGET_DEBUG_REG)
3700 rs6000_debug_reg_global ();
3702 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3703 fprintf (stderr,
3704 "SImode variable mult cost = %d\n"
3705 "SImode constant mult cost = %d\n"
3706 "SImode short constant mult cost = %d\n"
3707 "DImode multipliciation cost = %d\n"
3708 "SImode division cost = %d\n"
3709 "DImode division cost = %d\n"
3710 "Simple fp operation cost = %d\n"
3711 "DFmode multiplication cost = %d\n"
3712 "SFmode division cost = %d\n"
3713 "DFmode division cost = %d\n"
3714 "cache line size = %d\n"
3715 "l1 cache size = %d\n"
3716 "l2 cache size = %d\n"
3717 "simultaneous prefetches = %d\n"
3718 "\n",
3719 rs6000_cost->mulsi,
3720 rs6000_cost->mulsi_const,
3721 rs6000_cost->mulsi_const9,
3722 rs6000_cost->muldi,
3723 rs6000_cost->divsi,
3724 rs6000_cost->divdi,
3725 rs6000_cost->fp,
3726 rs6000_cost->dmul,
3727 rs6000_cost->sdiv,
3728 rs6000_cost->ddiv,
3729 rs6000_cost->cache_line_size,
3730 rs6000_cost->l1_cache_size,
3731 rs6000_cost->l2_cache_size,
3732 rs6000_cost->simultaneous_prefetches);
3736 #if TARGET_MACHO
3737 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3739 static void
3740 darwin_rs6000_override_options (void)
3742 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3743 off. */
3744 rs6000_altivec_abi = 1;
3745 TARGET_ALTIVEC_VRSAVE = 1;
3746 rs6000_current_abi = ABI_DARWIN;
3748 if (DEFAULT_ABI == ABI_DARWIN
3749 && TARGET_64BIT)
3750 darwin_one_byte_bool = 1;
3752 if (TARGET_64BIT && ! TARGET_POWERPC64)
3754 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3755 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3757 if (flag_mkernel)
3759 rs6000_default_long_calls = 1;
3760 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3763 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3764 AltiVec. */
3765 if (!flag_mkernel && !flag_apple_kext
3766 && TARGET_64BIT
3767 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3768 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3770 /* Unless the user (not the configurer) has explicitly overridden
3771 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to G4
3772 unless targeting the kernel. */
3773 if (!flag_mkernel
3774 && !flag_apple_kext
3775 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3776 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3777 && ! global_options_set.x_rs6000_cpu_index)
3779 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3782 #endif
3784 /* If not otherwise specified by a target, make 'long double' equivalent to
3785 'double'. */
3787 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3788 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3789 #endif
3791 /* Return a mask of the option bits that affect which builtins are
3792 available. In the past we used target_flags, but we've run out of
3793 bits, and some options like PAIRED are no longer in target_flags. */
3795 HOST_WIDE_INT
3796 rs6000_builtin_mask_calculate (void)
3798 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3799 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3800 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3801 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3802 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3803 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3804 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3805 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3806 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3807 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3808 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3809 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3810 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3811 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3812 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3813 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3814 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3815 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3816 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3817 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3818 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0));
3821 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3822 to clobber the XER[CA] bit because clobbering that bit without telling
3823 the compiler worked just fine with versions of GCC before GCC 5, and
3824 breaking a lot of older code in ways that are hard to track down is
3825 not such a great idea. */
3827 static rtx_insn *
3828 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3829 vec<const char *> &/*constraints*/,
3830 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3832 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3833 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
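/* Return NULL: no insns need to be emitted after the asm; only the clobber
   lists were extended.  */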
3834 return NULL;
3837 /* Override command line options.
3839 Combine build-specific configuration information with options
3840 specified on the command line to set various state variables which
3841 influence code generation, optimization, and expansion of built-in
3842 functions. Ensure that command-line configuration preferences are
3843 compatible with each other and with the build configuration; issue
3844 warnings while adjusting configuration or error messages while
3845 rejecting configuration.
3847 Upon entry to this function:
3849 This function is called once at the beginning of
3850 compilation, and then again at the start and end of compiling
3851 each section of code that has a different configuration, as
3852 indicated, for example, by adding the
3854 __attribute__((__target__("cpu=power9")))
3856 qualifier to a function definition or, for example, by bracketing
3857 code between
3859 #pragma GCC target("altivec")
3863 #pragma GCC reset_options
3865 directives. Parameter global_init_p is true for the initial
3866 invocation, which initializes global variables, and false for all
3867 subsequent invocations.
3870 Various global state information is assumed to be valid. This
3871 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3872 default CPU specified at build configure time, TARGET_DEFAULT,
3873 representing the default set of option flags for the default
3874 target, and global_options_set.x_rs6000_isa_flags, representing
3875 which options were requested on the command line.
3877 Upon return from this function:
3879 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3880 was set by name on the command line. Additionally, if certain
3881 attributes are automatically enabled or disabled by this function
3882 in order to assure compatibility between options and
3883 configuration, the flags associated with those attributes are
3884 also set. By setting these "explicit bits", we avoid the risk
3885 that other code might accidentally overwrite these particular
3886 attributes with "default values".
3888 The various bits of rs6000_isa_flags are set to indicate the
3889 target options that have been selected for the most current
3890 compilation efforts. This has the effect of also turning on the
3891 associated TARGET_XXX values since these are macros which are
3892 generally defined to test the corresponding bit of the
3893 rs6000_isa_flags variable.
3895 The variable rs6000_builtin_mask is set to represent the target
3896 options for the most current compilation efforts, consistent with
3897 the current contents of rs6000_isa_flags. This variable controls
3898 expansion of built-in functions.
3900 Various other global variables and fields of global structures
3901 (over 50 in all) are initialized to reflect the desired options
3902 for the most current compilation efforts. */
3904 static bool
3905 rs6000_option_override_internal (bool global_init_p)
3907 bool ret = true;
3908 bool have_cpu = false;
3910 /* The default cpu requested at configure time, if any. */
3911 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3913 HOST_WIDE_INT set_masks;
3914 HOST_WIDE_INT ignore_masks;
3915 int cpu_index;
3916 int tune_index;
3917 struct cl_target_option *main_target_opt
3918 = ((global_init_p || target_option_default_node == NULL)
3919 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3921 /* Print defaults. */
3922 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3923 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3925 /* Remember the explicit arguments. */
3926 if (global_init_p)
3927 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3929 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3930 library functions, so warn about it. The flag may be useful for
3931 performance studies from time to time though, so don't disable it
3932 entirely. */
3933 if (global_options_set.x_rs6000_alignment_flags
3934 && rs6000_alignment_flags == MASK_ALIGN_POWER
3935 && DEFAULT_ABI == ABI_DARWIN
3936 && TARGET_64BIT)
3937 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3938 " it is incompatible with the installed C and C++ libraries");
3940 /* Numerous experiments show that IRA-based loop pressure
3941 calculation works better for RTL loop invariant motion on targets
3942 with enough (>= 32) registers. It is an expensive optimization,
3943 so it is enabled only when optimizing for peak performance. */
3944 if (optimize >= 3 && global_init_p
3945 && !global_options_set.x_flag_ira_loop_pressure)
3946 flag_ira_loop_pressure = 1;
3948 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3949 for tracebacks to be complete, but not if an -fasynchronous-unwind-tables
3950 option was already specified. */
3951 if (flag_sanitize & SANITIZE_USER_ADDRESS
3952 && !global_options_set.x_flag_asynchronous_unwind_tables)
3953 flag_asynchronous_unwind_tables = 1;
3955 /* Set the pointer size. */
3956 if (TARGET_64BIT)
3958 rs6000_pmode = (int)DImode;
3959 rs6000_pointer_size = 64;
3961 else
3963 rs6000_pmode = (int)SImode;
3964 rs6000_pointer_size = 32;
3967 /* Some OSs don't support saving the high part of 64-bit registers on context
3968 switch. Other OSs don't support saving Altivec registers. On those OSs,
3969 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3970 if the user wants either, the user must explicitly specify them and we
3971 won't interfere with the user's specification. */
3973 set_masks = POWERPC_MASKS;
3974 #ifdef OS_MISSING_POWERPC64
3975 if (OS_MISSING_POWERPC64)
3976 set_masks &= ~OPTION_MASK_POWERPC64;
3977 #endif
3978 #ifdef OS_MISSING_ALTIVEC
3979 if (OS_MISSING_ALTIVEC)
3980 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3981 | OTHER_VSX_VECTOR_MASKS);
3982 #endif
3984 /* Don't let the processor default override flags given explicitly. */
3985 set_masks &= ~rs6000_isa_flags_explicit;
3987 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3988 the cpu in a target attribute or pragma, but did not specify a tuning
3989 option, use the cpu for the tuning option rather than the option specified
3990 with -mtune on the command line. Process a '--with-cpu' configuration
3991 request as an implicit --cpu. */
3992 if (rs6000_cpu_index >= 0)
3994 cpu_index = rs6000_cpu_index;
3995 have_cpu = true;
3997 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3999 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
4000 have_cpu = true;
4002 else if (implicit_cpu)
4004 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
4005 have_cpu = true;
4007 else
4009 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4010 const char *default_cpu = ((!TARGET_POWERPC64)
4011 ? "powerpc"
4012 : ((BYTES_BIG_ENDIAN)
4013 ? "powerpc64"
4014 : "powerpc64le"));
4016 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4017 have_cpu = false;
4020 gcc_assert (cpu_index >= 0);
4022 if (have_cpu)
4024 #ifndef HAVE_AS_POWER9
4025 if (processor_target_table[rs6000_cpu_index].processor
4026 == PROCESSOR_POWER9)
4028 have_cpu = false;
4029 warning (0, "will not generate power9 instructions because "
4030 "assembler lacks power9 support");
4032 #endif
4033 #ifndef HAVE_AS_POWER8
4034 if (processor_target_table[rs6000_cpu_index].processor
4035 == PROCESSOR_POWER8)
4037 have_cpu = false;
4038 warning (0, "will not generate power8 instructions because "
4039 "assembler lacks power8 support");
4041 #endif
4042 #ifndef HAVE_AS_POPCNTD
4043 if (processor_target_table[rs6000_cpu_index].processor
4044 == PROCESSOR_POWER7)
4046 have_cpu = false;
4047 warning (0, "will not generate power7 instructions because "
4048 "assembler lacks power7 support");
4050 #endif
4051 #ifndef HAVE_AS_DFP
4052 if (processor_target_table[rs6000_cpu_index].processor
4053 == PROCESSOR_POWER6)
4055 have_cpu = false;
4056 warning (0, "will not generate power6 instructions because "
4057 "assembler lacks power6 support");
4059 #endif
4060 #ifndef HAVE_AS_POPCNTB
4061 if (processor_target_table[rs6000_cpu_index].processor
4062 == PROCESSOR_POWER5)
4064 have_cpu = false;
4065 warning (0, "will not generate power5 instructions because "
4066 "assembler lacks power5 support");
4068 #endif
4070 if (!have_cpu)
4072 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4073 const char *default_cpu = (!TARGET_POWERPC64
4074 ? "powerpc"
4075 : (BYTES_BIG_ENDIAN
4076 ? "powerpc64"
4077 : "powerpc64le"));
4079 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4083 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4084 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4085 with those from the cpu, except for options that were explicitly set. If
4086 we don't have a cpu, do not override the target bits set in
4087 TARGET_DEFAULT. */
4088 if (have_cpu)
4090 rs6000_isa_flags &= ~set_masks;
4091 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4092 & set_masks);
4094 else
4096 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4097 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4098 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4099 to using rs6000_isa_flags, we need to do the initialization here.
4101 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4102 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4103 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
4104 : processor_target_table[cpu_index].target_enable);
4105 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4108 if (rs6000_tune_index >= 0)
4109 tune_index = rs6000_tune_index;
4110 else if (have_cpu)
4111 rs6000_tune_index = tune_index = cpu_index;
4112 else
4114 size_t i;
4115 enum processor_type tune_proc
4116 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4118 tune_index = -1;
4119 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4120 if (processor_target_table[i].processor == tune_proc)
4122 rs6000_tune_index = tune_index = i;
4123 break;
4127 gcc_assert (tune_index >= 0);
4128 rs6000_cpu = processor_target_table[tune_index].processor;
4130 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4131 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4132 || rs6000_cpu == PROCESSOR_PPCE5500)
4134 if (TARGET_ALTIVEC)
4135 error ("AltiVec not supported in this target");
4138 /* If we are optimizing big endian systems for space, use the load/store
4139 multiple and string instructions. */
4140 if (BYTES_BIG_ENDIAN && optimize_size)
4141 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4142 | OPTION_MASK_STRING);
4144 /* Don't allow -mmultiple or -mstring on little endian systems
4145 unless the cpu is a 750, because the hardware doesn't support the
4146 instructions used in little endian mode, and using them causes an
4147 alignment trap. The 750 does not cause an alignment trap (except
4148 when the target is unaligned). */
4150 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4152 if (TARGET_MULTIPLE)
4154 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4155 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4156 warning (0, "-mmultiple is not supported on little endian systems");
4159 if (TARGET_STRING)
4161 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4162 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4163 warning (0, "-mstring is not supported on little endian systems");
4167 /* If little-endian, default to -mstrict-align on older processors.
4168 Testing for htm matches power8 and later. */
4169 if (!BYTES_BIG_ENDIAN
4170 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4171 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4173 /* -maltivec={le,be} implies -maltivec. */
4174 if (rs6000_altivec_element_order != 0)
4175 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4177 /* Disallow -maltivec=le in big endian mode for now. This is not
4178 known to be useful for anyone. */
4179 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4181 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4182 rs6000_altivec_element_order = 0;
4185 /* Add some warnings for VSX. */
4186 if (TARGET_VSX)
4188 const char *msg = NULL;
4189 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4191 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4192 msg = N_("-mvsx requires hardware floating point");
4193 else
4195 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4196 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4199 else if (TARGET_PAIRED_FLOAT)
4200 msg = N_("-mvsx and -mpaired are incompatible");
4201 else if (TARGET_AVOID_XFORM > 0)
4202 msg = N_("-mvsx needs indexed addressing");
4203 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4204 & OPTION_MASK_ALTIVEC))
4206 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4207 msg = N_("-mvsx and -mno-altivec are incompatible");
4208 else
4209 msg = N_("-mno-altivec disables vsx");
4212 if (msg)
4214 warning (0, msg);
4215 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4216 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4220 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4221 the -mcpu setting to enable options that conflict. */
4222 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4223 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4224 | OPTION_MASK_ALTIVEC
4225 | OPTION_MASK_VSX)) != 0)
4226 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4227 | OPTION_MASK_DIRECT_MOVE)
4228 & ~rs6000_isa_flags_explicit);
4230 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4231 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4233 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4234 off all of the options that depend on those flags. */
4235 ignore_masks = rs6000_disable_incompatible_switches ();
4237 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4238 unless the user explicitly used -mno-<option> to disable the code. */
4239 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_DFORM_SCALAR
4240 || TARGET_P9_DFORM_VECTOR || TARGET_P9_DFORM_BOTH > 0)
4241 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4242 else if (TARGET_P9_MINMAX)
4244 if (have_cpu)
4246 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4248 /* Legacy behavior: allow -mcpu=power9 with certain
4249 capabilities explicitly disabled. */
4250 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4252 else
4253 error ("Power9 target option is incompatible with -mcpu=<xxx> for "
4254 "<xxx> less than power9");
4256 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4257 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4258 & rs6000_isa_flags_explicit))
4259 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4260 were explicitly cleared. */
4261 error ("-mpower9-minmax incompatible with explicitly disabled options");
4262 else
4263 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4265 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4266 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4267 else if (TARGET_VSX)
4268 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4269 else if (TARGET_POPCNTD)
4270 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4271 else if (TARGET_DFP)
4272 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4273 else if (TARGET_CMPB)
4274 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4275 else if (TARGET_FPRND)
4276 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4277 else if (TARGET_POPCNTB)
4278 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4279 else if (TARGET_ALTIVEC)
4280 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
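/* Each ISA mask used above is a superset of the masks for the older ISAs,
   so testing from the newest enabled feature downward picks up all of the
   implied older options in one step.  */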
4282 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4284 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4285 error ("-mcrypto requires -maltivec");
4286 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4289 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4291 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4292 error ("-mdirect-move requires -mvsx");
4293 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4296 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4298 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4299 error ("-mpower8-vector requires -maltivec");
4300 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4303 if (TARGET_P8_VECTOR && !TARGET_VSX)
4305 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4306 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4307 error ("-mpower8-vector requires -mvsx");
4308 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4310 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4311 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4312 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4314 else
4316 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4317 not explicit. */
4318 rs6000_isa_flags |= OPTION_MASK_VSX;
4319 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4323 if (TARGET_VSX_TIMODE && !TARGET_VSX)
4325 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
4326 error ("-mvsx-timode requires -mvsx");
4327 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4330 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4332 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4333 error ("-mhard-dfp requires -mhard-float");
4334 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4337 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4338 silently turn off quad memory mode. */
4339 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4341 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4342 warning (0, N_("-mquad-memory requires 64-bit mode"));
4344 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4345 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4347 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4348 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4351 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4352 the words are reversed, but atomic operations can still be done by
4353 swapping the words. */
4354 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4356 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4357 warning (0, N_("-mquad-memory is not available in little endian mode"));
4359 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4362 /* Assume that if the user asked for normal quad memory instructions, they
4363 want the atomic versions as well, unless they explicitly told us not to
4364 use quad word atomic instructions. */
4365 if (TARGET_QUAD_MEMORY
4366 && !TARGET_QUAD_MEMORY_ATOMIC
4367 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4368 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4370 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4371 generating power8 instructions. */
4372 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4373 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4374 & OPTION_MASK_P8_FUSION);
4376 /* Setting additional fusion flags turns on base fusion. */
4377 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4379 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4381 if (TARGET_P8_FUSION_SIGN)
4382 error ("-mpower8-fusion-sign requires -mpower8-fusion");
4384 if (TARGET_TOC_FUSION)
4385 error ("-mtoc-fusion requires -mpower8-fusion");
4387 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4389 else
4390 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4393 /* Power9 fusion is a superset over power8 fusion. */
4394 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4396 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4398 /* We prefer to not mention undocumented options in
4399 error messages. However, if users have managed to select
4400 power9-fusion without selecting power8-fusion, they
4401 already know about undocumented flags. */
4402 error ("-mpower9-fusion requires -mpower8-fusion");
4403 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4405 else
4406 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4409 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4410 generating power9 instructions. */
4411 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4412 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4413 & OPTION_MASK_P9_FUSION);
4415 /* Power8 does not fuse sign-extended loads with the addis. If we are
4416 optimizing at high levels for speed, convert a sign-extended load into a
4417 zero-extending load plus an explicit sign extension. */
4418 if (TARGET_P8_FUSION
4419 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4420 && optimize_function_for_speed_p (cfun)
4421 && optimize >= 3)
4422 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4424 /* TOC fusion requires 64-bit and medium/large code model. */
4425 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4427 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4428 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4429 warning (0, N_("-mtoc-fusion requires 64-bit"));
4432 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4434 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4435 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4436 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4439 /* Turn on -mtoc-fusion by default if p8-fusion and a 64-bit medium/large
4440 code model are in use. */
4441 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4442 && (TARGET_CMODEL != CMODEL_SMALL)
4443 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4444 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4446 /* ISA 3.0 vector instructions include ISA 2.07. */
4447 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4449 /* We prefer to not mention undocumented options in
4450 error messages. However, if users have managed to select
4451 power9-vector without selecting power8-vector, they
4452 already know about undocumented flags. */
4453 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4454 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4455 error ("-mpower9-vector requires -mpower8-vector");
4456 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4458 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4459 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4460 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4462 else
4464 /* OPTION_MASK_P9_VECTOR is explicit and
4465 OPTION_MASK_P8_VECTOR is not explicit. */
4466 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4467 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4471 /* -mpower9-dform turns on both -mpower9-dform-scalar and
4472 -mpower9-dform-vector. */
4473 if (TARGET_P9_DFORM_BOTH > 0)
4475 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4476 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
4478 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4479 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_SCALAR;
4481 else if (TARGET_P9_DFORM_BOTH == 0)
4483 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4484 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
4486 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4487 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4490 /* ISA 3.0 D-form instructions require p9-vector and upper-regs. */
4491 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR) && !TARGET_P9_VECTOR)
4493 /* We prefer to not mention undocumented options in
4494 error messages. However, if users have managed to select
4495 power9-dform without selecting power9-vector, they
4496 already know about undocumented flags. */
4497 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4498 && (rs6000_isa_flags_explicit & (OPTION_MASK_P9_DFORM_SCALAR
4499 | OPTION_MASK_P9_DFORM_VECTOR)))
4500 error ("-mpower9-dform requires -mpower9-vector");
4501 else if (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4503 rs6000_isa_flags &=
4504 ~(OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4505 rs6000_isa_flags_explicit |=
4506 (OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4508 else
4510 /* We know that OPTION_MASK_P9_VECTOR is not explicit and
4511 OPTION_MASK_P9_DFORM_SCALAR or OPTION_MASK_P9_DFORM_VECTOR
4512 may be explicit. */
4513 rs6000_isa_flags |= OPTION_MASK_P9_VECTOR;
4514 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4518 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR)
4519 && !TARGET_DIRECT_MOVE)
4521 /* We prefer to not mention undocumented options in
4522 error messages. However, if users have managed to select
4523 power9-dform without selecting direct-move, they
4524 already know about undocumented flags. */
4525 if ((rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4526 && ((rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR) ||
4527 (rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR) ||
4528 (TARGET_P9_DFORM_BOTH == 1)))
4529 error ("-mpower9-dform, -mpower9-dform-vector, -mpower9-dform-scalar"
4530 " require -mdirect-move");
4531 else if ((rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE) == 0)
4533 rs6000_isa_flags |= OPTION_MASK_DIRECT_MOVE;
4534 rs6000_isa_flags_explicit |= OPTION_MASK_DIRECT_MOVE;
4536 else
4538 rs6000_isa_flags &=
4539 ~(OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4540 rs6000_isa_flags_explicit |=
4541 (OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4545 /* Enable -mvsx-timode by default if VSX. */
4546 if (TARGET_VSX && !TARGET_VSX_TIMODE
4547 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
4548 rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
4550 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4551 support. If we only have ISA 2.06 support, and the user did not specify
4552 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4553 but we don't enable the full vectorization support. */
4554 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4555 TARGET_ALLOW_MOVMISALIGN = 1;
4557 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4559 if (TARGET_ALLOW_MOVMISALIGN > 0
4560 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4561 error ("-mallow-movmisalign requires -mvsx");
4563 TARGET_ALLOW_MOVMISALIGN = 0;
4566 /* Determine when unaligned vector accesses are permitted, and when
4567 they are preferred over masked Altivec loads. Note that if
4568 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4569 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4570 not true. */
4571 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4573 if (!TARGET_VSX)
4575 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4576 error ("-mefficient-unaligned-vsx requires -mvsx");
4578 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4581 else if (!TARGET_ALLOW_MOVMISALIGN)
4583 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4584 error ("-mefficient-unaligned-vsx requires -mallow-movmisalign");
4586 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4590 /* Set long double size before the IEEE 128-bit tests. */
4591 if (!global_options_set.x_rs6000_long_double_type_size)
4593 if (main_target_opt != NULL
4594 && (main_target_opt->x_rs6000_long_double_type_size
4595 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4596 error ("target attribute or pragma changes long double size");
4597 else
4598 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4601 /* Set -mabi=ieeelongdouble on some old targets. Note, AIX and Darwin
4602 explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
4603 pick up this default. */
4604 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4605 if (!global_options_set.x_rs6000_ieeequad)
4606 rs6000_ieeequad = 1;
4607 #endif
4609 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4610 systems, but don't enable the __float128 keyword. */
4611 if (TARGET_VSX && TARGET_LONG_DOUBLE_128
4612 && (TARGET_FLOAT128_ENABLE_TYPE || TARGET_IEEEQUAD)
4613 && ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) == 0))
4614 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4616 /* IEEE 128-bit floating point requires VSX support. */
4617 if (!TARGET_VSX)
4619 if (TARGET_FLOAT128_KEYWORD)
4621 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4622 error ("-mfloat128 requires VSX support");
4624 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4625 | OPTION_MASK_FLOAT128_KEYWORD
4626 | OPTION_MASK_FLOAT128_HW);
4629 else if (TARGET_FLOAT128_TYPE)
4631 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) != 0)
4632 error ("-mfloat128-type requires VSX support");
4634 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4635 | OPTION_MASK_FLOAT128_KEYWORD
4636 | OPTION_MASK_FLOAT128_HW);
4640 /* -mfloat128 and -mfloat128-hardware internally require the underlying IEEE
4641 128-bit floating point support to be enabled. */
4642 if (!TARGET_FLOAT128_TYPE)
4644 if (TARGET_FLOAT128_KEYWORD)
4646 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4648 error ("-mfloat128 requires -mfloat128-type");
4649 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4650 | OPTION_MASK_FLOAT128_KEYWORD
4651 | OPTION_MASK_FLOAT128_HW);
4653 else
4654 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4657 if (TARGET_FLOAT128_HW)
4659 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4661 error ("-mfloat128-hardware requires -mfloat128-type");
4662 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4664 else
4665 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4666 | OPTION_MASK_FLOAT128_KEYWORD
4667 | OPTION_MASK_FLOAT128_HW);
4671 /* If we have -mfloat128-type and full ISA 3.0 support, enable
4672 -mfloat128-hardware by default. However, don't enable the __float128
4673 keyword. If the user explicitly turned on -mfloat128-hardware, enable the
4674 -mfloat128 option as well if it was not already set. */
4675 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW
4676 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4677 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4678 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4680 if (TARGET_FLOAT128_HW
4681 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4683 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4684 error ("-mfloat128-hardware requires full ISA 3.0 support");
4686 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4689 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4691 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4692 error ("-mfloat128-hardware requires -m64");
4694 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4697 if (TARGET_FLOAT128_HW && !TARGET_FLOAT128_KEYWORD
4698 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0
4699 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4700 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
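/* To summarize the checks above: -mfloat128-hardware implies the -mfloat128
   keyword when it was named explicitly, -mfloat128 implies -mfloat128-type,
   and everything requires VSX, with the hardware form also needing full
   ISA 3.0 and 64-bit mode.  */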
4702 /* Print the options after updating the defaults. */
4703 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4704 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4706 /* E500mc does "better" if we inline more aggressively. Respect the
4707 user's opinion, though. */
4708 if (rs6000_block_move_inline_limit == 0
4709 && (rs6000_cpu == PROCESSOR_PPCE500MC
4710 || rs6000_cpu == PROCESSOR_PPCE500MC64
4711 || rs6000_cpu == PROCESSOR_PPCE5500
4712 || rs6000_cpu == PROCESSOR_PPCE6500))
4713 rs6000_block_move_inline_limit = 128;
4715 /* store_one_arg depends on expand_block_move to handle at least the
4716 size of reg_parm_stack_space. */
4717 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4718 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4720 if (global_init_p)
4722 /* If the appropriate debug option is enabled, replace the target hooks
4723 with debug versions that call the real version and then prints
4724 debugging information. */
4725 if (TARGET_DEBUG_COST)
4727 targetm.rtx_costs = rs6000_debug_rtx_costs;
4728 targetm.address_cost = rs6000_debug_address_cost;
4729 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4732 if (TARGET_DEBUG_ADDR)
4734 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4735 targetm.legitimize_address = rs6000_debug_legitimize_address;
4736 rs6000_secondary_reload_class_ptr
4737 = rs6000_debug_secondary_reload_class;
4738 rs6000_secondary_memory_needed_ptr
4739 = rs6000_debug_secondary_memory_needed;
4740 rs6000_cannot_change_mode_class_ptr
4741 = rs6000_debug_cannot_change_mode_class;
4742 rs6000_preferred_reload_class_ptr
4743 = rs6000_debug_preferred_reload_class;
4744 rs6000_legitimize_reload_address_ptr
4745 = rs6000_debug_legitimize_reload_address;
4746 rs6000_mode_dependent_address_ptr
4747 = rs6000_debug_mode_dependent_address;
4750 if (rs6000_veclibabi_name)
4752 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4753 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4754 else
4756 error ("unknown vectorization library ABI type (%s) for "
4757 "-mveclibabi= switch", rs6000_veclibabi_name);
4758 ret = false;
4763 /* Disable VSX and AltiVec silently if the user switched cpus to power7 in
4764 a target attribute or pragma, which automatically enables both options,
4765 unless the AltiVec ABI was set. This is set by default for 64-bit, but
4766 not for 32-bit. */
4767 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4768 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4769 | OPTION_MASK_FLOAT128_TYPE
4770 | OPTION_MASK_FLOAT128_KEYWORD)
4771 & ~rs6000_isa_flags_explicit);
4773 /* Enable Altivec ABI for AIX -maltivec. */
4774 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4776 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4777 error ("target attribute or pragma changes AltiVec ABI");
4778 else
4779 rs6000_altivec_abi = 1;
4782 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4783 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4784 be explicitly overridden in either case. */
4785 if (TARGET_ELF)
4787 if (!global_options_set.x_rs6000_altivec_abi
4788 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4790 if (main_target_opt != NULL &&
4791 !main_target_opt->x_rs6000_altivec_abi)
4792 error ("target attribute or pragma changes AltiVec ABI");
4793 else
4794 rs6000_altivec_abi = 1;
4798 /* Set the Darwin64 ABI as the default for 64-bit Darwin.
4799 So far, the only darwin64 targets are also Mach-O. */
4800 if (TARGET_MACHO
4801 && DEFAULT_ABI == ABI_DARWIN
4802 && TARGET_64BIT)
4804 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4805 error ("target attribute or pragma changes darwin64 ABI");
4806 else
4808 rs6000_darwin64_abi = 1;
4809 /* Default to natural alignment, for better performance. */
4810 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4814 /* Place FP constants in the constant pool instead of TOC
4815 if section anchors enabled. */
4816 if (flag_section_anchors
4817 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4818 TARGET_NO_FP_IN_TOC = 1;
4820 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4821 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4823 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4824 SUBTARGET_OVERRIDE_OPTIONS;
4825 #endif
4826 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4827 SUBSUBTARGET_OVERRIDE_OPTIONS;
4828 #endif
4829 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4830 SUB3TARGET_OVERRIDE_OPTIONS;
4831 #endif
4833 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4834 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4836 /* For the E500 family of cores, reset the single/double FP flags to let us
4837 check that they remain constant across attributes or pragmas. Also,
4838 clear a possible request for string instructions, which are not supported
4839 and which we might have silently enabled above for -Os.
4841 For other families, clear ISEL in case it was set implicitly.
4844 switch (rs6000_cpu)
4846 case PROCESSOR_PPC8540:
4847 case PROCESSOR_PPC8548:
4848 case PROCESSOR_PPCE500MC:
4849 case PROCESSOR_PPCE500MC64:
4850 case PROCESSOR_PPCE5500:
4851 case PROCESSOR_PPCE6500:
4853 rs6000_single_float = 0;
4854 rs6000_double_float = 0;
4856 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4858 break;
4860 default:
4862 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4863 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4865 break;
4868 if (main_target_opt)
4870 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4871 error ("target attribute or pragma changes single precision floating "
4872 "point");
4873 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4874 error ("target attribute or pragma changes double precision floating "
4875 "point");
4878 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4879 && rs6000_cpu != PROCESSOR_POWER5
4880 && rs6000_cpu != PROCESSOR_POWER6
4881 && rs6000_cpu != PROCESSOR_POWER7
4882 && rs6000_cpu != PROCESSOR_POWER8
4883 && rs6000_cpu != PROCESSOR_POWER9
4884 && rs6000_cpu != PROCESSOR_PPCA2
4885 && rs6000_cpu != PROCESSOR_CELL
4886 && rs6000_cpu != PROCESSOR_PPC476);
4887 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4888 || rs6000_cpu == PROCESSOR_POWER5
4889 || rs6000_cpu == PROCESSOR_POWER7
4890 || rs6000_cpu == PROCESSOR_POWER8);
4891 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4892 || rs6000_cpu == PROCESSOR_POWER5
4893 || rs6000_cpu == PROCESSOR_POWER6
4894 || rs6000_cpu == PROCESSOR_POWER7
4895 || rs6000_cpu == PROCESSOR_POWER8
4896 || rs6000_cpu == PROCESSOR_POWER9
4897 || rs6000_cpu == PROCESSOR_PPCE500MC
4898 || rs6000_cpu == PROCESSOR_PPCE500MC64
4899 || rs6000_cpu == PROCESSOR_PPCE5500
4900 || rs6000_cpu == PROCESSOR_PPCE6500);
4902 /* Allow debug switches to override the above settings. These are set to -1
4903 in rs6000.opt to indicate the user hasn't directly set the switch. */
4904 if (TARGET_ALWAYS_HINT >= 0)
4905 rs6000_always_hint = TARGET_ALWAYS_HINT;
4907 if (TARGET_SCHED_GROUPS >= 0)
4908 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4910 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4911 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4913 rs6000_sched_restricted_insns_priority
4914 = (rs6000_sched_groups ? 1 : 0);
4916 /* Handle -msched-costly-dep option. */
4917 rs6000_sched_costly_dep
4918 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4920 if (rs6000_sched_costly_dep_str)
4922 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4923 rs6000_sched_costly_dep = no_dep_costly;
4924 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4925 rs6000_sched_costly_dep = all_deps_costly;
4926 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4927 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4928 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4929 rs6000_sched_costly_dep = store_to_load_dep_costly;
4930 else
4931 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4932 atoi (rs6000_sched_costly_dep_str));
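/* A plain number is also accepted; per the option's documentation, any
   dependence whose latency is at least that number is treated as costly.  */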
4935 /* Handle -minsert-sched-nops option. */
4936 rs6000_sched_insert_nops
4937 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4939 if (rs6000_sched_insert_nops_str)
4941 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4942 rs6000_sched_insert_nops = sched_finish_none;
4943 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4944 rs6000_sched_insert_nops = sched_finish_pad_groups;
4945 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4946 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4947 else
4948 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4949 atoi (rs6000_sched_insert_nops_str));
4952 /* Handle the stack protector. */
4953 if (!global_options_set.x_rs6000_stack_protector_guard)
4954 #ifdef TARGET_THREAD_SSP_OFFSET
4955 rs6000_stack_protector_guard = SSP_TLS;
4956 #else
4957 rs6000_stack_protector_guard = SSP_GLOBAL;
4958 #endif
4960 #ifdef TARGET_THREAD_SSP_OFFSET
4961 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4962 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4963 #endif
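/* When TARGET_THREAD_SSP_OFFSET is defined the guard lives in the thread
   control block, addressed off the thread pointer: r13 in 64-bit mode and
   r2 in 32-bit mode.  */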
4965 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4967 char *endp;
4968 const char *str = rs6000_stack_protector_guard_offset_str;
4970 errno = 0;
4971 long offset = strtol (str, &endp, 0);
4972 if (!*str || *endp || errno)
4973 error ("%qs is not a valid number "
4974 "in -mstack-protector-guard-offset=", str);
4976 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4977 || (TARGET_64BIT && (offset & 3)))
4978 error ("%qs is not a valid offset "
4979 "in -mstack-protector-guard-offset=", str);
4981 rs6000_stack_protector_guard_offset = offset;
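/* Worked example (illustrative, not in the sources): on a 64-bit
   target, -mstack-protector-guard-offset=0x28 passes the checks above
   (0x28 is within [-0x8000, 0x7fff] and its low two bits are clear),
   whereas 0x7ffe would be rejected because TARGET_64BIT requires a
   4-byte-aligned offset.  */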
4984 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4986 const char *str = rs6000_stack_protector_guard_reg_str;
4987 int reg = decode_reg_name (str);
4989 if (!IN_RANGE (reg, 1, 31))
4990 error ("%qs is not a valid base register "
4991 "in -mstack-protector-guard-reg=", str);
4993 rs6000_stack_protector_guard_reg = reg;
4996 if (rs6000_stack_protector_guard == SSP_TLS
4997 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4998 error ("-mstack-protector-guard=tls needs a valid base register");
5000 if (global_init_p)
5002 #ifdef TARGET_REGNAMES
5003 /* If the user desires alternate register names, copy in the
5004 alternate names now. */
5005 if (TARGET_REGNAMES)
5006 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
5007 #endif
5009 /* Set aix_struct_return last, after the ABI is determined.
5010 If -maix-struct-return or -msvr4-struct-return was explicitly
5011 used, don't override with the ABI default. */
5012 if (!global_options_set.x_aix_struct_return)
5013 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
5015 #if 0
5016 /* IBM XL compiler defaults to unsigned bitfields. */
5017 if (TARGET_XL_COMPAT)
5018 flag_signed_bitfields = 0;
5019 #endif
5021 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
5022 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
5024 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
5026 /* We can only guarantee the availability of DI pseudo-ops when
5027 assembling for 64-bit targets. */
5028 if (!TARGET_64BIT)
5030 targetm.asm_out.aligned_op.di = NULL;
5031 targetm.asm_out.unaligned_op.di = NULL;
5035 /* Set branch target alignment, if not optimizing for size. */
5036 if (!optimize_size)
5038 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
5039 aligned 8-byte to avoid misprediction by the branch predictor. */
5040 if (rs6000_cpu == PROCESSOR_TITAN
5041 || rs6000_cpu == PROCESSOR_CELL)
5043 if (align_functions <= 0)
5044 align_functions = 8;
5045 if (align_jumps <= 0)
5046 align_jumps = 8;
5047 if (align_loops <= 0)
5048 align_loops = 8;
5050 if (rs6000_align_branch_targets)
5052 if (align_functions <= 0)
5053 align_functions = 16;
5054 if (align_jumps <= 0)
5055 align_jumps = 16;
5056 if (align_loops <= 0)
5058 can_override_loop_align = 1;
5059 align_loops = 16;
5062 if (align_jumps_max_skip <= 0)
5063 align_jumps_max_skip = 15;
5064 if (align_loops_max_skip <= 0)
5065 align_loops_max_skip = 15;
5068 /* Arrange to save and restore machine status around nested functions. */
5069 init_machine_status = rs6000_init_machine_status;
5071 /* We should always be splitting complex arguments, but we can't break
5072 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5073 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5074 targetm.calls.split_complex_arg = NULL;
5076 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5077 if (DEFAULT_ABI == ABI_AIX)
5078 targetm.calls.custom_function_descriptors = 0;
5081 /* Initialize rs6000_cost with the appropriate target costs. */
5082 if (optimize_size)
5083 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5084 else
5085 switch (rs6000_cpu)
5087 case PROCESSOR_RS64A:
5088 rs6000_cost = &rs64a_cost;
5089 break;
5091 case PROCESSOR_MPCCORE:
5092 rs6000_cost = &mpccore_cost;
5093 break;
5095 case PROCESSOR_PPC403:
5096 rs6000_cost = &ppc403_cost;
5097 break;
5099 case PROCESSOR_PPC405:
5100 rs6000_cost = &ppc405_cost;
5101 break;
5103 case PROCESSOR_PPC440:
5104 rs6000_cost = &ppc440_cost;
5105 break;
5107 case PROCESSOR_PPC476:
5108 rs6000_cost = &ppc476_cost;
5109 break;
5111 case PROCESSOR_PPC601:
5112 rs6000_cost = &ppc601_cost;
5113 break;
5115 case PROCESSOR_PPC603:
5116 rs6000_cost = &ppc603_cost;
5117 break;
5119 case PROCESSOR_PPC604:
5120 rs6000_cost = &ppc604_cost;
5121 break;
5123 case PROCESSOR_PPC604e:
5124 rs6000_cost = &ppc604e_cost;
5125 break;
5127 case PROCESSOR_PPC620:
5128 rs6000_cost = &ppc620_cost;
5129 break;
5131 case PROCESSOR_PPC630:
5132 rs6000_cost = &ppc630_cost;
5133 break;
5135 case PROCESSOR_CELL:
5136 rs6000_cost = &ppccell_cost;
5137 break;
5139 case PROCESSOR_PPC750:
5140 case PROCESSOR_PPC7400:
5141 rs6000_cost = &ppc750_cost;
5142 break;
5144 case PROCESSOR_PPC7450:
5145 rs6000_cost = &ppc7450_cost;
5146 break;
5148 case PROCESSOR_PPC8540:
5149 case PROCESSOR_PPC8548:
5150 rs6000_cost = &ppc8540_cost;
5151 break;
5153 case PROCESSOR_PPCE300C2:
5154 case PROCESSOR_PPCE300C3:
5155 rs6000_cost = &ppce300c2c3_cost;
5156 break;
5158 case PROCESSOR_PPCE500MC:
5159 rs6000_cost = &ppce500mc_cost;
5160 break;
5162 case PROCESSOR_PPCE500MC64:
5163 rs6000_cost = &ppce500mc64_cost;
5164 break;
5166 case PROCESSOR_PPCE5500:
5167 rs6000_cost = &ppce5500_cost;
5168 break;
5170 case PROCESSOR_PPCE6500:
5171 rs6000_cost = &ppce6500_cost;
5172 break;
5174 case PROCESSOR_TITAN:
5175 rs6000_cost = &titan_cost;
5176 break;
5178 case PROCESSOR_POWER4:
5179 case PROCESSOR_POWER5:
5180 rs6000_cost = &power4_cost;
5181 break;
5183 case PROCESSOR_POWER6:
5184 rs6000_cost = &power6_cost;
5185 break;
5187 case PROCESSOR_POWER7:
5188 rs6000_cost = &power7_cost;
5189 break;
5191 case PROCESSOR_POWER8:
5192 rs6000_cost = &power8_cost;
5193 break;
5195 case PROCESSOR_POWER9:
5196 rs6000_cost = &power9_cost;
5197 break;
5199 case PROCESSOR_PPCA2:
5200 rs6000_cost = &ppca2_cost;
5201 break;
5203 default:
5204 gcc_unreachable ();
5207 if (global_init_p)
5209 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5210 rs6000_cost->simultaneous_prefetches,
5211 global_options.x_param_values,
5212 global_options_set.x_param_values);
5213 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5214 global_options.x_param_values,
5215 global_options_set.x_param_values);
5216 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5217 rs6000_cost->cache_line_size,
5218 global_options.x_param_values,
5219 global_options_set.x_param_values);
5220 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5221 global_options.x_param_values,
5222 global_options_set.x_param_values);
5224 /* Increase loop peeling limits based on performance analysis. */
5225 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5226 global_options.x_param_values,
5227 global_options_set.x_param_values);
5228 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5229 global_options.x_param_values,
5230 global_options_set.x_param_values);
5232 /* Use the 'model' -fsched-pressure algorithm by default. */
5233 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5234 SCHED_PRESSURE_MODEL,
5235 global_options.x_param_values,
5236 global_options_set.x_param_values);
5238 /* If using typedef char *va_list, signal that
5239 __builtin_va_start (&ap, 0) can be optimized to
5240 ap = __builtin_next_arg (0). */
5241 if (DEFAULT_ABI != ABI_V4)
5242 targetm.expand_builtin_va_start = NULL;
5245 /* Set up single/double float flags.
5246 If TARGET_HARD_FLOAT is set, but neither single or double is set,
5247 then set both flags. */
5248 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5249 rs6000_single_float = rs6000_double_float = 1;
5251 /* If not explicitly specified via option, decide whether to generate indexed
5252 load/store instructions. A value of -1 indicates that the
5253 initial value of this variable has not been overwritten. During
5254 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5255 if (TARGET_AVOID_XFORM == -1)
5256 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5257 DERAT mispredict penalty. However, the LVE and STVE AltiVec instructions
5258 need indexed accesses, and the type used is the scalar type of the element
5259 being loaded or stored. */
5260 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5261 && !TARGET_ALTIVEC);
5263 /* Set the -mrecip options. */
5264 if (rs6000_recip_name)
5266 char *p = ASTRDUP (rs6000_recip_name);
5267 char *q;
5268 unsigned int mask, i;
5269 bool invert;
5271 while ((q = strtok (p, ",")) != NULL)
5273 p = NULL;
5274 if (*q == '!')
5276 invert = true;
5277 q++;
5279 else
5280 invert = false;
5282 if (!strcmp (q, "default"))
5283 mask = ((TARGET_RECIP_PRECISION)
5284 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5285 else
5287 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5288 if (!strcmp (q, recip_options[i].string))
5290 mask = recip_options[i].mask;
5291 break;
5294 if (i == ARRAY_SIZE (recip_options))
5296 error ("unknown option for -mrecip=%s", q);
5297 invert = false;
5298 mask = 0;
5299 ret = false;
5303 if (invert)
5304 rs6000_recip_control &= ~mask;
5305 else
5306 rs6000_recip_control |= mask;
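/* Illustrative example (assuming the recip_options table contains the
   usual "rsqrtf"/"divd" entries): -mrecip=rsqrtf,!divd enables the
   reciprocal square-root estimate for float while the leading '!'
   masks off the reciprocal-divide estimate for double, and
   -mrecip=default selects RECIP_HIGH_PRECISION or RECIP_LOW_PRECISION
   depending on TARGET_RECIP_PRECISION, as coded above.  */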
5310 /* Set the builtin mask of the various options used that could affect which
5311 builtins were used. In the past we used target_flags, but we've run out
5312 of bits, and some options like PAIRED are no longer in target_flags. */
5313 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5314 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5315 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5316 rs6000_builtin_mask);
5318 /* Initialize all of the registers. */
5319 rs6000_init_hard_regno_mode_ok (global_init_p);
5321 /* Save the initial options in case the user does function specific options */
5322 if (global_init_p)
5323 target_option_default_node = target_option_current_node
5324 = build_target_option_node (&global_options);
5326 /* If not explicitly specified via option, decide whether to generate the
5327 extra blr's required to preserve the link stack on some cpus (eg, 476). */
5328 if (TARGET_LINK_STACK == -1)
5329 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5331 return ret;
5334 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5335 define the target cpu type. */
5337 static void
5338 rs6000_option_override (void)
5340 (void) rs6000_option_override_internal (true);
5344 /* Implement targetm.vectorize.builtin_mask_for_load. */
5345 static tree
5346 rs6000_builtin_mask_for_load (void)
5348 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5349 if ((TARGET_ALTIVEC && !TARGET_VSX)
5350 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5351 return altivec_builtin_mask_for_load;
5352 else
5353 return 0;
5356 /* Implement LOOP_ALIGN. */
5358 rs6000_loop_align (rtx label)
5360 basic_block bb;
5361 int ninsns;
5363 /* Don't override loop alignment if -falign-loops was specified. */
5364 if (!can_override_loop_align)
5365 return align_loops_log;
5367 bb = BLOCK_FOR_INSN (label);
5368 ninsns = num_loop_insns (bb->loop_father);
5370 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5371 if (ninsns > 4 && ninsns <= 8
5372 && (rs6000_cpu == PROCESSOR_POWER4
5373 || rs6000_cpu == PROCESSOR_POWER5
5374 || rs6000_cpu == PROCESSOR_POWER6
5375 || rs6000_cpu == PROCESSOR_POWER7
5376 || rs6000_cpu == PROCESSOR_POWER8
5377 || rs6000_cpu == PROCESSOR_POWER9))
5378 return 5;
5379 else
5380 return align_loops_log;
5383 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5384 static int
5385 rs6000_loop_align_max_skip (rtx_insn *label)
5387 return (1 << rs6000_loop_align (label)) - 1;
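/* Worked example (illustrative): a loop of 6 insns on POWER8 makes
   rs6000_loop_align return 5, i.e. a 2**5 == 32-byte boundary, and
   rs6000_loop_align_max_skip then permits up to (1 << 5) - 1 == 31
   bytes of padding to reach that boundary.  */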
5390 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5391 after applying N iterations. This routine does not determine
5392 how many iterations are required to reach the desired alignment. */
5394 static bool
5395 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5397 if (is_packed)
5398 return false;
5400 if (TARGET_32BIT)
5402 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5403 return true;
5405 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5406 return true;
5408 return false;
5410 else
5412 if (TARGET_MACHO)
5413 return false;
5415 /* Assuming that all other types are naturally aligned. CHECKME! */
5416 return true;
5420 /* Return true if the vector misalignment factor is supported by the
5421 target. */
5422 static bool
5423 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5424 const_tree type,
5425 int misalignment,
5426 bool is_packed)
5428 if (TARGET_VSX)
5430 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5431 return true;
5433 /* Return false if the movmisalign pattern is not supported for this mode. */
5434 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5435 return false;
5437 if (misalignment == -1)
5439 /* Misalignment factor is unknown at compile time but we know
5440 it's word aligned. */
5441 if (rs6000_vector_alignment_reachable (type, is_packed))
5443 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5445 if (element_size == 64 || element_size == 32)
5446 return true;
5449 return false;
5452 /* VSX supports word-aligned vectors. */
5453 if (misalignment % 4 == 0)
5454 return true;
5456 return false;
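/* Example (illustrative): without TARGET_EFFICIENT_UNALIGNED_VSX, a
   VSX access with a known misalignment of 8 is accepted above
   (8 % 4 == 0), while a misalignment of 2 is rejected.  */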
5459 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5460 static int
5461 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5462 tree vectype, int misalign)
5464 unsigned elements;
5465 tree elem_type;
5467 switch (type_of_cost)
5469 case scalar_stmt:
5470 case scalar_load:
5471 case scalar_store:
5472 case vector_stmt:
5473 case vector_load:
5474 case vector_store:
5475 case vec_to_scalar:
5476 case scalar_to_vec:
5477 case cond_branch_not_taken:
5478 return 1;
5480 case vec_perm:
5481 if (TARGET_VSX)
5482 return 3;
5483 else
5484 return 1;
5486 case vec_promote_demote:
5487 if (TARGET_VSX)
5488 return 4;
5489 else
5490 return 1;
5492 case cond_branch_taken:
5493 return 3;
5495 case unaligned_load:
5496 if (TARGET_P9_VECTOR)
5497 return 3;
5499 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5500 return 1;
5502 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5504 elements = TYPE_VECTOR_SUBPARTS (vectype);
5505 if (elements == 2)
5506 /* Double word aligned. */
5507 return 2;
5509 if (elements == 4)
5511 switch (misalign)
5513 case 8:
5514 /* Double word aligned. */
5515 return 2;
5517 case -1:
5518 /* Unknown misalignment. */
5519 case 4:
5520 case 12:
5521 /* Word aligned. */
5522 return 22;
5524 default:
5525 gcc_unreachable ();
5530 if (TARGET_ALTIVEC)
5531 /* Misaligned loads are not supported. */
5532 gcc_unreachable ();
5534 return 2;
5536 case unaligned_store:
5537 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5538 return 1;
5540 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5542 elements = TYPE_VECTOR_SUBPARTS (vectype);
5543 if (elements == 2)
5544 /* Double word aligned. */
5545 return 2;
5547 if (elements == 4)
5549 switch (misalign)
5551 case 8:
5552 /* Double word aligned. */
5553 return 2;
5555 case -1:
5556 /* Unknown misalignment. */
5557 case 4:
5558 case 12:
5559 /* Word aligned. */
5560 return 23;
5562 default:
5563 gcc_unreachable ();
5568 if (TARGET_ALTIVEC)
5569 /* Misaligned stores are not supported. */
5570 gcc_unreachable ();
5572 return 2;
5574 case vec_construct:
5575 /* This is a rough approximation assuming non-constant elements
5576 constructed into a vector via element insertion. FIXME:
5577 vec_construct is not granular enough for uniformly good
5578 decisions. If the initialization is a splat, this is
5579 cheaper than we estimate. Improve this someday. */
5580 elem_type = TREE_TYPE (vectype);
5581 /* 32-bit vectors loaded into registers are stored as double
5582 precision, so we need 2 permutes, 2 converts, and 1 merge
5583 to construct a vector of short floats from them. */
5584 if (SCALAR_FLOAT_TYPE_P (elem_type)
5585 && TYPE_PRECISION (elem_type) == 32)
5586 return 5;
5587 /* On POWER9, integer vector types are built up in GPRs and then
5588 use a direct move (2 cycles). For POWER8 this is even worse,
5589 as we need two direct moves and a merge, and the direct moves
5590 are five cycles. */
5591 else if (INTEGRAL_TYPE_P (elem_type))
5593 if (TARGET_P9_VECTOR)
5594 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5595 else
5596 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5598 else
5599 /* V2DFmode doesn't need a direct move. */
5600 return 2;
5602 default:
5603 gcc_unreachable ();
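/* Worked example (illustrative): with VSX and -mallow-movmisalign but
   neither ISA 3.0 vectors nor efficient unaligned VSX, an
   unaligned_load of a 4-element vector costs 2 when misalign == 8
   (double-word aligned) but 22 when it is 4, 12 or unknown (-1),
   which strongly biases the vectorizer toward peeling for
   alignment.  */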
5607 /* Implement targetm.vectorize.preferred_simd_mode. */
5609 static machine_mode
5610 rs6000_preferred_simd_mode (machine_mode mode)
5612 if (TARGET_VSX)
5613 switch (mode)
5615 case DFmode:
5616 return V2DFmode;
5617 default:;
5619 if (TARGET_ALTIVEC || TARGET_VSX)
5620 switch (mode)
5622 case SFmode:
5623 return V4SFmode;
5624 case TImode:
5625 return V1TImode;
5626 case DImode:
5627 return V2DImode;
5628 case SImode:
5629 return V4SImode;
5630 case HImode:
5631 return V8HImode;
5632 case QImode:
5633 return V16QImode;
5634 default:;
5636 if (TARGET_PAIRED_FLOAT
5637 && mode == SFmode)
5638 return V2SFmode;
5639 return word_mode;
5642 typedef struct _rs6000_cost_data
5644 struct loop *loop_info;
5645 unsigned cost[3];
5646 } rs6000_cost_data;
5648 /* Test for likely overcommitment of vector hardware resources. If a
5649 loop iteration is relatively large, and too large a percentage of
5650 instructions in the loop are vectorized, the cost model may not
5651 adequately reflect delays from unavailable vector resources.
5652 Penalize the loop body cost for this case. */
5654 static void
5655 rs6000_density_test (rs6000_cost_data *data)
5657 const int DENSITY_PCT_THRESHOLD = 85;
5658 const int DENSITY_SIZE_THRESHOLD = 70;
5659 const int DENSITY_PENALTY = 10;
5660 struct loop *loop = data->loop_info;
5661 basic_block *bbs = get_loop_body (loop);
5662 int nbbs = loop->num_nodes;
5663 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5664 int i, density_pct;
5666 for (i = 0; i < nbbs; i++)
5668 basic_block bb = bbs[i];
5669 gimple_stmt_iterator gsi;
5671 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5673 gimple *stmt = gsi_stmt (gsi);
5674 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5676 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5677 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5678 not_vec_cost++;
5682 free (bbs);
5683 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5685 if (density_pct > DENSITY_PCT_THRESHOLD
5686 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5688 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5689 if (dump_enabled_p ())
5690 dump_printf_loc (MSG_NOTE, vect_location,
5691 "density %d%%, cost %d exceeds threshold, penalizing "
5692 "loop body cost by %d%%", density_pct,
5693 vec_cost + not_vec_cost, DENSITY_PENALTY);
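/* Worked example (illustrative): with vec_cost == 90 and
   not_vec_cost == 10, density_pct is 90 and the body size is 100, so
   both thresholds (85% and 70) are exceeded and the body cost is
   scaled to 90 * (100 + 10) / 100 == 99.  */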
5697 /* Implement targetm.vectorize.init_cost. */
5699 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5700 instruction is needed by the vectorization. */
5701 static bool rs6000_vect_nonmem;
5703 static void *
5704 rs6000_init_cost (struct loop *loop_info)
5706 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5707 data->loop_info = loop_info;
5708 data->cost[vect_prologue] = 0;
5709 data->cost[vect_body] = 0;
5710 data->cost[vect_epilogue] = 0;
5711 rs6000_vect_nonmem = false;
5712 return data;
5715 /* Implement targetm.vectorize.add_stmt_cost. */
5717 static unsigned
5718 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5719 struct _stmt_vec_info *stmt_info, int misalign,
5720 enum vect_cost_model_location where)
5722 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5723 unsigned retval = 0;
5725 if (flag_vect_cost_model)
5727 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5728 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5729 misalign);
5730 /* Statements in an inner loop relative to the loop being
5731 vectorized are weighted more heavily. The value here is
5732 arbitrary and could potentially be improved with analysis. */
5733 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5734 count *= 50; /* FIXME. */
5736 retval = (unsigned) (count * stmt_cost);
5737 cost_data->cost[where] += retval;
5739 /* Check whether we're doing something other than just a copy loop.
5740 Not all such loops may be profitably vectorized; see
5741 rs6000_finish_cost. */
5742 if ((kind == vec_to_scalar || kind == vec_perm
5743 || kind == vec_promote_demote || kind == vec_construct
5744 || kind == scalar_to_vec)
5745 || (where == vect_body && kind == vector_stmt))
5746 rs6000_vect_nonmem = true;
5749 return retval;
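/* Example (illustrative): a vector_stmt of unit cost appearing with
   count == 2 in an inner loop relative to the loop being vectorized
   is charged 2 * 50 * 1 == 100 units against vect_body, reflecting
   the arbitrary 50x inner-loop weight above.  */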
5752 /* Implement targetm.vectorize.finish_cost. */
5754 static void
5755 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5756 unsigned *body_cost, unsigned *epilogue_cost)
5758 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5760 if (cost_data->loop_info)
5761 rs6000_density_test (cost_data);
5763 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5764 that require versioning for any reason. The vectorization is at
5765 best a wash inside the loop, and the versioning checks make
5766 profitability highly unlikely and potentially quite harmful. */
5767 if (cost_data->loop_info)
5769 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5770 if (!rs6000_vect_nonmem
5771 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5772 && LOOP_REQUIRES_VERSIONING (vec_info))
5773 cost_data->cost[vect_body] += 10000;
5776 *prologue_cost = cost_data->cost[vect_prologue];
5777 *body_cost = cost_data->cost[vect_body];
5778 *epilogue_cost = cost_data->cost[vect_epilogue];
5781 /* Implement targetm.vectorize.destroy_cost_data. */
5783 static void
5784 rs6000_destroy_cost_data (void *data)
5786 free (data);
5789 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5790 library with vectorized intrinsics. */
5792 static tree
5793 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5794 tree type_in)
5796 char name[32];
5797 const char *suffix = NULL;
5798 tree fntype, new_fndecl, bdecl = NULL_TREE;
5799 int n_args = 1;
5800 const char *bname;
5801 machine_mode el_mode, in_mode;
5802 int n, in_n;
5804 /* Libmass is suitable only for unsafe math, as it does not correctly support
5805 parts of IEEE (such as denormals) with the required precision. Only support
5806 it if we have VSX to use the simd d2 or f4 functions.
5807 XXX: Add variable length support. */
5808 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5809 return NULL_TREE;
5811 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5812 n = TYPE_VECTOR_SUBPARTS (type_out);
5813 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5814 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5815 if (el_mode != in_mode
5816 || n != in_n)
5817 return NULL_TREE;
5819 switch (fn)
5821 CASE_CFN_ATAN2:
5822 CASE_CFN_HYPOT:
5823 CASE_CFN_POW:
5824 n_args = 2;
5825 gcc_fallthrough ();
5827 CASE_CFN_ACOS:
5828 CASE_CFN_ACOSH:
5829 CASE_CFN_ASIN:
5830 CASE_CFN_ASINH:
5831 CASE_CFN_ATAN:
5832 CASE_CFN_ATANH:
5833 CASE_CFN_CBRT:
5834 CASE_CFN_COS:
5835 CASE_CFN_COSH:
5836 CASE_CFN_ERF:
5837 CASE_CFN_ERFC:
5838 CASE_CFN_EXP2:
5839 CASE_CFN_EXP:
5840 CASE_CFN_EXPM1:
5841 CASE_CFN_LGAMMA:
5842 CASE_CFN_LOG10:
5843 CASE_CFN_LOG1P:
5844 CASE_CFN_LOG2:
5845 CASE_CFN_LOG:
5846 CASE_CFN_SIN:
5847 CASE_CFN_SINH:
5848 CASE_CFN_SQRT:
5849 CASE_CFN_TAN:
5850 CASE_CFN_TANH:
5851 if (el_mode == DFmode && n == 2)
5853 bdecl = mathfn_built_in (double_type_node, fn);
5854 suffix = "d2"; /* pow -> powd2 */
5856 else if (el_mode == SFmode && n == 4)
5858 bdecl = mathfn_built_in (float_type_node, fn);
5859 suffix = "4"; /* powf -> powf4 */
5861 else
5862 return NULL_TREE;
5863 if (!bdecl)
5864 return NULL_TREE;
5865 break;
5867 default:
5868 return NULL_TREE;
5871 gcc_assert (suffix != NULL);
5872 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5873 if (!bname)
5874 return NULL_TREE;
5876 strcpy (name, bname + sizeof ("__builtin_") - 1);
5877 strcat (name, suffix);
5879 if (n_args == 1)
5880 fntype = build_function_type_list (type_out, type_in, NULL);
5881 else if (n_args == 2)
5882 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5883 else
5884 gcc_unreachable ();
5886 /* Build a function declaration for the vectorized function. */
5887 new_fndecl = build_decl (BUILTINS_LOCATION,
5888 FUNCTION_DECL, get_identifier (name), fntype);
5889 TREE_PUBLIC (new_fndecl) = 1;
5890 DECL_EXTERNAL (new_fndecl) = 1;
5891 DECL_IS_NOVOPS (new_fndecl) = 1;
5892 TREE_READONLY (new_fndecl) = 1;
5894 return new_fndecl;
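/* Naming example (illustrative): vectorizing pow for V2DFmode strips
   the "__builtin_" prefix from "__builtin_pow" and appends "d2",
   producing a call to the MASS routine "powd2"; powf for V4SFmode
   similarly becomes "powf4".  */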
5897 /* Returns a function decl for a vectorized version of the builtin function
5898 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5899 if it is not available. */
5901 static tree
5902 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5903 tree type_in)
5905 machine_mode in_mode, out_mode;
5906 int in_n, out_n;
5908 if (TARGET_DEBUG_BUILTIN)
5909 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5910 combined_fn_name (combined_fn (fn)),
5911 GET_MODE_NAME (TYPE_MODE (type_out)),
5912 GET_MODE_NAME (TYPE_MODE (type_in)));
5914 if (TREE_CODE (type_out) != VECTOR_TYPE
5915 || TREE_CODE (type_in) != VECTOR_TYPE
5916 || !TARGET_VECTORIZE_BUILTINS)
5917 return NULL_TREE;
5919 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5920 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5921 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5922 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5924 switch (fn)
5926 CASE_CFN_COPYSIGN:
5927 if (VECTOR_UNIT_VSX_P (V2DFmode)
5928 && out_mode == DFmode && out_n == 2
5929 && in_mode == DFmode && in_n == 2)
5930 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5931 if (VECTOR_UNIT_VSX_P (V4SFmode)
5932 && out_mode == SFmode && out_n == 4
5933 && in_mode == SFmode && in_n == 4)
5934 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5935 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5936 && out_mode == SFmode && out_n == 4
5937 && in_mode == SFmode && in_n == 4)
5938 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5939 break;
5940 CASE_CFN_CEIL:
5941 if (VECTOR_UNIT_VSX_P (V2DFmode)
5942 && out_mode == DFmode && out_n == 2
5943 && in_mode == DFmode && in_n == 2)
5944 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5945 if (VECTOR_UNIT_VSX_P (V4SFmode)
5946 && out_mode == SFmode && out_n == 4
5947 && in_mode == SFmode && in_n == 4)
5948 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5949 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5950 && out_mode == SFmode && out_n == 4
5951 && in_mode == SFmode && in_n == 4)
5952 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5953 break;
5954 CASE_CFN_FLOOR:
5955 if (VECTOR_UNIT_VSX_P (V2DFmode)
5956 && out_mode == DFmode && out_n == 2
5957 && in_mode == DFmode && in_n == 2)
5958 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5959 if (VECTOR_UNIT_VSX_P (V4SFmode)
5960 && out_mode == SFmode && out_n == 4
5961 && in_mode == SFmode && in_n == 4)
5962 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5963 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5964 && out_mode == SFmode && out_n == 4
5965 && in_mode == SFmode && in_n == 4)
5966 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5967 break;
5968 CASE_CFN_FMA:
5969 if (VECTOR_UNIT_VSX_P (V2DFmode)
5970 && out_mode == DFmode && out_n == 2
5971 && in_mode == DFmode && in_n == 2)
5972 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5973 if (VECTOR_UNIT_VSX_P (V4SFmode)
5974 && out_mode == SFmode && out_n == 4
5975 && in_mode == SFmode && in_n == 4)
5976 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5977 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5978 && out_mode == SFmode && out_n == 4
5979 && in_mode == SFmode && in_n == 4)
5980 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5981 break;
5982 CASE_CFN_TRUNC:
5983 if (VECTOR_UNIT_VSX_P (V2DFmode)
5984 && out_mode == DFmode && out_n == 2
5985 && in_mode == DFmode && in_n == 2)
5986 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5987 if (VECTOR_UNIT_VSX_P (V4SFmode)
5988 && out_mode == SFmode && out_n == 4
5989 && in_mode == SFmode && in_n == 4)
5990 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5991 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5992 && out_mode == SFmode && out_n == 4
5993 && in_mode == SFmode && in_n == 4)
5994 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5995 break;
5996 CASE_CFN_NEARBYINT:
5997 if (VECTOR_UNIT_VSX_P (V2DFmode)
5998 && flag_unsafe_math_optimizations
5999 && out_mode == DFmode && out_n == 2
6000 && in_mode == DFmode && in_n == 2)
6001 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
6002 if (VECTOR_UNIT_VSX_P (V4SFmode)
6003 && flag_unsafe_math_optimizations
6004 && out_mode == SFmode && out_n == 4
6005 && in_mode == SFmode && in_n == 4)
6006 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
6007 break;
6008 CASE_CFN_RINT:
6009 if (VECTOR_UNIT_VSX_P (V2DFmode)
6010 && !flag_trapping_math
6011 && out_mode == DFmode && out_n == 2
6012 && in_mode == DFmode && in_n == 2)
6013 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
6014 if (VECTOR_UNIT_VSX_P (V4SFmode)
6015 && !flag_trapping_math
6016 && out_mode == SFmode && out_n == 4
6017 && in_mode == SFmode && in_n == 4)
6018 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
6019 break;
6020 default:
6021 break;
6024 /* Generate calls to libmass if appropriate. */
6025 if (rs6000_veclib_handler)
6026 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
6028 return NULL_TREE;
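/* Example (illustrative): vectorizing ceil with DFmode in and out and
   two elements on a VSX target returns the decl for
   VSX_BUILTIN_XVRDPIP above, so the vectorized loop uses xvrdpip
   rather than calling libm.  */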
6031 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
6033 static tree
6034 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
6035 tree type_in)
6037 machine_mode in_mode, out_mode;
6038 int in_n, out_n;
6040 if (TARGET_DEBUG_BUILTIN)
6041 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6042 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6043 GET_MODE_NAME (TYPE_MODE (type_out)),
6044 GET_MODE_NAME (TYPE_MODE (type_in)));
6046 if (TREE_CODE (type_out) != VECTOR_TYPE
6047 || TREE_CODE (type_in) != VECTOR_TYPE
6048 || !TARGET_VECTORIZE_BUILTINS)
6049 return NULL_TREE;
6051 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6052 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6053 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6054 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6056 enum rs6000_builtins fn
6057 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6058 switch (fn)
6060 case RS6000_BUILTIN_RSQRTF:
6061 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6062 && out_mode == SFmode && out_n == 4
6063 && in_mode == SFmode && in_n == 4)
6064 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6065 break;
6066 case RS6000_BUILTIN_RSQRT:
6067 if (VECTOR_UNIT_VSX_P (V2DFmode)
6068 && out_mode == DFmode && out_n == 2
6069 && in_mode == DFmode && in_n == 2)
6070 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6071 break;
6072 case RS6000_BUILTIN_RECIPF:
6073 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6074 && out_mode == SFmode && out_n == 4
6075 && in_mode == SFmode && in_n == 4)
6076 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6077 break;
6078 case RS6000_BUILTIN_RECIP:
6079 if (VECTOR_UNIT_VSX_P (V2DFmode)
6080 && out_mode == DFmode && out_n == 2
6081 && in_mode == DFmode && in_n == 2)
6082 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6083 break;
6084 default:
6085 break;
6087 return NULL_TREE;
6090 /* Default CPU string for rs6000*_file_start functions. */
6091 static const char *rs6000_default_cpu;
6093 /* Do anything needed at the start of the asm file. */
6095 static void
6096 rs6000_file_start (void)
6098 char buffer[80];
6099 const char *start = buffer;
6100 FILE *file = asm_out_file;
6102 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6104 default_file_start ();
6106 if (flag_verbose_asm)
6108 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6110 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6112 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6113 start = "";
6116 if (global_options_set.x_rs6000_cpu_index)
6118 fprintf (file, "%s -mcpu=%s", start,
6119 processor_target_table[rs6000_cpu_index].name);
6120 start = "";
6123 if (global_options_set.x_rs6000_tune_index)
6125 fprintf (file, "%s -mtune=%s", start,
6126 processor_target_table[rs6000_tune_index].name);
6127 start = "";
6130 if (PPC405_ERRATUM77)
6132 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6133 start = "";
6136 #ifdef USING_ELFOS_H
6137 switch (rs6000_sdata)
6139 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6140 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6141 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6142 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6145 if (rs6000_sdata && g_switch_value)
6147 fprintf (file, "%s -G %d", start,
6148 g_switch_value);
6149 start = "";
6151 #endif
6153 if (*start == '\0')
6154 putc ('\n', file);
6157 #ifdef USING_ELFOS_H
6158 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6159 && !global_options_set.x_rs6000_cpu_index)
6161 fputs ("\t.machine ", asm_out_file);
6162 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6163 fputs ("power9\n", asm_out_file);
6164 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6165 fputs ("power8\n", asm_out_file);
6166 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6167 fputs ("power7\n", asm_out_file);
6168 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6169 fputs ("power6\n", asm_out_file);
6170 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6171 fputs ("power5\n", asm_out_file);
6172 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6173 fputs ("power4\n", asm_out_file);
6174 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6175 fputs ("ppc64\n", asm_out_file);
6176 else
6177 fputs ("ppc\n", asm_out_file);
6179 #endif
6181 if (DEFAULT_ABI == ABI_ELFv2)
6182 fprintf (file, "\t.abiversion 2\n");
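/* Sample output (illustrative, assuming an ELF target where
   ASM_COMMENT_START is "#"):
     # rs6000/powerpc options: --with-cpu=power8 -mcpu=power9
     .machine power9
     .abiversion 2
   The ".machine" line is chosen by the ISA flag tests above.  */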
6186 /* Return nonzero if this function is known to have a null epilogue. */
6189 direct_return (void)
6191 if (reload_completed)
6193 rs6000_stack_t *info = rs6000_stack_info ();
6195 if (info->first_gp_reg_save == 32
6196 && info->first_fp_reg_save == 64
6197 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6198 && ! info->lr_save_p
6199 && ! info->cr_save_p
6200 && info->vrsave_size == 0
6201 && ! info->push_p)
6202 return 1;
6205 return 0;
6208 /* Return the number of instructions it takes to form a constant in an
6209 integer register. */
6212 num_insns_constant_wide (HOST_WIDE_INT value)
6214 /* signed constant loadable with addi */
6215 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6216 return 1;
6218 /* constant loadable with addis */
6219 else if ((value & 0xffff) == 0
6220 && (value >> 31 == -1 || value >> 31 == 0))
6221 return 1;
6223 else if (TARGET_POWERPC64)
6225 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6226 HOST_WIDE_INT high = value >> 31;
6228 if (high == 0 || high == -1)
6229 return 2;
6231 high >>= 1;
6233 if (low == 0)
6234 return num_insns_constant_wide (high) + 1;
6235 else if (high == 0)
6236 return num_insns_constant_wide (low) + 1;
6237 else
6238 return (num_insns_constant_wide (high)
6239 + num_insns_constant_wide (low) + 1);
6242 else
6243 return 2;
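/* Worked examples (illustrative): 0x7fff costs 1 (li/addi);
   0x12340000 costs 1 (lis/addis, low halfword zero); on a 64-bit
   target, 0x100000000 splits into high == 1 and low == 0 and costs
   num_insns_constant_wide (1) + 1 == 2, e.g. an li followed by a
   shift.  */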
6247 num_insns_constant (rtx op, machine_mode mode)
6249 HOST_WIDE_INT low, high;
6251 switch (GET_CODE (op))
6253 case CONST_INT:
6254 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6255 && rs6000_is_valid_and_mask (op, mode))
6256 return 2;
6257 else
6258 return num_insns_constant_wide (INTVAL (op));
6260 case CONST_WIDE_INT:
6262 int i;
6263 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6264 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6265 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6266 return ins;
6269 case CONST_DOUBLE:
6270 if (mode == SFmode || mode == SDmode)
6272 long l;
6274 if (DECIMAL_FLOAT_MODE_P (mode))
6275 REAL_VALUE_TO_TARGET_DECIMAL32
6276 (*CONST_DOUBLE_REAL_VALUE (op), l);
6277 else
6278 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6279 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6282 long l[2];
6283 if (DECIMAL_FLOAT_MODE_P (mode))
6284 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6285 else
6286 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6287 high = l[WORDS_BIG_ENDIAN == 0];
6288 low = l[WORDS_BIG_ENDIAN != 0];
6290 if (TARGET_32BIT)
6291 return (num_insns_constant_wide (low)
6292 + num_insns_constant_wide (high));
6293 else
6295 if ((high == 0 && low >= 0)
6296 || (high == -1 && low < 0))
6297 return num_insns_constant_wide (low);
6299 else if (rs6000_is_valid_and_mask (op, mode))
6300 return 2;
6302 else if (low == 0)
6303 return num_insns_constant_wide (high) + 1;
6305 else
6306 return (num_insns_constant_wide (high)
6307 + num_insns_constant_wide (low) + 1);
6310 default:
6311 gcc_unreachable ();
6315 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6316 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6317 corresponding element of the vector, but for V4SFmode and V2SFmode,
6318 the corresponding "float" is interpreted as an SImode integer. */
6320 HOST_WIDE_INT
6321 const_vector_elt_as_int (rtx op, unsigned int elt)
6323 rtx tmp;
6325 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6326 gcc_assert (GET_MODE (op) != V2DImode
6327 && GET_MODE (op) != V2DFmode);
6329 tmp = CONST_VECTOR_ELT (op, elt);
6330 if (GET_MODE (op) == V4SFmode
6331 || GET_MODE (op) == V2SFmode)
6332 tmp = gen_lowpart (SImode, tmp);
6333 return INTVAL (tmp);
6336 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6337 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6338 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6339 all items are set to the same value and contain COPIES replicas of the
6340 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6341 operand and the others are set to the value of the operand's msb. */
6343 static bool
6344 vspltis_constant (rtx op, unsigned step, unsigned copies)
6346 machine_mode mode = GET_MODE (op);
6347 machine_mode inner = GET_MODE_INNER (mode);
6349 unsigned i;
6350 unsigned nunits;
6351 unsigned bitsize;
6352 unsigned mask;
6354 HOST_WIDE_INT val;
6355 HOST_WIDE_INT splat_val;
6356 HOST_WIDE_INT msb_val;
6358 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6359 return false;
6361 nunits = GET_MODE_NUNITS (mode);
6362 bitsize = GET_MODE_BITSIZE (inner);
6363 mask = GET_MODE_MASK (inner);
6365 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6366 splat_val = val;
6367 msb_val = val >= 0 ? 0 : -1;
6369 /* Construct the value to be splatted, if possible. If not, return 0. */
6370 for (i = 2; i <= copies; i *= 2)
6372 HOST_WIDE_INT small_val;
6373 bitsize /= 2;
6374 small_val = splat_val >> bitsize;
6375 mask >>= bitsize;
6376 if (splat_val != ((HOST_WIDE_INT)
6377 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6378 | (small_val & mask)))
6379 return false;
6380 splat_val = small_val;
6383 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6384 if (EASY_VECTOR_15 (splat_val))
6387 /* Also check if we can splat, and then add the result to itself. Do so if
5388 the value is positive, or if the splat instruction is using OP's mode;
6389 for splat_val < 0, the splat and the add should use the same mode. */
6390 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6391 && (splat_val >= 0 || (step == 1 && copies == 1)))
5394 /* Also check if we are loading up the most significant bit which can be done by
6395 loading up -1 and shifting the value left by -1. */
6396 else if (EASY_VECTOR_MSB (splat_val, inner))
6399 else
6400 return false;
6402 /* Check if VAL is present in every STEP-th element, and the
6403 other elements are filled with its most significant bit. */
6404 for (i = 1; i < nunits; ++i)
6406 HOST_WIDE_INT desired_val;
6407 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6408 if ((i & (step - 1)) == 0)
6409 desired_val = val;
6410 else
6411 desired_val = msb_val;
6413 if (desired_val != const_vector_elt_as_int (op, elt))
6414 return false;
6417 return true;
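/* Worked examples (illustrative): the V8HImode constant with every
   element 0x0303 is accepted with step == 1, copies == 2, since each
   halfword is two copies of the byte 3, so vspltisb 3 generates it;
   the big-endian V16QImode constant { 0, 0, 0, 5, 0, 0, 0, 5, ... }
   is accepted with step == 4, copies == 1, since every fourth byte is
   5 and the rest hold its sign bits, so vspltisw 5 generates it.  */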
6420 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6421 instruction, filling in the bottom elements with 0 or -1.
6423 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6424 for the number of zeroes to shift in, or negative for the number of 0xff
6425 bytes to shift in.
6427 OP is a CONST_VECTOR. */
6430 vspltis_shifted (rtx op)
6432 machine_mode mode = GET_MODE (op);
6433 machine_mode inner = GET_MODE_INNER (mode);
6435 unsigned i, j;
6436 unsigned nunits;
6437 unsigned mask;
6439 HOST_WIDE_INT val;
6441 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6442 return 0;
6444 /* We need to create pseudo registers to do the shift, so don't recognize
6445 shift vector constants after reload. */
6446 if (!can_create_pseudo_p ())
6447 return 0;
6449 nunits = GET_MODE_NUNITS (mode);
6450 mask = GET_MODE_MASK (inner);
6452 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6454 /* Check if the value can really be the operand of a vspltis[bhw]. */
6455 if (EASY_VECTOR_15 (val))
6458 /* Also check if we are loading up the most significant bit which can be done
6459 by loading up -1 and shifting the value left by -1. */
6460 else if (EASY_VECTOR_MSB (val, inner))
6463 else
6464 return 0;
6466 /* Check if VAL is present in every STEP-th element until we find elements
6467 that are 0 or all 1 bits. */
6468 for (i = 1; i < nunits; ++i)
6470 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6471 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6473 /* If the value isn't the splat value, check for the remaining elements
6474 being 0/-1. */
6475 if (val != elt_val)
6477 if (elt_val == 0)
6479 for (j = i+1; j < nunits; ++j)
6481 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6482 if (const_vector_elt_as_int (op, elt2) != 0)
6483 return 0;
6486 return (nunits - i) * GET_MODE_SIZE (inner);
6489 else if ((elt_val & mask) == mask)
6491 for (j = i+1; j < nunits; ++j)
6493 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6494 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6495 return 0;
6498 return -((nunits - i) * GET_MODE_SIZE (inner));
6501 else
6502 return 0;
6506 /* If all elements are equal, we don't need to do VSLDOI. */
6507 return 0;
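/* Examples (illustrative, big-endian element order): for the V4SImode
   vector { 5, 0, 0, 0 }, vspltisw 5 plus a VSLDOI shifting in
   (4 - 1) * 4 == 12 zero bytes suffices, so the return value is 12;
   for { 5, -1, -1, -1 } the trailing elements are all ones and the
   return value is -12.  */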
6511 /* Return true if OP is of the given MODE and can be synthesized
6512 with a vspltisb, vspltish or vspltisw. */
6514 bool
6515 easy_altivec_constant (rtx op, machine_mode mode)
6517 unsigned step, copies;
6519 if (mode == VOIDmode)
6520 mode = GET_MODE (op);
6521 else if (mode != GET_MODE (op))
6522 return false;
6524 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6525 constants. */
6526 if (mode == V2DFmode)
6527 return zero_constant (op, mode);
6529 else if (mode == V2DImode)
6531 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6532 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6533 return false;
6535 if (zero_constant (op, mode))
6536 return true;
6538 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6539 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6540 return true;
6542 return false;
6545 /* V1TImode is a special container for TImode. Ignore for now. */
6546 else if (mode == V1TImode)
6547 return false;
6549 /* Start with a vspltisw. */
6550 step = GET_MODE_NUNITS (mode) / 4;
6551 copies = 1;
6553 if (vspltis_constant (op, step, copies))
6554 return true;
6556 /* Then try with a vspltish. */
6557 if (step == 1)
6558 copies <<= 1;
6559 else
6560 step >>= 1;
6562 if (vspltis_constant (op, step, copies))
6563 return true;
6565 /* And finally a vspltisb. */
6566 if (step == 1)
6567 copies <<= 1;
6568 else
6569 step >>= 1;
6571 if (vspltis_constant (op, step, copies))
6572 return true;
6574 if (vspltis_shifted (op) != 0)
6575 return true;
6577 return false;
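/* Illustrative trace of the probes above: for V4SImode the
   (step, copies) pairs tried are (1, 1) for vspltisw, (1, 2) for
   vspltish and (1, 4) for vspltisb; for V16QImode they are (4, 1),
   (2, 1) and (1, 1).  */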
6580 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6581 result is OP. Abort if it is not possible. */
6584 gen_easy_altivec_constant (rtx op)
6586 machine_mode mode = GET_MODE (op);
6587 int nunits = GET_MODE_NUNITS (mode);
6588 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6589 unsigned step = nunits / 4;
6590 unsigned copies = 1;
6592 /* Start with a vspltisw. */
6593 if (vspltis_constant (op, step, copies))
6594 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6596 /* Then try with a vspltish. */
6597 if (step == 1)
6598 copies <<= 1;
6599 else
6600 step >>= 1;
6602 if (vspltis_constant (op, step, copies))
6603 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6605 /* And finally a vspltisb. */
6606 if (step == 1)
6607 copies <<= 1;
6608 else
6609 step >>= 1;
6611 if (vspltis_constant (op, step, copies))
6612 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6614 gcc_unreachable ();
6617 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6618 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6620 Return the number of instructions needed (1 or 2) via the address pointed
6621 to by NUM_INSNS_PTR.
6623 Return the constant that is being split via CONSTANT_PTR. */
6625 bool
6626 xxspltib_constant_p (rtx op,
6627 machine_mode mode,
6628 int *num_insns_ptr,
6629 int *constant_ptr)
6631 size_t nunits = GET_MODE_NUNITS (mode);
6632 size_t i;
6633 HOST_WIDE_INT value;
6634 rtx element;
6636 /* Set the returned values to out of bound values. */
6637 *num_insns_ptr = -1;
6638 *constant_ptr = 256;
6640 if (!TARGET_P9_VECTOR)
6641 return false;
6643 if (mode == VOIDmode)
6644 mode = GET_MODE (op);
6646 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6647 return false;
6649 /* Handle (vec_duplicate <constant>). */
6650 if (GET_CODE (op) == VEC_DUPLICATE)
6652 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6653 && mode != V2DImode)
6654 return false;
6656 element = XEXP (op, 0);
6657 if (!CONST_INT_P (element))
6658 return false;
6660 value = INTVAL (element);
6661 if (!IN_RANGE (value, -128, 127))
6662 return false;
6665 /* Handle (const_vector [...]). */
6666 else if (GET_CODE (op) == CONST_VECTOR)
6668 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6669 && mode != V2DImode)
6670 return false;
6672 element = CONST_VECTOR_ELT (op, 0);
6673 if (!CONST_INT_P (element))
6674 return false;
6676 value = INTVAL (element);
6677 if (!IN_RANGE (value, -128, 127))
6678 return false;
6680 for (i = 1; i < nunits; i++)
6682 element = CONST_VECTOR_ELT (op, i);
6683 if (!CONST_INT_P (element))
6684 return false;
6686 if (value != INTVAL (element))
6687 return false;
6691 /* Handle integer constants being loaded into the upper part of the VSX
6692 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6693 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6694 else if (CONST_INT_P (op))
6696 if (!SCALAR_INT_MODE_P (mode))
6697 return false;
6699 value = INTVAL (op);
6700 if (!IN_RANGE (value, -128, 127))
6701 return false;
6703 if (!IN_RANGE (value, -1, 0))
6705 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6706 return false;
6708 if (EASY_VECTOR_15 (value))
6709 return false;
6713 else
6714 return false;
6716 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6717 sign extend. Special case 0/-1 to allow getting any VSX register instead
6718 of an Altivec register. */
6719 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6720 && EASY_VECTOR_15 (value))
6721 return false;
6723 /* Return # of instructions and the constant byte for XXSPLTIB. */
6724 if (mode == V16QImode)
6725 *num_insns_ptr = 1;
6727 else if (IN_RANGE (value, -1, 0))
6728 *num_insns_ptr = 1;
6730 else
6731 *num_insns_ptr = 2;
6733 *constant_ptr = (int) value;
6734 return true;
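/* Examples (illustrative): a V16QImode splat of 23 needs one
   instruction, xxspltib 23; a V4SImode splat of 7 is rejected above
   because vspltisw is preferred for EASY_VECTOR_15 values; a V4SImode
   splat of 100 needs two instructions, xxspltib followed by a
   vextsb2w sign extension.  */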
6737 const char *
6738 output_vec_const_move (rtx *operands)
6740 int shift;
6741 machine_mode mode;
6742 rtx dest, vec;
6744 dest = operands[0];
6745 vec = operands[1];
6746 mode = GET_MODE (dest);
6748 if (TARGET_VSX)
6750 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6751 int xxspltib_value = 256;
6752 int num_insns = -1;
6754 if (zero_constant (vec, mode))
6756 if (TARGET_P9_VECTOR)
6757 return "xxspltib %x0,0";
6759 else if (dest_vmx_p)
6760 return "vspltisw %0,0";
6762 else
6763 return "xxlxor %x0,%x0,%x0";
6766 if (all_ones_constant (vec, mode))
6768 if (TARGET_P9_VECTOR)
6769 return "xxspltib %x0,255";
6771 else if (dest_vmx_p)
6772 return "vspltisw %0,-1";
6774 else if (TARGET_P8_VECTOR)
6775 return "xxlorc %x0,%x0,%x0";
6777 else
6778 gcc_unreachable ();
6781 if (TARGET_P9_VECTOR
6782 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6784 if (num_insns == 1)
6786 operands[2] = GEN_INT (xxspltib_value & 0xff);
6787 return "xxspltib %x0,%2";
6790 return "#";
6794 if (TARGET_ALTIVEC)
6796 rtx splat_vec;
6798 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6799 if (zero_constant (vec, mode))
6800 return "vspltisw %0,0";
6802 if (all_ones_constant (vec, mode))
6803 return "vspltisw %0,-1";
6805 /* Do we need to construct a value using VSLDOI? */
6806 shift = vspltis_shifted (vec);
6807 if (shift != 0)
6808 return "#";
6810 splat_vec = gen_easy_altivec_constant (vec);
6811 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6812 operands[1] = XEXP (splat_vec, 0);
6813 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6814 return "#";
6816 switch (GET_MODE (splat_vec))
6818 case V4SImode:
6819 return "vspltisw %0,%1";
6821 case V8HImode:
6822 return "vspltish %0,%1";
6824 case V16QImode:
6825 return "vspltisb %0,%1";
6827 default:
6828 gcc_unreachable ();
6832 gcc_unreachable ();
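/* Asm examples (illustrative): an all-zero vector becomes
   "xxspltib %x0,0" on ISA 3.0, "vspltisw %0,0" when the destination
   is an Altivec register, and "xxlxor %x0,%x0,%x0" otherwise; on a
   plain Altivec (non-VSX) target, a V8HImode splat of -5 comes out as
   "vspltish %0,-5".  */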
6835 /* Initialize the PAIRED-float vector TARGET to VALS. */
6837 void
6838 paired_expand_vector_init (rtx target, rtx vals)
6840 machine_mode mode = GET_MODE (target);
6841 int n_elts = GET_MODE_NUNITS (mode);
6842 int n_var = 0;
6843 rtx x, new_rtx, tmp, constant_op, op1, op2;
6844 int i;
6846 for (i = 0; i < n_elts; ++i)
6848 x = XVECEXP (vals, 0, i);
6849 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6850 ++n_var;
6852 if (n_var == 0)
6854 /* Load from constant pool. */
6855 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6856 return;
6859 if (n_var == 2)
6861 /* The vector is initialized only with non-constants. */
6862 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6863 XVECEXP (vals, 0, 1));
6865 emit_move_insn (target, new_rtx);
6866 return;
6869 /* One field is non-constant and the other one is a constant. Load the
6870 constant from the constant pool and use ps_merge instruction to
6871 construct the whole vector. */
6872 op1 = XVECEXP (vals, 0, 0);
6873 op2 = XVECEXP (vals, 0, 1);
6875 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6877 tmp = gen_reg_rtx (GET_MODE (constant_op));
6878 emit_move_insn (tmp, constant_op);
6880 if (CONSTANT_P (op1))
6881 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6882 else
6883 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6885 emit_move_insn (target, new_rtx);
6888 void
6889 paired_expand_vector_move (rtx operands[])
6891 rtx op0 = operands[0], op1 = operands[1];
6893 emit_move_insn (op0, op1);
6896 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6897 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6898 operands for the relation operation COND. This is a recursive
6899 function. */
6901 static void
6902 paired_emit_vector_compare (enum rtx_code rcode,
6903 rtx dest, rtx op0, rtx op1,
6904 rtx cc_op0, rtx cc_op1)
6906 rtx tmp = gen_reg_rtx (V2SFmode);
6907 rtx tmp1, max, min;
6909 gcc_assert (TARGET_PAIRED_FLOAT);
6910 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6912 switch (rcode)
6914 case LT:
6915 case LTU:
6916 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6917 return;
6918 case GE:
6919 case GEU:
6920 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6921 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6922 return;
6923 case LE:
6924 case LEU:
6925 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6926 return;
6927 case GT:
6928 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6929 return;
6930 case EQ:
6931 tmp1 = gen_reg_rtx (V2SFmode);
6932 max = gen_reg_rtx (V2SFmode);
6933 min = gen_reg_rtx (V2SFmode);
6934 gen_reg_rtx (V2SFmode);
6936 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6937 emit_insn (gen_selv2sf4
6938 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6939 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6940 emit_insn (gen_selv2sf4
6941 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6942 emit_insn (gen_subv2sf3 (tmp1, min, max));
6943 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6944 return;
6945 case NE:
6946 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6947 return;
6948 case UNLE:
6949 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6950 return;
6951 case UNLT:
6952 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6953 return;
6954 case UNGE:
6955 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6956 return;
6957 case UNGT:
6958 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6959 return;
6960 default:
6961 gcc_unreachable ();
6964 return;
6967 /* Emit vector conditional expression.
6968 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6969 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6972 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6973 rtx cond, rtx cc_op0, rtx cc_op1)
6975 enum rtx_code rcode = GET_CODE (cond);
6977 if (!TARGET_PAIRED_FLOAT)
6978 return 0;
6980 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6982 return 1;
6985 /* Initialize vector TARGET to VALS. */
6987 void
6988 rs6000_expand_vector_init (rtx target, rtx vals)
6990 machine_mode mode = GET_MODE (target);
6991 machine_mode inner_mode = GET_MODE_INNER (mode);
6992 int n_elts = GET_MODE_NUNITS (mode);
6993 int n_var = 0, one_var = -1;
6994 bool all_same = true, all_const_zero = true;
6995 rtx x, mem;
6996 int i;
6998 for (i = 0; i < n_elts; ++i)
7000 x = XVECEXP (vals, 0, i);
7001 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
7002 ++n_var, one_var = i;
7003 else if (x != CONST0_RTX (inner_mode))
7004 all_const_zero = false;
7006 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
7007 all_same = false;
7010 if (n_var == 0)
7012 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
7013 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
7014 if ((int_vector_p || TARGET_VSX) && all_const_zero)
7016 /* Zero register. */
7017 emit_move_insn (target, CONST0_RTX (mode));
7018 return;
7020 else if (int_vector_p && easy_vector_constant (const_vec, mode))
7022 /* Splat immediate. */
7023 emit_insn (gen_rtx_SET (target, const_vec));
7024 return;
7026 else
7028 /* Load from constant pool. */
7029 emit_move_insn (target, const_vec);
7030 return;
7034 /* Double word values on VSX can use xxpermdi or lxvdsx. */
7035 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
7037 rtx op[2];
7038 size_t i;
7039 size_t num_elements = all_same ? 1 : 2;
7040 for (i = 0; i < num_elements; i++)
7042 op[i] = XVECEXP (vals, 0, i);
7043 /* Just in case there is a SUBREG with a smaller mode, do a
7044 conversion. */
7045 if (GET_MODE (op[i]) != inner_mode)
7047 rtx tmp = gen_reg_rtx (inner_mode);
7048 convert_move (tmp, op[i], 0);
7049 op[i] = tmp;
7051 /* Allow load with splat double word. */
7052 else if (MEM_P (op[i]))
7054 if (!all_same)
7055 op[i] = force_reg (inner_mode, op[i]);
7057 else if (!REG_P (op[i]))
7058 op[i] = force_reg (inner_mode, op[i]);
7061 if (all_same)
7063 if (mode == V2DFmode)
7064 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7065 else
7066 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7068 else
7070 if (mode == V2DFmode)
7071 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7072 else
7073 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7075 return;
7078 /* Special case initializing vector int if we are on 64-bit systems with
7079 direct move or we have the ISA 3.0 instructions. */
7080 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7081 && TARGET_DIRECT_MOVE_64BIT)
7083 if (all_same)
7085 rtx element0 = XVECEXP (vals, 0, 0);
7086 if (MEM_P (element0))
7087 element0 = rs6000_address_for_fpconvert (element0);
7088 else
7089 element0 = force_reg (SImode, element0);
7091 if (TARGET_P9_VECTOR)
7092 emit_insn (gen_vsx_splat_v4si (target, element0));
7093 else
7095 rtx tmp = gen_reg_rtx (DImode);
7096 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7097 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7099 return;
7101 else
7103 rtx elements[4];
7104 size_t i;
7106 for (i = 0; i < 4; i++)
7108 elements[i] = XVECEXP (vals, 0, i);
7109 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7110 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7113 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7114 elements[2], elements[3]));
7115 return;
7119 /* With single precision floating point on VSX, note that internally single
7120 precision is actually represented as a double.  Either make two V2DF
7121 vectors and convert those vectors to single precision, or do one
7122 conversion and splat the result to the other elements.  */
7123 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7125 if (all_same)
7127 rtx element0 = XVECEXP (vals, 0, 0);
7129 if (TARGET_P9_VECTOR)
7131 if (MEM_P (element0))
7132 element0 = rs6000_address_for_fpconvert (element0);
7134 emit_insn (gen_vsx_splat_v4sf (target, element0));
7137 else
7139 rtx freg = gen_reg_rtx (V4SFmode);
7140 rtx sreg = force_reg (SFmode, element0);
7141 rtx cvt = (TARGET_XSCVDPSPN
7142 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7143 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7145 emit_insn (cvt);
7146 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7147 const0_rtx));
7150 else
7152 rtx dbl_even = gen_reg_rtx (V2DFmode);
7153 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7154 rtx flt_even = gen_reg_rtx (V4SFmode);
7155 rtx flt_odd = gen_reg_rtx (V4SFmode);
7156 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7157 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7158 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7159 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7161 /* Use VMRGEW if we can instead of doing a permute. */
7162 if (TARGET_P8_VECTOR)
7164 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7165 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7166 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7167 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7168 if (BYTES_BIG_ENDIAN)
7169 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7170 else
7171 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7173 else
7175 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7176 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7177 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7178 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7179 rs6000_expand_extract_even (target, flt_even, flt_odd);
7182 return;
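   /* Sketch of the two sequences above, for input elements
      { f0, f1, f2, f3 }: on power8, dbl_even holds { f0, f2 } and dbl_odd
      holds { f1, f3 } as doubles.  XVCVDPSP rounds each pair to single
      precision, leaving the results in the even word slots, and VMRGEW
      interleaves the even words of the two intermediate vectors, giving
      { f0, f1, f2, f3 }.  The pre-power8 sequence converts { f0, f1 } and
      { f2, f3 } instead, and reaches the same layout with an extract-even
      permute.  */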
7185 /* Special case initializing vector short/char that are splats if we are on
7186 64-bit systems with direct move. */
7187 if (all_same && TARGET_DIRECT_MOVE_64BIT
7188 && (mode == V16QImode || mode == V8HImode))
7190 rtx op0 = XVECEXP (vals, 0, 0);
7191 rtx di_tmp = gen_reg_rtx (DImode);
7193 if (!REG_P (op0))
7194 op0 = force_reg (GET_MODE_INNER (mode), op0);
7196 if (mode == V16QImode)
7198 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7199 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7200 return;
7203 if (mode == V8HImode)
7205 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7206 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7207 return;
7211 /* Store value to stack temp. Load vector element. Splat. However, splat
7212 of 64-bit items is not supported on Altivec. */
7213 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7215 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7216 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7217 XVECEXP (vals, 0, 0));
7218 x = gen_rtx_UNSPEC (VOIDmode,
7219 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7220 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7221 gen_rtvec (2,
7222 gen_rtx_SET (target, mem),
7223 x)));
7224 x = gen_rtx_VEC_SELECT (inner_mode, target,
7225 gen_rtx_PARALLEL (VOIDmode,
7226 gen_rtvec (1, const0_rtx)));
7227 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7228 return;
7231 /* One field is non-constant. Load constant then overwrite
7232 varying field. */
7233 if (n_var == 1)
7235 rtx copy = copy_rtx (vals);
7237 /* Load constant part of vector, substitute neighboring value for
7238 varying element. */
7239 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7240 rs6000_expand_vector_init (target, copy);
7242 /* Insert variable. */
7243 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7244 return;
7247 /* Construct the vector in memory one field at a time
7248 and load the whole vector. */
7249 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7250 for (i = 0; i < n_elts; i++)
7251 emit_move_insn (adjust_address_nv (mem, inner_mode,
7252 i * GET_MODE_SIZE (inner_mode)),
7253 XVECEXP (vals, 0, i));
7254 emit_move_insn (target, mem);
7257 /* Set field ELT of TARGET to VAL. */
7259 void
7260 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7262 machine_mode mode = GET_MODE (target);
7263 machine_mode inner_mode = GET_MODE_INNER (mode);
7264 rtx reg = gen_reg_rtx (mode);
7265 rtx mask, mem, x;
7266 int width = GET_MODE_SIZE (inner_mode);
7267 int i;
7269 val = force_reg (GET_MODE (val), val);
7271 if (VECTOR_MEM_VSX_P (mode))
7273 rtx insn = NULL_RTX;
7274 rtx elt_rtx = GEN_INT (elt);
7276 if (mode == V2DFmode)
7277 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7279 else if (mode == V2DImode)
7280 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7282 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7284 if (mode == V4SImode)
7285 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7286 else if (mode == V8HImode)
7287 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7288 else if (mode == V16QImode)
7289 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7290 else if (mode == V4SFmode)
7291 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7294 if (insn)
7296 emit_insn (insn);
7297 return;
7301 /* Simplify setting single element vectors like V1TImode. */
7302 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7304 emit_move_insn (target, gen_lowpart (mode, val));
7305 return;
7308 /* Load single variable value. */
7309 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7310 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7311 x = gen_rtx_UNSPEC (VOIDmode,
7312 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7313 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7314 gen_rtvec (2,
7315 gen_rtx_SET (reg, mem),
7316 x)));
7318 /* Linear sequence. */
7319 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7320 for (i = 0; i < 16; ++i)
7321 XVECEXP (mask, 0, i) = GEN_INT (i);
7323 /* Set permute mask to insert element into target. */
7324 for (i = 0; i < width; ++i)
7325 XVECEXP (mask, 0, elt*width + i)
7326 = GEN_INT (i + 0x10);
7327 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
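   /* For example (big endian): with V4SImode, ELT == 1 and WIDTH == 4, the
      selector built above is
	{ 0, 1, 2, 3, 0x10, 0x11, 0x12, 0x13, 8, 9, ..., 15 },
      so the VPERM below copies bytes 4..7 of the result from REG (selector
      values 0x10 and up index the second input) and leaves every other
      byte of TARGET unchanged.  */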
7329 if (BYTES_BIG_ENDIAN)
7330 x = gen_rtx_UNSPEC (mode,
7331 gen_rtvec (3, target, reg,
7332 force_reg (V16QImode, x)),
7333 UNSPEC_VPERM);
7334 else
7336 if (TARGET_P9_VECTOR)
7337 x = gen_rtx_UNSPEC (mode,
7338 gen_rtvec (3, target, reg,
7339 force_reg (V16QImode, x)),
7340 UNSPEC_VPERMR);
7341 else
7343 /* Invert selector. We prefer to generate VNAND on P8 so
7344 that future fusion opportunities can kick in, but must
7345 generate VNOR elsewhere. */
7346 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7347 rtx iorx = (TARGET_P8_VECTOR
7348 ? gen_rtx_IOR (V16QImode, notx, notx)
7349 : gen_rtx_AND (V16QImode, notx, notx));
7350 rtx tmp = gen_reg_rtx (V16QImode);
7351 emit_insn (gen_rtx_SET (tmp, iorx));
7353 /* Permute with operands reversed and adjusted selector. */
7354 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7355 UNSPEC_VPERM);
7359 emit_insn (gen_rtx_SET (target, x));
7362 /* Extract field ELT from VEC into TARGET. */
7364 void
7365 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7367 machine_mode mode = GET_MODE (vec);
7368 machine_mode inner_mode = GET_MODE_INNER (mode);
7369 rtx mem;
7371 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7373 switch (mode)
7375 default:
7376 break;
7377 case V1TImode:
7378 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7379 emit_move_insn (target, gen_lowpart (TImode, vec));
7380 break;
7381 case V2DFmode:
7382 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7383 return;
7384 case V2DImode:
7385 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7386 return;
7387 case V4SFmode:
7388 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7389 return;
7390 case V16QImode:
7391 if (TARGET_DIRECT_MOVE_64BIT)
7393 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7394 return;
7396 else
7397 break;
7398 case V8HImode:
7399 if (TARGET_DIRECT_MOVE_64BIT)
7401 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7402 return;
7404 else
7405 break;
7406 case V4SImode:
7407 if (TARGET_DIRECT_MOVE_64BIT)
7409 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7410 return;
7412 break;
7415 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7416 && TARGET_DIRECT_MOVE_64BIT)
7418 if (GET_MODE (elt) != DImode)
7420 rtx tmp = gen_reg_rtx (DImode);
7421 convert_move (tmp, elt, 0);
7422 elt = tmp;
7424 else if (!REG_P (elt))
7425 elt = force_reg (DImode, elt);
7427 switch (mode)
7429 case V2DFmode:
7430 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7431 return;
7433 case V2DImode:
7434 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7435 return;
7437 case V4SFmode:
7438 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7439 return;
7441 case V4SImode:
7442 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7443 return;
7445 case V8HImode:
7446 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7447 return;
7449 case V16QImode:
7450 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7451 return;
7453 default:
7454 gcc_unreachable ();
7458 gcc_assert (CONST_INT_P (elt));
7460 /* Allocate mode-sized buffer. */
7461 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7463 emit_move_insn (mem, vec);
7465 /* Add offset to field within buffer matching vector element. */
7466 mem = adjust_address_nv (mem, inner_mode,
7467 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7469 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
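/* For example: extracting element 2 of a V4SImode vector through this
   fallback stores the whole vector to the stack slot and reloads the
   SImode word at byte offset 2 * 4 == 8.  */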
7472 /* Helper function to return the register number of a RTX. */
7473 static inline int
7474 regno_or_subregno (rtx op)
7476 if (REG_P (op))
7477 return REGNO (op);
7478 else if (SUBREG_P (op))
7479 return subreg_regno (op);
7480 else
7481 gcc_unreachable ();
7484 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7485 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7486 temporary (BASE_TMP) to fix up the address.  Return the new memory address
7487 that is valid for reads or writes to a given register (SCALAR_REG). */
7490 rs6000_adjust_vec_address (rtx scalar_reg,
7491 rtx mem,
7492 rtx element,
7493 rtx base_tmp,
7494 machine_mode scalar_mode)
7496 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7497 rtx addr = XEXP (mem, 0);
7498 rtx element_offset;
7499 rtx new_addr;
7500 bool valid_addr_p;
7502 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7503 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7505 /* Calculate what we need to add to the address to get the element
7506 address. */
7507 if (CONST_INT_P (element))
7508 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7509 else
7511 int byte_shift = exact_log2 (scalar_size);
7512 gcc_assert (byte_shift >= 0);
7514 if (byte_shift == 0)
7515 element_offset = element;
7517 else
7519 if (TARGET_POWERPC64)
7520 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7521 else
7522 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7524 element_offset = base_tmp;
7528 /* Create the new address pointing to the element within the vector. If we
7529 are adding 0, we don't have to change the address. */
7530 if (element_offset == const0_rtx)
7531 new_addr = addr;
7533 /* A simple indirect address can be converted into a reg + offset
7534 address. */
7535 else if (REG_P (addr) || SUBREG_P (addr))
7536 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7538 /* Optimize D-FORM addresses with constant offset with a constant element, to
7539 include the element offset in the address directly. */
7540 else if (GET_CODE (addr) == PLUS)
7542 rtx op0 = XEXP (addr, 0);
7543 rtx op1 = XEXP (addr, 1);
7544 rtx insn;
7546 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7547 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7549 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7550 rtx offset_rtx = GEN_INT (offset);
7552 if (IN_RANGE (offset, -32768, 32767)
7553 && (scalar_size < 8 || (offset & 0x3) == 0))
7554 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7555 else
7557 emit_move_insn (base_tmp, offset_rtx);
7558 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7561 else
7563 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7564 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7566 /* Note, ADDI requires the register being added to be a base
7567 register. If the register was R0, load it up into the temporary
7568 and do the add. */
7569 if (op1_reg_p
7570 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7572 insn = gen_add3_insn (base_tmp, op1, element_offset);
7573 gcc_assert (insn != NULL_RTX);
7574 emit_insn (insn);
7577 else if (ele_reg_p
7578 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7580 insn = gen_add3_insn (base_tmp, element_offset, op1);
7581 gcc_assert (insn != NULL_RTX);
7582 emit_insn (insn);
7585 else
7587 emit_move_insn (base_tmp, op1);
7588 emit_insn (gen_add2_insn (base_tmp, element_offset));
7591 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7595 else
7597 emit_move_insn (base_tmp, addr);
7598 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7601 /* If we have a PLUS, we need to see whether the particular register class
7602 allows for D-FORM or X-FORM addressing. */
7603 if (GET_CODE (new_addr) == PLUS)
7605 rtx op1 = XEXP (new_addr, 1);
7606 addr_mask_type addr_mask;
7607 int scalar_regno = regno_or_subregno (scalar_reg);
7609 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7610 if (INT_REGNO_P (scalar_regno))
7611 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7613 else if (FP_REGNO_P (scalar_regno))
7614 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7616 else if (ALTIVEC_REGNO_P (scalar_regno))
7617 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7619 else
7620 gcc_unreachable ();
7622 if (REG_P (op1) || SUBREG_P (op1))
7623 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7624 else
7625 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7628 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7629 valid_addr_p = true;
7631 else
7632 valid_addr_p = false;
7634 if (!valid_addr_p)
7636 emit_move_insn (base_tmp, new_addr);
7637 new_addr = base_tmp;
7640 return change_address (mem, scalar_mode, new_addr);
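/* For example: given a V4SImode vector at (mem (plus r3 16)), SImode
   scalars, and constant ELEMENT 3, element_offset is 12, the combined
   offset 16 + 12 == 28 passes the range test above, and the result is
   (mem:SI (plus r3 28)); BASE_TMP is not needed, assuming the register
   class of SCALAR_REG supports offset addressing.  */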
7643 /* Split a variable vec_extract operation into the component instructions. */
7645 void
7646 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7647 rtx tmp_altivec)
7649 machine_mode mode = GET_MODE (src);
7650 machine_mode scalar_mode = GET_MODE (dest);
7651 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7652 int byte_shift = exact_log2 (scalar_size);
7654 gcc_assert (byte_shift >= 0);
7656 /* If we are given a memory address, optimize to load just the element. We
7657 don't have to adjust the vector element number on little endian
7658 systems. */
7659 if (MEM_P (src))
7661 gcc_assert (REG_P (tmp_gpr));
7662 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7663 tmp_gpr, scalar_mode));
7664 return;
7667 else if (REG_P (src) || SUBREG_P (src))
7669 int bit_shift = byte_shift + 3;
7670 rtx element2;
7671 int dest_regno = regno_or_subregno (dest);
7672 int src_regno = regno_or_subregno (src);
7673 int element_regno = regno_or_subregno (element);
7675 gcc_assert (REG_P (tmp_gpr));
7677 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7678 a general purpose register. */
7679 if (TARGET_P9_VECTOR
7680 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7681 && INT_REGNO_P (dest_regno)
7682 && ALTIVEC_REGNO_P (src_regno)
7683 && INT_REGNO_P (element_regno))
7685 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7686 rtx element_si = gen_rtx_REG (SImode, element_regno);
7688 if (mode == V16QImode)
7689 emit_insn (VECTOR_ELT_ORDER_BIG
7690 ? gen_vextublx (dest_si, element_si, src)
7691 : gen_vextubrx (dest_si, element_si, src));
7693 else if (mode == V8HImode)
7695 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7696 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7697 emit_insn (VECTOR_ELT_ORDER_BIG
7698 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7699 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7703 else
7705 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7706 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7707 emit_insn (VECTOR_ELT_ORDER_BIG
7708 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7709 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7712 return;
7716 gcc_assert (REG_P (tmp_altivec));
7718 /* For little endian, adjust element ordering.  For V2DI/V2DF, we can use
7719 an XOR, otherwise we need to subtract.  The shift amount is chosen so
7720 VSLO will shift the element into the upper position (adding 3 to convert
7721 a byte shift into a bit shift).  */
7722 if (scalar_size == 8)
7724 if (!VECTOR_ELT_ORDER_BIG)
7726 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7727 element2 = tmp_gpr;
7729 else
7730 element2 = element;
7732 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7733 bit. */
7734 emit_insn (gen_rtx_SET (tmp_gpr,
7735 gen_rtx_AND (DImode,
7736 gen_rtx_ASHIFT (DImode,
7737 element2,
7738 GEN_INT (6)),
7739 GEN_INT (64))));
7741 else
7743 if (!VECTOR_ELT_ORDER_BIG)
7745 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7747 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7748 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7749 element2 = tmp_gpr;
7751 else
7752 element2 = element;
7754 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
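      /* Worked example: for V2DImode on little endian, element 0 is flipped
	 to 1 by the XOR above and (1 << 6) & 64 == 64, while element 1
	 yields 0.  For the narrower modes the index is masked, reversed on
	 little endian, and shifted, e.g. V4SImode element 1 on big endian
	 gives 1 << 5 == 32.  Either way TMP_GPR now holds the bit shift
	 that VSLO consumes below.  */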
7757 /* Get the value into the lower byte of the Altivec register where VSLO
7758 expects it. */
7759 if (TARGET_P9_VECTOR)
7760 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7761 else if (can_create_pseudo_p ())
7762 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7763 else
7765 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7766 emit_move_insn (tmp_di, tmp_gpr);
7767 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7770 /* Do the VSLO to get the value into the final location. */
7771 switch (mode)
7773 case V2DFmode:
7774 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7775 return;
7777 case V2DImode:
7778 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7779 return;
7781 case V4SFmode:
7783 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7784 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7785 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7786 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7787 tmp_altivec));
7789 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7790 return;
7793 case V4SImode:
7794 case V8HImode:
7795 case V16QImode:
7797 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7798 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7799 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7800 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7801 tmp_altivec));
7802 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7803 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7804 GEN_INT (64 - (8 * scalar_size))));
7805 return;
7808 default:
7809 gcc_unreachable ();
7812 return;
7814 else
7815 gcc_unreachable ();
7818 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7819 two SImode values. */
7821 static void
7822 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7824 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7826 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7828 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7829 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7831 emit_move_insn (dest, GEN_INT (const1 | const2));
7832 return;
7835 /* Put si1 into upper 32-bits of dest. */
7836 if (CONST_INT_P (si1))
7837 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7838 else
7840 /* Generate RLDIC. */
7841 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7842 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7843 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7844 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7845 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7846 emit_insn (gen_rtx_SET (dest, and_rtx));
7849 /* Put si2 into the temporary. */
7850 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7851 if (CONST_INT_P (si2))
7852 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7853 else
7854 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7856 /* Combine the two parts. */
7857 emit_insn (gen_iordi3 (dest, dest, tmp));
7858 return;
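/* Worked example: with si1 == 0x11111111 and si2 == 0x22222222 both
   constant, the first path above emits a single move of
   0x1111111122222222 into DEST and the temporary is not used.  */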
7861 /* Split a V4SI initialization. */
7863 void
7864 rs6000_split_v4si_init (rtx operands[])
7866 rtx dest = operands[0];
7868 /* Destination is a GPR, build up the two DImode parts in place. */
7869 if (REG_P (dest) || SUBREG_P (dest))
7871 int d_regno = regno_or_subregno (dest);
7872 rtx scalar1 = operands[1];
7873 rtx scalar2 = operands[2];
7874 rtx scalar3 = operands[3];
7875 rtx scalar4 = operands[4];
7876 rtx tmp1 = operands[5];
7877 rtx tmp2 = operands[6];
7879 /* Even though we only need one temporary (plus the destination, which
7880 has an early clobber constraint), try to use two temporaries, one for
7881 each double word created.  That way the 2nd insn scheduling pass can
7882 rearrange things so the two parts are done in parallel.  */
7883 if (BYTES_BIG_ENDIAN)
7885 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7886 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7887 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7888 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7890 else
7892 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7893 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7894 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7895 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7896 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7898 return;
7901 else
7902 gcc_unreachable ();
7905 /* Return alignment of TYPE.  Existing alignment is ALIGN.  HOW
7906 selects whether the alignment is ABI mandated, optional, or
7907 both ABI and optional alignment.  */
7909 unsigned int
7910 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7912 if (how != align_opt)
7914 if (TREE_CODE (type) == VECTOR_TYPE)
7916 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7918 if (align < 64)
7919 align = 64;
7921 else if (align < 128)
7922 align = 128;
7926 if (how != align_abi)
7928 if (TREE_CODE (type) == ARRAY_TYPE
7929 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7931 if (align < BITS_PER_WORD)
7932 align = BITS_PER_WORD;
7936 return align;
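/* For example: a 32-bit-aligned AltiVec vector type is raised to 128-bit
   alignment by the ABI rules above, while an array of chars picks up the
   optional BITS_PER_WORD alignment.  */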
7939 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7941 bool
7942 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7944 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7946 if (computed != 128)
7948 static bool warned;
7949 if (!warned && warn_psabi)
7951 warned = true;
7952 inform (input_location,
7953 "the layout of aggregates containing vectors with"
7954 " %d-byte alignment has changed in GCC 5",
7955 computed / BITS_PER_UNIT);
7958 /* In current GCC there is no special case. */
7959 return false;
7962 return false;
7965 /* AIX increases natural record alignment to doubleword if the first
7966 field is an FP double while the FP fields remain word aligned. */
7968 unsigned int
7969 rs6000_special_round_type_align (tree type, unsigned int computed,
7970 unsigned int specified)
7972 unsigned int align = MAX (computed, specified);
7973 tree field = TYPE_FIELDS (type);
7975 /* Skip all non-field decls.  */
7976 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7977 field = DECL_CHAIN (field);
7979 if (field != NULL && field != type)
7981 type = TREE_TYPE (field);
7982 while (TREE_CODE (type) == ARRAY_TYPE)
7983 type = TREE_TYPE (type);
7985 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7986 align = MAX (align, 64);
7989 return align;
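/* For example: for struct { double d; int i; } the first field has
   DFmode, so the alignment computed above is raised to at least 64 bits,
   per the AIX rule described in the comment.  */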
7992 /* Darwin increases record alignment to the natural alignment of
7993 the first field. */
7995 unsigned int
7996 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7997 unsigned int specified)
7999 unsigned int align = MAX (computed, specified);
8001 if (TYPE_PACKED (type))
8002 return align;
8004 /* Find the first field, looking down into aggregates. */
8005 do {
8006 tree field = TYPE_FIELDS (type);
8007 /* Skip all non-field decls.  */
8008 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
8009 field = DECL_CHAIN (field);
8010 if (! field)
8011 break;
8012 /* A packed field does not contribute any extra alignment. */
8013 if (DECL_PACKED (field))
8014 return align;
8015 type = TREE_TYPE (field);
8016 while (TREE_CODE (type) == ARRAY_TYPE)
8017 type = TREE_TYPE (type);
8018 } while (AGGREGATE_TYPE_P (type));
8020 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
8021 align = MAX (align, TYPE_ALIGN (type));
8023 return align;
8026 /* Return 1 for an operand in small memory on V.4/eabi. */
8029 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8030 machine_mode mode ATTRIBUTE_UNUSED)
8032 #if TARGET_ELF
8033 rtx sym_ref;
8035 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8036 return 0;
8038 if (DEFAULT_ABI != ABI_V4)
8039 return 0;
8041 if (GET_CODE (op) == SYMBOL_REF)
8042 sym_ref = op;
8044 else if (GET_CODE (op) != CONST
8045 || GET_CODE (XEXP (op, 0)) != PLUS
8046 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8047 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8048 return 0;
8050 else
8052 rtx sum = XEXP (op, 0);
8053 HOST_WIDE_INT summand;
8055 /* We have to be careful here, because it is the referenced address
8056 that must be within 32k of _SDA_BASE_, not just the symbol.  */
8057 summand = INTVAL (XEXP (sum, 1));
8058 if (summand < 0 || summand > g_switch_value)
8059 return 0;
8061 sym_ref = XEXP (sum, 0);
8064 return SYMBOL_REF_SMALL_P (sym_ref);
8065 #else
8066 return 0;
8067 #endif
8070 /* Return true if either operand is a general purpose register. */
8072 bool
8073 gpr_or_gpr_p (rtx op0, rtx op1)
8075 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8076 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8079 /* Return true if this is a move direct operation between GPR registers and
8080 floating point/VSX registers. */
8082 bool
8083 direct_move_p (rtx op0, rtx op1)
8085 int regno0, regno1;
8087 if (!REG_P (op0) || !REG_P (op1))
8088 return false;
8090 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8091 return false;
8093 regno0 = REGNO (op0);
8094 regno1 = REGNO (op1);
8095 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8096 return false;
8098 if (INT_REGNO_P (regno0))
8099 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8101 else if (INT_REGNO_P (regno1))
8103 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8104 return true;
8106 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8107 return true;
8110 return false;
8113 /* Return true if the OFFSET is valid for the quad address instructions that
8114 use d-form (register + offset) addressing. */
8116 static inline bool
8117 quad_address_offset_p (HOST_WIDE_INT offset)
8119 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
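/* Worked example: 32752 (0x7ff0) passes both tests, while 32760 fails
   the multiple-of-16 check and 32768 is out of range.  */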
8122 /* Return true if the ADDR is an acceptable address for a quad memory
8123 operation of mode MODE (either LQ/STQ for general purpose registers, or
8124 LXV/STXV for vector registers under ISA 3.0).  STRICT is true if the
8125 address must satisfy the strict form of the RTL legitimate-address
8126 checks, i.e. unallocated pseudos are not accepted as base registers.  */
8128 bool
8129 quad_address_p (rtx addr, machine_mode mode, bool strict)
8131 rtx op0, op1;
8133 if (GET_MODE_SIZE (mode) != 16)
8134 return false;
8136 if (legitimate_indirect_address_p (addr, strict))
8137 return true;
8139 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8140 return false;
8142 if (GET_CODE (addr) != PLUS)
8143 return false;
8145 op0 = XEXP (addr, 0);
8146 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8147 return false;
8149 op1 = XEXP (addr, 1);
8150 if (!CONST_INT_P (op1))
8151 return false;
8153 return quad_address_offset_p (INTVAL (op1));
8156 /* Return true if this is a load or store quad operation. This function does
8157 not handle the atomic quad memory instructions. */
8159 bool
8160 quad_load_store_p (rtx op0, rtx op1)
8162 bool ret;
8164 if (!TARGET_QUAD_MEMORY)
8165 ret = false;
8167 else if (REG_P (op0) && MEM_P (op1))
8168 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8169 && quad_memory_operand (op1, GET_MODE (op1))
8170 && !reg_overlap_mentioned_p (op0, op1));
8172 else if (MEM_P (op0) && REG_P (op1))
8173 ret = (quad_memory_operand (op0, GET_MODE (op0))
8174 && quad_int_reg_operand (op1, GET_MODE (op1)));
8176 else
8177 ret = false;
8179 if (TARGET_DEBUG_ADDR)
8181 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8182 ret ? "true" : "false");
8183 debug_rtx (gen_rtx_SET (op0, op1));
8186 return ret;
8189 /* Given an address, return a constant offset term if one exists. */
8191 static rtx
8192 address_offset (rtx op)
8194 if (GET_CODE (op) == PRE_INC
8195 || GET_CODE (op) == PRE_DEC)
8196 op = XEXP (op, 0);
8197 else if (GET_CODE (op) == PRE_MODIFY
8198 || GET_CODE (op) == LO_SUM)
8199 op = XEXP (op, 1);
8201 if (GET_CODE (op) == CONST)
8202 op = XEXP (op, 0);
8204 if (GET_CODE (op) == PLUS)
8205 op = XEXP (op, 1);
8207 if (CONST_INT_P (op))
8208 return op;
8210 return NULL_RTX;
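/* For example: (plus (reg) (const_int 16)) yields (const_int 16);
   (lo_sum (reg) (const (plus (symbol_ref "x") (const_int 8)))) yields
   (const_int 8); a bare (reg) yields NULL_RTX.  */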
8213 /* Return true if the MEM operand is a memory operand suitable for use
8214 with a (full width, possibly multiple) gpr load/store. On
8215 powerpc64 this means the offset must be divisible by 4.
8216 Implements 'Y' constraint.
8218 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8219 a constraint function we know the operand has satisfied a suitable
8220 memory predicate. Also accept some odd rtl generated by reload
8221 (see rs6000_legitimize_reload_address for various forms). It is
8222 important that reload rtl be accepted by appropriate constraints
8223 but not by the operand predicate.
8225 Offsetting a lo_sum should not be allowed, except where we know by
8226 alignment that a 32k boundary is not crossed, but see the ???
8227 comment in rs6000_legitimize_reload_address. Note that by
8228 "offsetting" here we mean a further offset to access parts of the
8229 MEM. It's fine to have a lo_sum where the inner address is offset
8230 from a sym, since the same sym+offset will appear in the high part
8231 of the address calculation. */
8233 bool
8234 mem_operand_gpr (rtx op, machine_mode mode)
8236 unsigned HOST_WIDE_INT offset;
8237 int extra;
8238 rtx addr = XEXP (op, 0);
8240 op = address_offset (addr);
8241 if (op == NULL_RTX)
8242 return true;
8244 offset = INTVAL (op);
8245 if (TARGET_POWERPC64 && (offset & 3) != 0)
8246 return false;
8248 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8249 if (extra < 0)
8250 extra = 0;
8252 if (GET_CODE (addr) == LO_SUM)
8253 /* For lo_sum addresses, we must allow any offset except one that
8254 causes a wrap, so test only the low 16 bits. */
8255 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8257 return offset + 0x8000 < 0x10000u - extra;
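/* Worked example: the lo_sum adjustment sign-extends the low 16 bits, so
   an offset whose low half is 0xabcd becomes 0xabcd - 0x10000.  The
   final unsigned compare accepts offsets in [-0x8000, 0x7fff - extra];
   for a 16-byte access on powerpc64, extra == 8 and the largest valid
   offset is 32759.  */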
8260 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8261 enforce an offset divisible by 4 even for 32-bit. */
8263 bool
8264 mem_operand_ds_form (rtx op, machine_mode mode)
8266 unsigned HOST_WIDE_INT offset;
8267 int extra;
8268 rtx addr = XEXP (op, 0);
8270 if (!offsettable_address_p (false, mode, addr))
8271 return false;
8273 op = address_offset (addr);
8274 if (op == NULL_RTX)
8275 return true;
8277 offset = INTVAL (op);
8278 if ((offset & 3) != 0)
8279 return false;
8281 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8282 if (extra < 0)
8283 extra = 0;
8285 if (GET_CODE (addr) == LO_SUM)
8286 /* For lo_sum addresses, we must allow any offset except one that
8287 causes a wrap, so test only the low 16 bits. */
8288 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8290 return offset + 0x8000 < 0x10000u - extra;
8293 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8295 static bool
8296 reg_offset_addressing_ok_p (machine_mode mode)
8298 switch (mode)
8300 case V16QImode:
8301 case V8HImode:
8302 case V4SFmode:
8303 case V4SImode:
8304 case V2DFmode:
8305 case V2DImode:
8306 case V1TImode:
8307 case TImode:
8308 case TFmode:
8309 case KFmode:
8310 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8311 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8312 a vector mode, if we want to use the VSX registers to move it around,
8313 we need to restrict ourselves to reg+reg addressing. Similarly for
8314 IEEE 128-bit floating point that is passed in a single vector
8315 register. */
8316 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8317 return mode_supports_vsx_dform_quad (mode);
8318 break;
8320 case V2SImode:
8321 case V2SFmode:
8322 /* Paired vector modes. Only reg+reg addressing is valid. */
8323 if (TARGET_PAIRED_FLOAT)
8324 return false;
8325 break;
8327 case SDmode:
8328 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8329 addressing for the LFIWZX and STFIWX instructions. */
8330 if (TARGET_NO_SDMODE_STACK)
8331 return false;
8332 break;
8334 default:
8335 break;
8338 return true;
8341 static bool
8342 virtual_stack_registers_memory_p (rtx op)
8344 int regnum;
8346 if (GET_CODE (op) == REG)
8347 regnum = REGNO (op);
8349 else if (GET_CODE (op) == PLUS
8350 && GET_CODE (XEXP (op, 0)) == REG
8351 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8352 regnum = REGNO (XEXP (op, 0));
8354 else
8355 return false;
8357 return (regnum >= FIRST_VIRTUAL_REGISTER
8358 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8361 /* Return true if a MODE sized memory accesses to OP plus OFFSET
8362 is known to not straddle a 32k boundary. This function is used
8363 to determine whether -mcmodel=medium code can use TOC pointer
8364 relative addressing for OP. This means the alignment of the TOC
8365 pointer must also be taken into account, and unfortunately that is
8366 only 8 bytes. */
8368 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8369 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8370 #endif
8372 static bool
8373 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8374 machine_mode mode)
8376 tree decl;
8377 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8379 if (GET_CODE (op) != SYMBOL_REF)
8380 return false;
8382 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8383 SYMBOL_REF. */
8384 if (mode_supports_vsx_dform_quad (mode))
8385 return false;
8387 dsize = GET_MODE_SIZE (mode);
8388 decl = SYMBOL_REF_DECL (op);
8389 if (!decl)
8391 if (dsize == 0)
8392 return false;
8394 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8395 replacing memory addresses with an anchor plus offset. We
8396 could find the decl by rummaging around in the block->objects
8397 VEC for the given offset but that seems like too much work. */
8398 dalign = BITS_PER_UNIT;
8399 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8400 && SYMBOL_REF_ANCHOR_P (op)
8401 && SYMBOL_REF_BLOCK (op) != NULL)
8403 struct object_block *block = SYMBOL_REF_BLOCK (op);
8405 dalign = block->alignment;
8406 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8408 else if (CONSTANT_POOL_ADDRESS_P (op))
8410 /* It would be nice to have get_pool_align().. */
8411 machine_mode cmode = get_pool_mode (op);
8413 dalign = GET_MODE_ALIGNMENT (cmode);
8416 else if (DECL_P (decl))
8418 dalign = DECL_ALIGN (decl);
8420 if (dsize == 0)
8422 /* Allow BLKmode when the entire object is known to not
8423 cross a 32k boundary. */
8424 if (!DECL_SIZE_UNIT (decl))
8425 return false;
8427 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8428 return false;
8430 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8431 if (dsize > 32768)
8432 return false;
8434 dalign /= BITS_PER_UNIT;
8435 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8436 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8437 return dalign >= dsize;
8440 else
8441 gcc_unreachable ();
8443 /* Find how many bits of the alignment we know for this access. */
8444 dalign /= BITS_PER_UNIT;
8445 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8446 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8447 mask = dalign - 1;
8448 lsb = offset & -offset;
8449 mask &= lsb - 1;
8450 dalign = mask + 1;
8452 return dalign >= dsize;
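/* Worked example: with dalign capped at 8 (the TOC pointer alignment)
   and offset == 4, lsb == 4 and mask == 7 & 3 == 3, so the known
   alignment drops to 4 bytes; a 4-byte access then passes the final test
   while an 8-byte access does not.  */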
8455 static bool
8456 constant_pool_expr_p (rtx op)
8458 rtx base, offset;
8460 split_const (op, &base, &offset);
8461 return (GET_CODE (base) == SYMBOL_REF
8462 && CONSTANT_POOL_ADDRESS_P (base)
8463 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8466 /* These are only used to pass through from print_operand/print_operand_address
8467 to rs6000_output_addr_const_extra over the intervening function
8468 output_addr_const which is not target code. */
8469 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8471 /* Return true if OP is a toc pointer relative address (the output
8472 of create_TOC_reference). If STRICT, do not match non-split
8473 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8474 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8475 TOCREL_OFFSET_RET respectively. */
8477 bool
8478 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8479 const_rtx *tocrel_offset_ret)
8481 if (!TARGET_TOC)
8482 return false;
8484 if (TARGET_CMODEL != CMODEL_SMALL)
8486 /* When strict, ensure we have everything tidy.  */
8487 if (strict
8488 && !(GET_CODE (op) == LO_SUM
8489 && REG_P (XEXP (op, 0))
8490 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8491 return false;
8493 /* When not strict, allow non-split TOC addresses and also allow
8494 (lo_sum (high ..)) TOC addresses created during reload. */
8495 if (GET_CODE (op) == LO_SUM)
8496 op = XEXP (op, 1);
8499 const_rtx tocrel_base = op;
8500 const_rtx tocrel_offset = const0_rtx;
8502 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8504 tocrel_base = XEXP (op, 0);
8505 tocrel_offset = XEXP (op, 1);
8508 if (tocrel_base_ret)
8509 *tocrel_base_ret = tocrel_base;
8510 if (tocrel_offset_ret)
8511 *tocrel_offset_ret = tocrel_offset;
8513 return (GET_CODE (tocrel_base) == UNSPEC
8514 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8517 /* Return true if X is a constant pool address, and also for cmodel=medium
8518 if X is a toc-relative address known to be offsettable within MODE. */
8520 bool
8521 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8522 bool strict)
8524 const_rtx tocrel_base, tocrel_offset;
8525 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8526 && (TARGET_CMODEL != CMODEL_MEDIUM
8527 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8528 || mode == QImode
8529 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8530 INTVAL (tocrel_offset), mode)));
8533 static bool
8534 legitimate_small_data_p (machine_mode mode, rtx x)
8536 return (DEFAULT_ABI == ABI_V4
8537 && !flag_pic && !TARGET_TOC
8538 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8539 && small_data_operand (x, mode));
8542 bool
8543 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8544 bool strict, bool worst_case)
8546 unsigned HOST_WIDE_INT offset;
8547 unsigned int extra;
8549 if (GET_CODE (x) != PLUS)
8550 return false;
8551 if (!REG_P (XEXP (x, 0)))
8552 return false;
8553 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8554 return false;
8555 if (mode_supports_vsx_dform_quad (mode))
8556 return quad_address_p (x, mode, strict);
8557 if (!reg_offset_addressing_ok_p (mode))
8558 return virtual_stack_registers_memory_p (x);
8559 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8560 return true;
8561 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8562 return false;
8564 offset = INTVAL (XEXP (x, 1));
8565 extra = 0;
8566 switch (mode)
8568 case V2SImode:
8569 case V2SFmode:
8570 /* Paired single modes: offset addressing isn't valid. */
8571 return false;
8573 case DFmode:
8574 case DDmode:
8575 case DImode:
8576 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8577 addressing. */
8578 if (VECTOR_MEM_VSX_P (mode))
8579 return false;
8581 if (!worst_case)
8582 break;
8583 if (!TARGET_POWERPC64)
8584 extra = 4;
8585 else if (offset & 3)
8586 return false;
8587 break;
8589 case TFmode:
8590 case IFmode:
8591 case KFmode:
8592 case TDmode:
8593 case TImode:
8594 case PTImode:
8595 extra = 8;
8596 if (!worst_case)
8597 break;
8598 if (!TARGET_POWERPC64)
8599 extra = 12;
8600 else if (offset & 3)
8601 return false;
8602 break;
8604 default:
8605 break;
8608 offset += 0x8000;
8609 return offset < 0x10000 - extra;
8612 bool
8613 legitimate_indexed_address_p (rtx x, int strict)
8615 rtx op0, op1;
8617 if (GET_CODE (x) != PLUS)
8618 return false;
8620 op0 = XEXP (x, 0);
8621 op1 = XEXP (x, 1);
8623 return (REG_P (op0) && REG_P (op1)
8624 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8625 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8626 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8627 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8630 bool
8631 avoiding_indexed_address_p (machine_mode mode)
8633 /* Avoid indexed addressing for modes that have non-indexed
8634 load/store instruction forms. */
8635 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8638 bool
8639 legitimate_indirect_address_p (rtx x, int strict)
8641 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8644 bool
8645 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8647 if (!TARGET_MACHO || !flag_pic
8648 || mode != SImode || GET_CODE (x) != MEM)
8649 return false;
8650 x = XEXP (x, 0);
8652 if (GET_CODE (x) != LO_SUM)
8653 return false;
8654 if (GET_CODE (XEXP (x, 0)) != REG)
8655 return false;
8656 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8657 return false;
8658 x = XEXP (x, 1);
8660 return CONSTANT_P (x);
8663 static bool
8664 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8666 if (GET_CODE (x) != LO_SUM)
8667 return false;
8668 if (GET_CODE (XEXP (x, 0)) != REG)
8669 return false;
8670 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8671 return false;
8672 /* quad word addresses are restricted, and we can't use LO_SUM. */
8673 if (mode_supports_vsx_dform_quad (mode))
8674 return false;
8675 x = XEXP (x, 1);
8677 if (TARGET_ELF || TARGET_MACHO)
8679 bool large_toc_ok;
8681 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8682 return false;
8683 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8684 push_reload from reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
8685 recognizes some LO_SUM addresses as valid although this
8686 function says the opposite.  In most cases, LRA through different
8687 transformations can generate correct code for address reloads.
8688 It is only some LO_SUM cases that it cannot manage.  So we need to
8689 add code analogous to the one in rs6000_legitimize_reload_address
8690 for LO_SUM here, saying that some addresses are still valid.  */
8691 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8692 && small_toc_ref (x, VOIDmode));
8693 if (TARGET_TOC && ! large_toc_ok)
8694 return false;
8695 if (GET_MODE_NUNITS (mode) != 1)
8696 return false;
8697 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8698 && !(/* ??? Assume floating point reg based on mode? */
8699 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8700 && (mode == DFmode || mode == DDmode)))
8701 return false;
8703 return CONSTANT_P (x) || large_toc_ok;
8706 return false;
8710 /* Try machine-dependent ways of modifying an illegitimate address
8711 to be legitimate. If we find one, return the new, valid address.
8712 This is used from only one place: `memory_address' in explow.c.
8714 OLDX is the address as it was before break_out_memory_refs was
8715 called. In some cases it is useful to look at this to decide what
8716 needs to be done.
8718 It is always safe for this function to do nothing. It exists to
8719 recognize opportunities to optimize the output.
8721 On RS/6000, first check for the sum of a register with a constant
8722 integer that is out of range. If so, generate code to add the
8723 constant with the low-order 16 bits masked to the register and force
8724 this result into another register (this can be done with `cau').
8725 Then generate an address of REG+(CONST&0xffff), allowing for the
8726 possibility of bit 16 being a one.
8728 Then check for the sum of a register and something not constant, try to
8729 load the other things into a register and return the sum. */
8731 static rtx
8732 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8733 machine_mode mode)
8735 unsigned int extra;
8737 if (!reg_offset_addressing_ok_p (mode)
8738 || mode_supports_vsx_dform_quad (mode))
8740 if (virtual_stack_registers_memory_p (x))
8741 return x;
8743 /* In theory we should not be seeing addresses of the form reg+0,
8744 but just in case it is generated, optimize it away. */
8745 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8746 return force_reg (Pmode, XEXP (x, 0));
8748 /* For TImode with load/store quad, restrict addresses to just a single
8749 pointer, so it works with both GPRs and VSX registers. */
8750 /* Make sure both operands are registers. */
8751 else if (GET_CODE (x) == PLUS
8752 && (mode != TImode || !TARGET_VSX_TIMODE))
8753 return gen_rtx_PLUS (Pmode,
8754 force_reg (Pmode, XEXP (x, 0)),
8755 force_reg (Pmode, XEXP (x, 1)));
8756 else
8757 return force_reg (Pmode, x);
8759 if (GET_CODE (x) == SYMBOL_REF)
8761 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8762 if (model != 0)
8763 return rs6000_legitimize_tls_address (x, model);
8766 extra = 0;
8767 switch (mode)
8769 case TFmode:
8770 case TDmode:
8771 case TImode:
8772 case PTImode:
8773 case IFmode:
8774 case KFmode:
8775 /* As in legitimate_offset_address_p we do not assume
8776 worst-case. The mode here is just a hint as to the registers
8777 used. A TImode is usually in gprs, but may actually be in
8778 fprs. Leave worst-case scenario for reload to handle via
8779 insn constraints. PTImode is only GPRs. */
8780 extra = 8;
8781 break;
8782 default:
8783 break;
8786 if (GET_CODE (x) == PLUS
8787 && GET_CODE (XEXP (x, 0)) == REG
8788 && GET_CODE (XEXP (x, 1)) == CONST_INT
8789 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8790 >= 0x10000 - extra)
8791 && !PAIRED_VECTOR_MODE (mode))
8793 HOST_WIDE_INT high_int, low_int;
8794 rtx sum;
8795 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8796 if (low_int >= 0x8000 - extra)
8797 low_int = 0;
8798 high_int = INTVAL (XEXP (x, 1)) - low_int;
8799 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8800 GEN_INT (high_int)), 0);
8801 return plus_constant (Pmode, sum, low_int);
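      /* Worked example: for x == (plus r3 0x12345), low_int is 0x2345 and
	 high_int is 0x10000, so r3 + 0x10000 is forced into a register
	 (typically one ADDIS) and the returned address is that register
	 plus 0x2345.  */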
8803 else if (GET_CODE (x) == PLUS
8804 && GET_CODE (XEXP (x, 0)) == REG
8805 && GET_CODE (XEXP (x, 1)) != CONST_INT
8806 && GET_MODE_NUNITS (mode) == 1
8807 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8808 || (/* ??? Assume floating point reg based on mode? */
8809 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8810 && (mode == DFmode || mode == DDmode)))
8811 && !avoiding_indexed_address_p (mode))
8813 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8814 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8816 else if (PAIRED_VECTOR_MODE (mode))
8818 if (mode == DImode)
8819 return x;
8820 /* We accept [reg + reg]. */
8822 if (GET_CODE (x) == PLUS)
8824 rtx op1 = XEXP (x, 0);
8825 rtx op2 = XEXP (x, 1);
8826 rtx y;
8828 op1 = force_reg (Pmode, op1);
8829 op2 = force_reg (Pmode, op2);
8831 /* We can't always do [reg + reg] for these, because [reg +
8832 reg + offset] is not a legitimate addressing mode. */
8833 y = gen_rtx_PLUS (Pmode, op1, op2);
8835 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8836 return force_reg (Pmode, y);
8837 else
8838 return y;
8841 return force_reg (Pmode, x);
8843 else if ((TARGET_ELF
8844 #if TARGET_MACHO
8845 || !MACHO_DYNAMIC_NO_PIC_P
8846 #endif
8848 && TARGET_32BIT
8849 && TARGET_NO_TOC
8850 && ! flag_pic
8851 && GET_CODE (x) != CONST_INT
8852 && GET_CODE (x) != CONST_WIDE_INT
8853 && GET_CODE (x) != CONST_DOUBLE
8854 && CONSTANT_P (x)
8855 && GET_MODE_NUNITS (mode) == 1
8856 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8857 || (/* ??? Assume floating point reg based on mode? */
8858 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8859 && (mode == DFmode || mode == DDmode))))
8861 rtx reg = gen_reg_rtx (Pmode);
8862 if (TARGET_ELF)
8863 emit_insn (gen_elf_high (reg, x));
8864 else
8865 emit_insn (gen_macho_high (reg, x));
8866 return gen_rtx_LO_SUM (Pmode, reg, x);
8868 else if (TARGET_TOC
8869 && GET_CODE (x) == SYMBOL_REF
8870 && constant_pool_expr_p (x)
8871 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8872 return create_TOC_reference (x, NULL_RTX);
8873 else
8874 return x;
8877 /* Debug version of rs6000_legitimize_address. */
8878 static rtx
8879 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8881 rtx ret;
8882 rtx_insn *insns;
8884 start_sequence ();
8885 ret = rs6000_legitimize_address (x, oldx, mode);
8886 insns = get_insns ();
8887 end_sequence ();
8889 if (ret != x)
8891 fprintf (stderr,
8892 "\nrs6000_legitimize_address: mode %s, old code %s, "
8893 "new code %s, modified\n",
8894 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8895 GET_RTX_NAME (GET_CODE (ret)));
8897 fprintf (stderr, "Original address:\n");
8898 debug_rtx (x);
8900 fprintf (stderr, "oldx:\n");
8901 debug_rtx (oldx);
8903 fprintf (stderr, "New address:\n");
8904 debug_rtx (ret);
8906 if (insns)
8908 fprintf (stderr, "Insns added:\n");
8909 debug_rtx_list (insns, 20);
8912 else
8914 fprintf (stderr,
8915 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8916 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8918 debug_rtx (x);
8921 if (insns)
8922 emit_insn (insns);
8924 return ret;
8927 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8928 We need to emit DTP-relative relocations. */
8930 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8931 static void
8932 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8934 switch (size)
8936 case 4:
8937 fputs ("\t.long\t", file);
8938 break;
8939 case 8:
8940 fputs (DOUBLE_INT_ASM_OP, file);
8941 break;
8942 default:
8943 gcc_unreachable ();
8945 output_addr_const (file, x);
8946 if (TARGET_ELF)
8947 fputs ("@dtprel+0x8000", file);
8948 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8950 switch (SYMBOL_REF_TLS_MODEL (x))
8952 case 0:
8953 break;
8954 case TLS_MODEL_LOCAL_EXEC:
8955 fputs ("@le", file);
8956 break;
8957 case TLS_MODEL_INITIAL_EXEC:
8958 fputs ("@ie", file);
8959 break;
8960 case TLS_MODEL_GLOBAL_DYNAMIC:
8961 case TLS_MODEL_LOCAL_DYNAMIC:
8962 fputs ("@m", file);
8963 break;
8964 default:
8965 gcc_unreachable ();
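/* For example: with size == 4 on an ELF target this emits
   "\t.long\tsym@dtprel+0x8000"; on XCOFF a local-exec symbol is instead
   emitted as "sym@le".  */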
8970 /* Return true if X is a symbol that refers to real (rather than emulated)
8971 TLS. */
8973 static bool
8974 rs6000_real_tls_symbol_ref_p (rtx x)
8976 return (GET_CODE (x) == SYMBOL_REF
8977 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8980 /* In the name of slightly smaller debug output, and to cater to
8981 general assembler lossage, recognize various UNSPEC sequences
8982 and turn them back into a direct symbol reference. */
8984 static rtx
8985 rs6000_delegitimize_address (rtx orig_x)
8987 rtx x, y, offset;
8989 orig_x = delegitimize_mem_from_attrs (orig_x);
8990 x = orig_x;
8991 if (MEM_P (x))
8992 x = XEXP (x, 0);
8994 y = x;
8995 if (TARGET_CMODEL != CMODEL_SMALL
8996 && GET_CODE (y) == LO_SUM)
8997 y = XEXP (y, 1);
8999 offset = NULL_RTX;
9000 if (GET_CODE (y) == PLUS
9001 && GET_MODE (y) == Pmode
9002 && CONST_INT_P (XEXP (y, 1)))
9004 offset = XEXP (y, 1);
9005 y = XEXP (y, 0);
9008 if (GET_CODE (y) == UNSPEC
9009 && XINT (y, 1) == UNSPEC_TOCREL)
9011 y = XVECEXP (y, 0, 0);
9013 #ifdef HAVE_AS_TLS
9014 /* Do not associate thread-local symbols with the original
9015 constant pool symbol. */
9016 if (TARGET_XCOFF
9017 && GET_CODE (y) == SYMBOL_REF
9018 && CONSTANT_POOL_ADDRESS_P (y)
9019 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
9020 return orig_x;
9021 #endif
9023 if (offset != NULL_RTX)
9024 y = gen_rtx_PLUS (Pmode, y, offset);
9025 if (!MEM_P (orig_x))
9026 return y;
9027 else
9028 return replace_equiv_address_nv (orig_x, y);
9031 if (TARGET_MACHO
9032 && GET_CODE (orig_x) == LO_SUM
9033 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9035 y = XEXP (XEXP (orig_x, 1), 0);
9036 if (GET_CODE (y) == UNSPEC
9037 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9038 return XVECEXP (y, 0, 0);
9041 return orig_x;
9044 /* Return true if X shouldn't be emitted into the debug info.
9045 The linker doesn't like .toc section references from
9046 .debug_* sections, so reject .toc section symbols. */
9048 static bool
9049 rs6000_const_not_ok_for_debug_p (rtx x)
9051 if (GET_CODE (x) == SYMBOL_REF
9052 && CONSTANT_POOL_ADDRESS_P (x))
9054 rtx c = get_pool_constant (x);
9055 machine_mode cmode = get_pool_mode (x);
9056 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9057 return true;
9060 return false;
9064 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9066 static bool
9067 rs6000_legitimate_combined_insn (rtx_insn *insn)
9069 int icode = INSN_CODE (insn);
9071 /* Reject creating doloop insns. Combine should not be allowed
9072 to create these for a number of reasons:
9073 1) In a nested loop, if combine creates one of these in an
9074 outer loop and the register allocator happens to allocate ctr
9075 to the outer loop insn, then the inner loop can't use ctr.
9076 Inner loops ought to be more highly optimized.
9077 2) Combine often wants to create one of these from what was
9078 originally a three insn sequence, first combining the three
9079 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9080 allocated ctr, the splitter takes us back to the three insn
9081 sequence. It's better to stop combine at the two insn
9082 sequence.
9083 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9084 insns, the register allocator sometimes uses floating point
9085 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9086 jump insn and output reloads are not implemented for jumps,
9087 the ctrsi/ctrdi splitters need to handle all possible cases.
9088 That's a pain, and it gets to be seriously difficult when a
9089 splitter that runs after reload needs memory to transfer from
9090 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9091 for the difficult case. It's better to not create problems
9092 in the first place. */
9093 if (icode != CODE_FOR_nothing
9094 && (icode == CODE_FOR_ctrsi_internal1
9095 || icode == CODE_FOR_ctrdi_internal1
9096 || icode == CODE_FOR_ctrsi_internal2
9097 || icode == CODE_FOR_ctrdi_internal2
9098 || icode == CODE_FOR_ctrsi_internal3
9099 || icode == CODE_FOR_ctrdi_internal3
9100 || icode == CODE_FOR_ctrsi_internal4
9101 || icode == CODE_FOR_ctrdi_internal4))
9102 return false;
9104 return true;
9107 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9109 static GTY(()) rtx rs6000_tls_symbol;
9110 static rtx
9111 rs6000_tls_get_addr (void)
9113 if (!rs6000_tls_symbol)
9114 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9116 return rs6000_tls_symbol;
9119 /* Construct the SYMBOL_REF for TLS GOT references. */
9121 static GTY(()) rtx rs6000_got_symbol;
9122 static rtx
9123 rs6000_got_sym (void)
9125 if (!rs6000_got_symbol)
9127 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9128 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9129 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9132 return rs6000_got_symbol;
9135 /* AIX Thread-Local Address support. */
9137 static rtx
9138 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9140 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9141 const char *name;
9142 char *tlsname;
9144 name = XSTR (addr, 0);
9145 /* Append TLS CSECT qualifier, unless the symbol already is qualified
9146 or the symbol will be in the TLS private data section.  */
9147 if (name[strlen (name) - 1] != ']'
9148 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9149 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9151 tlsname = XALLOCAVEC (char, strlen (name) + 4);
9152 strcpy (tlsname, name);
9153 strcat (tlsname,
9154 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9155 tlsaddr = copy_rtx (addr);
9156 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9158 else
9159 tlsaddr = addr;
9161 /* Place addr into TOC constant pool. */
9162 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9164 /* Output the TOC entry and create the MEM referencing the value. */
9165 if (constant_pool_expr_p (XEXP (sym, 0))
9166 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9168 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9169 mem = gen_const_mem (Pmode, tocref);
9170 set_mem_alias_set (mem, get_TOC_alias_set ());
9172 else
9173 return sym;
9175 /* Use global-dynamic for local-dynamic. */
9176 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9177 || model == TLS_MODEL_LOCAL_DYNAMIC)
9179 /* Create new TOC reference for @m symbol. */
9180 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9181 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9182 strcpy (tlsname, "*LCM");
9183 strcat (tlsname, name + 3);
9184 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9185 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9186 tocref = create_TOC_reference (modaddr, NULL_RTX);
9187 rtx modmem = gen_const_mem (Pmode, tocref);
9188 set_mem_alias_set (modmem, get_TOC_alias_set ());
9190 rtx modreg = gen_reg_rtx (Pmode);
9191 emit_insn (gen_rtx_SET (modreg, modmem));
9193 tmpreg = gen_reg_rtx (Pmode);
9194 emit_insn (gen_rtx_SET (tmpreg, mem));
9196 dest = gen_reg_rtx (Pmode);
9197 if (TARGET_32BIT)
9198 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9199 else
9200 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9201 return dest;
9203 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9204 else if (TARGET_32BIT)
9206 tlsreg = gen_reg_rtx (SImode);
9207 emit_insn (gen_tls_get_tpointer (tlsreg));
9209 else
9210 tlsreg = gen_rtx_REG (DImode, 13);
9212 /* Load the TOC value into temporary register. */
9213 tmpreg = gen_reg_rtx (Pmode);
9214 emit_insn (gen_rtx_SET (tmpreg, mem));
9215 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9216 gen_rtx_MINUS (Pmode, addr, tlsreg));
9218 /* Add TOC symbol value to TLS pointer. */
9219 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9221 return dest;
9224 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9225 this (thread-local) address. */
9227 static rtx
9228 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9230 rtx dest, insn;
9232 if (TARGET_XCOFF)
9233 return rs6000_legitimize_tls_address_aix (addr, model);
9235 dest = gen_reg_rtx (Pmode);
9236 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9238 rtx tlsreg;
9240 if (TARGET_64BIT)
9242 tlsreg = gen_rtx_REG (Pmode, 13);
9243 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9245 else
9247 tlsreg = gen_rtx_REG (Pmode, 2);
9248 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9250 emit_insn (insn);
9252 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9254 rtx tlsreg, tmp;
9256 tmp = gen_reg_rtx (Pmode);
9257 if (TARGET_64BIT)
9259 tlsreg = gen_rtx_REG (Pmode, 13);
9260 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9262 else
9264 tlsreg = gen_rtx_REG (Pmode, 2);
9265 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9267 emit_insn (insn);
9268 if (TARGET_64BIT)
9269 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9270 else
9271 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9272 emit_insn (insn);
9274 else
9276 rtx r3, got, tga, tmp1, tmp2, call_insn;
9278 /* We currently use relocations like @got@tlsgd for tls, which
9279 means the linker will handle allocation of tls entries, placing
9280 them in the .got section. So use a pointer to the .got section,
9281 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9282 or to secondary GOT sections used by 32-bit -fPIC. */
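/* A typical 64-bit global-dynamic sequence (sketch; register numbers
   illustrative):
       addi 3,2,x@got@tlsgd          # GOT entry for x
       bl   __tls_get_addr(x@tlsgd)
       nop                           # address of x returned in r3  */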
9283 if (TARGET_64BIT)
9284 got = gen_rtx_REG (Pmode, 2);
9285 else
9287 if (flag_pic == 1)
9288 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9289 else
9291 rtx gsym = rs6000_got_sym ();
9292 got = gen_reg_rtx (Pmode);
9293 if (flag_pic == 0)
9294 rs6000_emit_move (got, gsym, Pmode);
9295 else
9297 rtx mem, lab;
9299 tmp1 = gen_reg_rtx (Pmode);
9300 tmp2 = gen_reg_rtx (Pmode);
9301 mem = gen_const_mem (Pmode, tmp1);
9302 lab = gen_label_rtx ();
9303 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9304 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9305 if (TARGET_LINK_STACK)
9306 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9307 emit_move_insn (tmp2, mem);
9308 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9309 set_unique_reg_note (last, REG_EQUAL, gsym);
9314 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9316 tga = rs6000_tls_get_addr ();
9317 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9318 1, const0_rtx, Pmode);
9320 r3 = gen_rtx_REG (Pmode, 3);
9321 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9323 if (TARGET_64BIT)
9324 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9325 else
9326 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9328 else if (DEFAULT_ABI == ABI_V4)
9329 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9330 else
9331 gcc_unreachable ();
9332 call_insn = last_call_insn ();
9333 PATTERN (call_insn) = insn;
9334 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9335 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9336 pic_offset_table_rtx);
9338 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9340 tga = rs6000_tls_get_addr ();
9341 tmp1 = gen_reg_rtx (Pmode);
9342 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9343 1, const0_rtx, Pmode);
9345 r3 = gen_rtx_REG (Pmode, 3);
9346 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9348 if (TARGET_64BIT)
9349 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9350 else
9351 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9353 else if (DEFAULT_ABI == ABI_V4)
9354 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9355 else
9356 gcc_unreachable ();
9357 call_insn = last_call_insn ();
9358 PATTERN (call_insn) = insn;
9359 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9360 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9361 pic_offset_table_rtx);
9363 if (rs6000_tls_size == 16)
9365 if (TARGET_64BIT)
9366 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9367 else
9368 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9370 else if (rs6000_tls_size == 32)
9372 tmp2 = gen_reg_rtx (Pmode);
9373 if (TARGET_64BIT)
9374 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9375 else
9376 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9377 emit_insn (insn);
9378 if (TARGET_64BIT)
9379 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9380 else
9381 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9383 else
9385 tmp2 = gen_reg_rtx (Pmode);
9386 if (TARGET_64BIT)
9387 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9388 else
9389 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9390 emit_insn (insn);
9391 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9393 emit_insn (insn);
9395 else
9397 /* IE, or 64-bit offset LE. */
9398 tmp2 = gen_reg_rtx (Pmode);
9399 if (TARGET_64BIT)
9400 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9401 else
9402 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9403 emit_insn (insn);
9404 if (TARGET_64BIT)
9405 insn = gen_tls_tls_64 (dest, tmp2, addr);
9406 else
9407 insn = gen_tls_tls_32 (dest, tmp2, addr);
9408 emit_insn (insn);
9412 return dest;
9415 /* Only create the global variable for the stack protect guard if we are using
9416 the global flavor of that guard. */
9417 static tree
9418 rs6000_init_stack_protect_guard (void)
9420 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9421 return default_stack_protect_guard ();
9423 return NULL_TREE;
9426 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9428 static bool
9429 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9431 if (GET_CODE (x) == HIGH
9432 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9433 return true;
9435 /* A TLS symbol in the TOC cannot contain a sum. */
9436 if (GET_CODE (x) == CONST
9437 && GET_CODE (XEXP (x, 0)) == PLUS
9438 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9439 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9440 return true;
9442 /* Do not place an ELF TLS symbol in the constant pool. */
9443 return TARGET_ELF && tls_referenced_p (x);
9446 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9447 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9448 can be addressed relative to the toc pointer. */
9450 static bool
9451 use_toc_relative_ref (rtx sym, machine_mode mode)
9453 return ((constant_pool_expr_p (sym)
9454 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9455 get_pool_mode (sym)))
9456 || (TARGET_CMODEL == CMODEL_MEDIUM
9457 && SYMBOL_REF_LOCAL_P (sym)
9458 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9461 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9462 replace the input X, or the original X if no replacement is called for.
9463 The output parameter *WIN is 1 if the calling macro should goto WIN,
9464 0 if it should not.
9466 For RS/6000, we wish to handle large displacements off a base
9467 register by splitting the addend across an addi/addis and the mem insn.
9468 This cuts the number of extra insns needed from 3 to 1.
9470 On Darwin, we use this to generate code for floating point constants.
9471 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9472 The Darwin code is inside #if TARGET_MACHO because only then are the
9473 machopic_* functions defined. */
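/* A sketch of the high/low split described above (illustrative; register
   numbers made up), for a displacement of 0x12345678 off r3:
       addis 9,3,0x1234        # high part reloaded into a base reg
       lwz   4,0x5678(9)       # low part stays in the mem insn
   rather than first materializing the full displacement separately.  */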
9474 static rtx
9475 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9476 int opnum, int type,
9477 int ind_levels ATTRIBUTE_UNUSED, int *win)
9479 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9480 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9482 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9483 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9484 if (reg_offset_p
9485 && opnum == 1
9486 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9487 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9488 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9489 && TARGET_P9_VECTOR)
9490 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9491 && TARGET_P9_VECTOR)))
9492 reg_offset_p = false;
9494 /* We must recognize output that we have already generated ourselves. */
9495 if (GET_CODE (x) == PLUS
9496 && GET_CODE (XEXP (x, 0)) == PLUS
9497 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9498 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9499 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9501 if (TARGET_DEBUG_ADDR)
9503 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9504 debug_rtx (x);
9506 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9507 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9508 opnum, (enum reload_type) type);
9509 *win = 1;
9510 return x;
9513 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9514 if (GET_CODE (x) == LO_SUM
9515 && GET_CODE (XEXP (x, 0)) == HIGH)
9517 if (TARGET_DEBUG_ADDR)
9519 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9520 debug_rtx (x);
9522 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9523 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9524 opnum, (enum reload_type) type);
9525 *win = 1;
9526 return x;
9529 #if TARGET_MACHO
9530 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9531 && GET_CODE (x) == LO_SUM
9532 && GET_CODE (XEXP (x, 0)) == PLUS
9533 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9534 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9535 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9536 && machopic_operand_p (XEXP (x, 1)))
9538 /* Result of a previous invocation of this function on a Darwin
9539 floating-point constant. */
9540 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9541 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9542 opnum, (enum reload_type) type);
9543 *win = 1;
9544 return x;
9546 #endif
9548 if (TARGET_CMODEL != CMODEL_SMALL
9549 && reg_offset_p
9550 && !quad_offset_p
9551 && small_toc_ref (x, VOIDmode))
9553 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9554 x = gen_rtx_LO_SUM (Pmode, hi, x);
9555 if (TARGET_DEBUG_ADDR)
9557 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9558 debug_rtx (x);
9560 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9561 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9562 opnum, (enum reload_type) type);
9563 *win = 1;
9564 return x;
9567 if (GET_CODE (x) == PLUS
9568 && REG_P (XEXP (x, 0))
9569 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9570 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9571 && CONST_INT_P (XEXP (x, 1))
9572 && reg_offset_p
9573 && !PAIRED_VECTOR_MODE (mode)
9574 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9576 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9577 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9578 HOST_WIDE_INT high
9579 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
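/* Worked example (illustrative): val = 0x12348888 gives
   low = -0x7778 (0x8888 sign-extended as a 16-bit value) and
   high = 0x12350000, so high + low == val and each half fits the
   signed 16-bit field of an addis/d-form instruction.  */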
9581 /* Check for 32-bit overflow or quad addresses with one of the
9582 four least significant bits set. */
9583 if (high + low != val
9584 || (quad_offset_p && (low & 0xf)))
9586 *win = 0;
9587 return x;
9590 /* Reload the high part into a base reg; leave the low part
9591 in the mem directly. */
9593 x = gen_rtx_PLUS (GET_MODE (x),
9594 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9595 GEN_INT (high)),
9596 GEN_INT (low));
9598 if (TARGET_DEBUG_ADDR)
9600 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9601 debug_rtx (x);
9603 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9604 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9605 opnum, (enum reload_type) type);
9606 *win = 1;
9607 return x;
9610 if (GET_CODE (x) == SYMBOL_REF
9611 && reg_offset_p
9612 && !quad_offset_p
9613 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9614 && !PAIRED_VECTOR_MODE (mode)
9615 #if TARGET_MACHO
9616 && DEFAULT_ABI == ABI_DARWIN
9617 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9618 && machopic_symbol_defined_p (x)
9619 #else
9620 && DEFAULT_ABI == ABI_V4
9621 && !flag_pic
9622 #endif
9623 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9624 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9625 without fprs.
9626 ??? Assume floating point reg based on mode? This assumption is
9627 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9628 where reload ends up doing a DFmode load of a constant from
9629 mem using two gprs. Unfortunately, at this point reload
9630 hasn't yet selected regs so poking around in reload data
9631 won't help and even if we could figure out the regs reliably,
9632 we'd still want to allow this transformation when the mem is
9633 naturally aligned. Since we say the address is good here, we
9634 can't disable offsets from LO_SUMs in mem_operand_gpr.
9635 FIXME: Allow offset from lo_sum for other modes too, when
9636 mem is sufficiently aligned.
9638 Also disallow this if the type can go in VMX/Altivec registers, since
9639 those registers do not have d-form (reg+offset) address modes. */
9640 && !reg_addr[mode].scalar_in_vmx_p
9641 && mode != TFmode
9642 && mode != TDmode
9643 && mode != IFmode
9644 && mode != KFmode
9645 && (mode != TImode || !TARGET_VSX_TIMODE)
9646 && mode != PTImode
9647 && (mode != DImode || TARGET_POWERPC64)
9648 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9649 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9651 #if TARGET_MACHO
9652 if (flag_pic)
9654 rtx offset = machopic_gen_offset (x);
9655 x = gen_rtx_LO_SUM (GET_MODE (x),
9656 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9657 gen_rtx_HIGH (Pmode, offset)), offset);
9659 else
9660 #endif
9661 x = gen_rtx_LO_SUM (GET_MODE (x),
9662 gen_rtx_HIGH (Pmode, x), x);
9664 if (TARGET_DEBUG_ADDR)
9666 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9667 debug_rtx (x);
9669 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9670 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9671 opnum, (enum reload_type) type);
9672 *win = 1;
9673 return x;
9676 /* Reload an offset address wrapped by an AND that represents the
9677 masking of the lower bits. Strip the outer AND and let reload
9678 convert the offset address into an indirect address. For VSX,
9679 force reload to create the address with an AND in a separate
9680 register, because we can't guarantee an altivec register will
9681 be used. */
9682 if (VECTOR_MEM_ALTIVEC_P (mode)
9683 && GET_CODE (x) == AND
9684 && GET_CODE (XEXP (x, 0)) == PLUS
9685 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9686 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9687 && GET_CODE (XEXP (x, 1)) == CONST_INT
9688 && INTVAL (XEXP (x, 1)) == -16)
9690 x = XEXP (x, 0);
9691 *win = 1;
9692 return x;
9695 if (TARGET_TOC
9696 && reg_offset_p
9697 && !quad_offset_p
9698 && GET_CODE (x) == SYMBOL_REF
9699 && use_toc_relative_ref (x, mode))
9701 x = create_TOC_reference (x, NULL_RTX);
9702 if (TARGET_CMODEL != CMODEL_SMALL)
9704 if (TARGET_DEBUG_ADDR)
9706 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9707 debug_rtx (x);
9709 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9710 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9711 opnum, (enum reload_type) type);
9713 *win = 1;
9714 return x;
9716 *win = 0;
9717 return x;
9720 /* Debug version of rs6000_legitimize_reload_address. */
9721 static rtx
9722 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9723 int opnum, int type,
9724 int ind_levels, int *win)
9726 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9727 ind_levels, win);
9728 fprintf (stderr,
9729 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9730 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9731 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9732 debug_rtx (x);
9734 if (x == ret)
9735 fprintf (stderr, "Same address returned\n");
9736 else if (!ret)
9737 fprintf (stderr, "NULL returned\n");
9738 else
9740 fprintf (stderr, "New address:\n");
9741 debug_rtx (ret);
9744 return ret;
9747 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9748 that is a valid memory address for an instruction.
9749 The MODE argument is the machine mode for the MEM expression
9750 that wants to use this address.
9752 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9753 refers to a constant pool entry of an address (or the sum of it
9754 plus a constant), a short (16-bit signed) constant plus a register,
9755 the sum of two registers, or a register indirect, possibly with an
9756 auto-increment. For DFmode, DDmode and DImode with a constant plus
9757 register, we must ensure that both words are addressable, or on
9758 PowerPC64 that the offset is word aligned.
9760 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9761 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9762 because adjacent memory cells are accessed by adding word-sized offsets
9763 during assembly output. */
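/* Illustrative RTL for those forms (register numbers hypothetical):
     (mem:SI (reg:SI 3))                            register indirect
     (mem:SI (plus:SI (reg:SI 3) (const_int 8)))    reg + 16-bit offset
     (mem:SI (plus:SI (reg:SI 3) (reg:SI 4)))       reg + reg (indexed)
     (mem:SI (pre_inc:SI (reg:SI 3)))               auto-increment  */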
9764 static bool
9765 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9767 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9768 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9770 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9771 if (VECTOR_MEM_ALTIVEC_P (mode)
9772 && GET_CODE (x) == AND
9773 && GET_CODE (XEXP (x, 1)) == CONST_INT
9774 && INTVAL (XEXP (x, 1)) == -16)
9775 x = XEXP (x, 0);
9777 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9778 return 0;
9779 if (legitimate_indirect_address_p (x, reg_ok_strict))
9780 return 1;
9781 if (TARGET_UPDATE
9782 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9783 && mode_supports_pre_incdec_p (mode)
9784 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9785 return 1;
9786 /* Handle restricted vector d-form offsets in ISA 3.0. */
9787 if (quad_offset_p)
9789 if (quad_address_p (x, mode, reg_ok_strict))
9790 return 1;
9792 else if (virtual_stack_registers_memory_p (x))
9793 return 1;
9795 else if (reg_offset_p)
9797 if (legitimate_small_data_p (mode, x))
9798 return 1;
9799 if (legitimate_constant_pool_address_p (x, mode,
9800 reg_ok_strict || lra_in_progress))
9801 return 1;
9802 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9803 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9804 return 1;
9807 /* For TImode, if we have TImode in VSX registers, only allow register
9808 indirect addresses. This will allow the values to go in either GPRs
9809 or VSX registers without reloading. The vector types would tend to
9810 go into VSX registers, so we allow REG+REG, while TImode seems
9811 somewhat split, in that some uses are GPR based, and some VSX based. */
9812 /* FIXME: We could loosen this by changing the following to
9813 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
9814 but currently we cannot allow REG+REG addressing for TImode. See
9815 PR72827 for complete details on how this ends up hoodwinking DSE. */
9816 if (mode == TImode && TARGET_VSX_TIMODE)
9817 return 0;
9818 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9819 if (! reg_ok_strict
9820 && reg_offset_p
9821 && GET_CODE (x) == PLUS
9822 && GET_CODE (XEXP (x, 0)) == REG
9823 && (XEXP (x, 0) == virtual_stack_vars_rtx
9824 || XEXP (x, 0) == arg_pointer_rtx)
9825 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9826 return 1;
9827 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9828 return 1;
9829 if (!FLOAT128_2REG_P (mode)
9830 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9831 || TARGET_POWERPC64
9832 || (mode != DFmode && mode != DDmode))
9833 && (TARGET_POWERPC64 || mode != DImode)
9834 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9835 && mode != PTImode
9836 && !avoiding_indexed_address_p (mode)
9837 && legitimate_indexed_address_p (x, reg_ok_strict))
9838 return 1;
9839 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9840 && mode_supports_pre_modify_p (mode)
9841 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9842 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9843 reg_ok_strict, false)
9844 || (!avoiding_indexed_address_p (mode)
9845 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9846 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9847 return 1;
9848 if (reg_offset_p && !quad_offset_p
9849 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9850 return 1;
9851 return 0;
9854 /* Debug version of rs6000_legitimate_address_p. */
9855 static bool
9856 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9857 bool reg_ok_strict)
9859 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9860 fprintf (stderr,
9861 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9862 "strict = %d, reload = %s, code = %s\n",
9863 ret ? "true" : "false",
9864 GET_MODE_NAME (mode),
9865 reg_ok_strict,
9866 (reload_completed ? "after" : "before"),
9867 GET_RTX_NAME (GET_CODE (x)));
9868 debug_rtx (x);
9870 return ret;
9873 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9875 static bool
9876 rs6000_mode_dependent_address_p (const_rtx addr,
9877 addr_space_t as ATTRIBUTE_UNUSED)
9879 return rs6000_mode_dependent_address_ptr (addr);
9882 /* Go to LABEL if ADDR (a legitimate address expression)
9883 has an effect that depends on the machine mode it is used for.
9885 On the RS/6000 this is true of all integral offsets (since AltiVec
9886 and VSX modes don't allow them) and of any pre-increment or decrement.
9888 ??? Except that due to conceptual problems in offsettable_address_p
9889 we can't really report the problems of integral offsets. So leave
9890 this assuming that the adjustable offset must be valid for the
9891 sub-words of a TFmode operand, which is what we had before. */
9893 static bool
9894 rs6000_mode_dependent_address (const_rtx addr)
9896 switch (GET_CODE (addr))
9898 case PLUS:
9899 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9900 is considered a legitimate address before reload, so there
9901 are no offset restrictions in that case. Note that this
9902 condition is safe in strict mode because any address involving
9903 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9904 been rejected as illegitimate. */
9905 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9906 && XEXP (addr, 0) != arg_pointer_rtx
9907 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9909 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9910 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9912 break;
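/* E.g. (illustrative) for the PLUS case above with TARGET_POWERPC64,
   an offset of 0x7ff0 passes, but 0x7ff8 is flagged as mode-dependent:
   reaching the last doubleword of a 16-byte operand would need offset
   0x8000, which no longer fits the signed 16-bit displacement field.  */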
9914 case LO_SUM:
9915 /* Anything in the constant pool is sufficiently aligned that
9916 all bytes have the same high part address. */
9917 return !legitimate_constant_pool_address_p (addr, QImode, false);
9919 /* Auto-increment cases are now treated generically in recog.c. */
9920 case PRE_MODIFY:
9921 return TARGET_UPDATE;
9923 /* AND is only allowed in Altivec loads. */
9924 case AND:
9925 return true;
9927 default:
9928 break;
9931 return false;
9934 /* Debug version of rs6000_mode_dependent_address. */
9935 static bool
9936 rs6000_debug_mode_dependent_address (const_rtx addr)
9938 bool ret = rs6000_mode_dependent_address (addr);
9940 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9941 ret ? "true" : "false");
9942 debug_rtx (addr);
9944 return ret;
9947 /* Implement FIND_BASE_TERM. */
9950 rs6000_find_base_term (rtx op)
9952 rtx base;
9954 base = op;
9955 if (GET_CODE (base) == CONST)
9956 base = XEXP (base, 0);
9957 if (GET_CODE (base) == PLUS)
9958 base = XEXP (base, 0);
9959 if (GET_CODE (base) == UNSPEC)
9960 switch (XINT (base, 1))
9962 case UNSPEC_TOCREL:
9963 case UNSPEC_MACHOPIC_OFFSET:
9964 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9965 for aliasing purposes. */
9966 return XVECEXP (base, 0, 0);
9969 return op;
9972 /* More elaborate version of recog's offsettable_memref_p predicate
9973 that works around the ??? note of rs6000_mode_dependent_address.
9974 In particular it accepts
9976 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9978 in 32-bit mode, which the recog predicate rejects.
9980 static bool
9981 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9983 bool worst_case;
9985 if (!MEM_P (op))
9986 return false;
9988 /* First mimic offsettable_memref_p. */
9989 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9990 return true;
9992 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9993 the latter predicate knows nothing about the mode of the memory
9994 reference and, therefore, assumes that it is the largest supported
9995 mode (TFmode). As a consequence, legitimate offsettable memory
9996 references are rejected. rs6000_legitimate_offset_address_p contains
9997 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9998 at least with a little bit of help here given that we know the
9999 actual registers used. */
10000 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
10001 || GET_MODE_SIZE (reg_mode) == 4);
10002 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
10003 true, worst_case);
10006 /* Determine the reassociation width to be used in reassociate_bb.
10007 This takes into account how many parallel operations we
10008 can actually do of a given type, and also the latency.
10010 int add/sub 6/cycle
10011 mul 2/cycle
10012 vect add/sub/mul 2/cycle
10013 fp add/sub/mul 2/cycle
10014 dfp 1/cycle
10017 static int
10018 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
10019 machine_mode mode)
10021 switch (rs6000_cpu)
10023 case PROCESSOR_POWER8:
10024 case PROCESSOR_POWER9:
10025 if (DECIMAL_FLOAT_MODE_P (mode))
10026 return 1;
10027 if (VECTOR_MODE_P (mode))
10028 return 4;
10029 if (INTEGRAL_MODE_P (mode))
10030 return opc == MULT_EXPR ? 4 : 6;
10031 if (FLOAT_MODE_P (mode))
10032 return 4;
10033 break;
10034 default:
10035 break;
10037 return 1;
10040 /* Change register usage conditional on target flags. */
10041 static void
10042 rs6000_conditional_register_usage (void)
10044 int i;
10046 if (TARGET_DEBUG_TARGET)
10047 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10049 /* Set MQ register fixed (already call_used) so that it will not be
10050 allocated. */
10051 fixed_regs[64] = 1;
10053 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10054 if (TARGET_64BIT)
10055 fixed_regs[13] = call_used_regs[13]
10056 = call_really_used_regs[13] = 1;
10058 /* Conditionally disable FPRs. */
10059 if (TARGET_SOFT_FLOAT)
10060 for (i = 32; i < 64; i++)
10061 fixed_regs[i] = call_used_regs[i]
10062 = call_really_used_regs[i] = 1;
10064 /* The TOC register is not killed across calls in a way that is
10065 visible to the compiler. */
10066 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10067 call_really_used_regs[2] = 0;
10069 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10070 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10072 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10073 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10074 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10075 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10077 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10078 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10079 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10080 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10082 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10083 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10084 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10086 if (!TARGET_ALTIVEC && !TARGET_VSX)
10088 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10089 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10090 call_really_used_regs[VRSAVE_REGNO] = 1;
10093 if (TARGET_ALTIVEC || TARGET_VSX)
10094 global_regs[VSCR_REGNO] = 1;
10096 if (TARGET_ALTIVEC_ABI)
10098 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10099 call_used_regs[i] = call_really_used_regs[i] = 1;
10101 /* AIX reserves VR20:31 in non-extended ABI mode. */
10102 if (TARGET_XCOFF)
10103 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10104 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10109 /* Output insns to set DEST equal to the constant SOURCE as a series of
10110 lis, ori and shl instructions and return TRUE. */
10112 bool
10113 rs6000_emit_set_const (rtx dest, rtx source)
10115 machine_mode mode = GET_MODE (dest);
10116 rtx temp, set;
10117 rtx_insn *insn;
10118 HOST_WIDE_INT c;
10120 gcc_checking_assert (CONST_INT_P (source));
10121 c = INTVAL (source);
10122 switch (mode)
10124 case QImode:
10125 case HImode:
10126 emit_insn (gen_rtx_SET (dest, source));
10127 return true;
10129 case SImode:
10130 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10132 emit_insn (gen_rtx_SET (copy_rtx (temp),
10133 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10134 emit_insn (gen_rtx_SET (dest,
10135 gen_rtx_IOR (SImode, copy_rtx (temp),
10136 GEN_INT (c & 0xffff))));
10137 break;
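/* Sketch of the SImode expansion just above for c = 0x12345678:
       lis  tmp,0x1234        # tmp  = 0x12340000
       ori  dest,tmp,0x5678   # dest = 0x12345678  */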
10139 case DImode:
10140 if (!TARGET_POWERPC64)
10142 rtx hi, lo;
10144 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10145 DImode);
10146 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10147 DImode);
10148 emit_move_insn (hi, GEN_INT (c >> 32));
10149 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10150 emit_move_insn (lo, GEN_INT (c));
10152 else
10153 rs6000_emit_set_long_const (dest, c);
10154 break;
10156 default:
10157 gcc_unreachable ();
10160 insn = get_last_insn ();
10161 set = single_set (insn);
10162 if (! CONSTANT_P (SET_SRC (set)))
10163 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10165 return true;
10168 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10169 Output insns to set DEST equal to the constant C as a series of
10170 lis, ori and shl instructions. */
10172 static void
10173 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10175 rtx temp;
10176 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10178 ud1 = c & 0xffff;
10179 c = c >> 16;
10180 ud2 = c & 0xffff;
10181 c = c >> 16;
10182 ud3 = c & 0xffff;
10183 c = c >> 16;
10184 ud4 = c & 0xffff;
10186 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10187 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10188 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10190 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10191 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10193 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10195 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10196 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10197 if (ud1 != 0)
10198 emit_move_insn (dest,
10199 gen_rtx_IOR (DImode, copy_rtx (temp),
10200 GEN_INT (ud1)));
10202 else if (ud3 == 0 && ud4 == 0)
10204 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10206 gcc_assert (ud2 & 0x8000);
10207 emit_move_insn (copy_rtx (temp),
10208 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10209 if (ud1 != 0)
10210 emit_move_insn (copy_rtx (temp),
10211 gen_rtx_IOR (DImode, copy_rtx (temp),
10212 GEN_INT (ud1)));
10213 emit_move_insn (dest,
10214 gen_rtx_ZERO_EXTEND (DImode,
10215 gen_lowpart (SImode,
10216 copy_rtx (temp))));
10218 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10219 || (ud4 == 0 && ! (ud3 & 0x8000)))
10221 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10223 emit_move_insn (copy_rtx (temp),
10224 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10225 if (ud2 != 0)
10226 emit_move_insn (copy_rtx (temp),
10227 gen_rtx_IOR (DImode, copy_rtx (temp),
10228 GEN_INT (ud2)));
10229 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10230 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10231 GEN_INT (16)));
10232 if (ud1 != 0)
10233 emit_move_insn (dest,
10234 gen_rtx_IOR (DImode, copy_rtx (temp),
10235 GEN_INT (ud1)));
10237 else
10239 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10241 emit_move_insn (copy_rtx (temp),
10242 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10243 if (ud3 != 0)
10244 emit_move_insn (copy_rtx (temp),
10245 gen_rtx_IOR (DImode, copy_rtx (temp),
10246 GEN_INT (ud3)));
10248 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10249 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10250 GEN_INT (32)));
10251 if (ud2 != 0)
10252 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10253 gen_rtx_IOR (DImode, copy_rtx (temp),
10254 GEN_INT (ud2 << 16)));
10255 if (ud1 != 0)
10256 emit_move_insn (dest,
10257 gen_rtx_IOR (DImode, copy_rtx (temp),
10258 GEN_INT (ud1)));
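/* Worked example (illustrative) for the final arm above with
   c = 0x123456789abcdef0, i.e. ud4..ud1 = 0x1234, 0x5678, 0x9abc, 0xdef0:
       lis   tmp,0x1234
       ori   tmp,tmp,0x5678
       sldi  tmp,tmp,32
       oris  tmp,tmp,0x9abc
       ori   dest,tmp,0xdef0  */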
10262 /* Helper for the following. Get rid of [r+r] memory refs
10263 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10265 static void
10266 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10268 if (GET_CODE (operands[0]) == MEM
10269 && GET_CODE (XEXP (operands[0], 0)) != REG
10270 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10271 GET_MODE (operands[0]), false))
10272 operands[0]
10273 = replace_equiv_address (operands[0],
10274 copy_addr_to_reg (XEXP (operands[0], 0)));
10276 if (GET_CODE (operands[1]) == MEM
10277 && GET_CODE (XEXP (operands[1], 0)) != REG
10278 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10279 GET_MODE (operands[1]), false))
10280 operands[1]
10281 = replace_equiv_address (operands[1],
10282 copy_addr_to_reg (XEXP (operands[1], 0)));
10285 /* Generate a vector of constants to permute MODE for a little-endian
10286 storage operation by swapping the two halves of a vector. */
10287 static rtvec
10288 rs6000_const_vec (machine_mode mode)
10290 int i, subparts;
10291 rtvec v;
10293 switch (mode)
10295 case V1TImode:
10296 subparts = 1;
10297 break;
10298 case V2DFmode:
10299 case V2DImode:
10300 subparts = 2;
10301 break;
10302 case V4SFmode:
10303 case V4SImode:
10304 subparts = 4;
10305 break;
10306 case V8HImode:
10307 subparts = 8;
10308 break;
10309 case V16QImode:
10310 subparts = 16;
10311 break;
10312 default:
10313 gcc_unreachable();
10316 v = rtvec_alloc (subparts);
10318 for (i = 0; i < subparts / 2; ++i)
10319 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10320 for (i = subparts / 2; i < subparts; ++i)
10321 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10323 return v;
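/* For V4SImode, for instance, this produces the element order
   { 2, 3, 0, 1 }, i.e. the two doubleword halves of the vector swapped,
   matching what lxvd2x/stxvd2x do on little-endian.  */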
10326 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10327 store operation. */
10328 void
10329 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10331 /* Scalar permutations are easier to express in integer modes than in
10332 floating-point modes, so cast them here. We use V1TImode instead
10333 of TImode to ensure that the values don't go through GPRs. */
10334 if (FLOAT128_VECTOR_P (mode))
10336 dest = gen_lowpart (V1TImode, dest);
10337 source = gen_lowpart (V1TImode, source);
10338 mode = V1TImode;
10341 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10342 scalar. */
10343 if (mode == TImode || mode == V1TImode)
10344 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10345 GEN_INT (64))));
10346 else
10348 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10349 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10353 /* Emit a little-endian load from vector memory location SOURCE to VSX
10354 register DEST in mode MODE. The load is done with two permuting
10355 insns that represent an lxvd2x and an xxpermdi.
10356 void
10357 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10359 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10360 V1TImode). */
10361 if (mode == TImode || mode == V1TImode)
10363 mode = V2DImode;
10364 dest = gen_lowpart (V2DImode, dest);
10365 source = adjust_address (source, V2DImode, 0);
10368 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10369 rs6000_emit_le_vsx_permute (tmp, source, mode);
10370 rs6000_emit_le_vsx_permute (dest, tmp, mode);
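/* The two permutes cancel on the value itself; they exist so the insns
   match the lxvd2x + xxpermdi pair, e.g. (sketch; register numbers
   illustrative):
       lxvd2x   vs0,0,r3          # doubleword-swapped load
       xxpermdi vs34,vs0,vs0,2    # swap the halves back  */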
10373 /* Emit a little-endian store to vector memory location DEST from VSX
10374 register SOURCE in mode MODE. The store is done with two permuting
10375 insns that represent an xxpermdi and an stxvd2x.
10376 void
10377 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10379 /* This should never be called during or after LRA, because it does
10380 not re-permute the source register. It is intended only for use
10381 during expand. */
10382 gcc_assert (!lra_in_progress && !reload_completed);
10384 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10385 V1TImode). */
10386 if (mode == TImode || mode == V1TImode)
10388 mode = V2DImode;
10389 dest = adjust_address (dest, V2DImode, 0);
10390 source = gen_lowpart (V2DImode, source);
10393 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10394 rs6000_emit_le_vsx_permute (tmp, source, mode);
10395 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10398 /* Emit a sequence representing a little-endian VSX load or store,
10399 moving data from SOURCE to DEST in mode MODE. This is done
10400 separately from rs6000_emit_move to ensure it is called only
10401 during expand. LE VSX loads and stores introduced later are
10402 handled with a split. The expand-time RTL generation allows
10403 us to optimize away redundant pairs of register-permutes. */
10404 void
10405 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10407 gcc_assert (!BYTES_BIG_ENDIAN
10408 && VECTOR_MEM_VSX_P (mode)
10409 && !TARGET_P9_VECTOR
10410 && !gpr_or_gpr_p (dest, source)
10411 && (MEM_P (source) ^ MEM_P (dest)));
10413 if (MEM_P (source))
10415 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10416 rs6000_emit_le_vsx_load (dest, source, mode);
10418 else
10420 if (!REG_P (source))
10421 source = force_reg (mode, source);
10422 rs6000_emit_le_vsx_store (dest, source, mode);
10426 /* Return whether an SFmode or SImode move can be done without converting one
10427 mode to another. This arises when we have:
10429 (SUBREG:SF (REG:SI ...))
10430 (SUBREG:SI (REG:SF ...))
10432 and one of the values is in a floating point/vector register, where SFmode
10433 scalars are stored in DFmode format. */
10435 bool
10436 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10438 if (TARGET_ALLOW_SF_SUBREG)
10439 return true;
10441 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10442 return true;
10444 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10445 return true;
10447 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10448 if (SUBREG_P (dest))
10450 rtx dest_subreg = SUBREG_REG (dest);
10451 rtx src_subreg = SUBREG_REG (src);
10452 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10455 return false;
10459 /* Helper function to change moves with:
10461 (SUBREG:SF (REG:SI)) and
10462 (SUBREG:SI (REG:SF))
10464 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10465 values are stored as DFmode values in the VSX registers. We need to convert
10466 the bits before we can use a direct move or operate on the bits in the
10467 vector register as an integer type.
10469 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10471 static bool
10472 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10474 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10475 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10476 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10478 rtx inner_source = SUBREG_REG (source);
10479 machine_mode inner_mode = GET_MODE (inner_source);
10481 if (mode == SImode && inner_mode == SFmode)
10483 emit_insn (gen_movsi_from_sf (dest, inner_source));
10484 return true;
10487 if (mode == SFmode && inner_mode == SImode)
10489 emit_insn (gen_movsf_from_si (dest, inner_source));
10490 return true;
10494 return false;
10497 /* Emit a move from SOURCE to DEST in mode MODE. */
10498 void
10499 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10501 rtx operands[2];
10502 operands[0] = dest;
10503 operands[1] = source;
10505 if (TARGET_DEBUG_ADDR)
10507 fprintf (stderr,
10508 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10509 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10510 GET_MODE_NAME (mode),
10511 lra_in_progress,
10512 reload_completed,
10513 can_create_pseudo_p ());
10514 debug_rtx (dest);
10515 fprintf (stderr, "source:\n");
10516 debug_rtx (source);
10519 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10520 if (CONST_WIDE_INT_P (operands[1])
10521 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10523 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10524 gcc_unreachable ();
10527 /* See if we need to special case SImode/SFmode SUBREG moves. */
10528 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10529 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10530 return;
10532 /* Check if GCC is setting up a block move that will end up using FP
10533 registers as temporaries. We must make sure this is acceptable. */
10534 if (GET_CODE (operands[0]) == MEM
10535 && GET_CODE (operands[1]) == MEM
10536 && mode == DImode
10537 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
10538 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
10539 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
10540 ? 32 : MEM_ALIGN (operands[0])))
10541 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
10542 ? 32
10543 : MEM_ALIGN (operands[1]))))
10544 && ! MEM_VOLATILE_P (operands [0])
10545 && ! MEM_VOLATILE_P (operands [1]))
10547 emit_move_insn (adjust_address (operands[0], SImode, 0),
10548 adjust_address (operands[1], SImode, 0));
10549 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10550 adjust_address (copy_rtx (operands[1]), SImode, 4));
10551 return;
10554 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10555 && !gpc_reg_operand (operands[1], mode))
10556 operands[1] = force_reg (mode, operands[1]);
10558 /* Recognize the case where operand[1] is a reference to thread-local
10559 data and load its address to a register. */
10560 if (tls_referenced_p (operands[1]))
10562 enum tls_model model;
10563 rtx tmp = operands[1];
10564 rtx addend = NULL;
10566 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10568 addend = XEXP (XEXP (tmp, 0), 1);
10569 tmp = XEXP (XEXP (tmp, 0), 0);
10572 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10573 model = SYMBOL_REF_TLS_MODEL (tmp);
10574 gcc_assert (model != 0);
10576 tmp = rs6000_legitimize_tls_address (tmp, model);
10577 if (addend)
10579 tmp = gen_rtx_PLUS (mode, tmp, addend);
10580 tmp = force_operand (tmp, operands[0]);
10582 operands[1] = tmp;
10585 /* 128-bit constant floating-point values on Darwin should really be loaded
10586 as two parts. However, this premature splitting is a problem when DFmode
10587 values can go into Altivec registers. */
10588 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10589 && GET_CODE (operands[1]) == CONST_DOUBLE)
10591 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10592 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10593 DFmode);
10594 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10595 GET_MODE_SIZE (DFmode)),
10596 simplify_gen_subreg (DFmode, operands[1], mode,
10597 GET_MODE_SIZE (DFmode)),
10598 DFmode);
10599 return;
10602 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10603 p1:SD) if p1 is not of floating point class and p0 is spilled, since
10604 we have no analogous movsd_store for this case. */
10605 if (lra_in_progress && mode == DDmode
10606 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10607 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10608 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10609 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10611 enum reg_class cl;
10612 int regno = REGNO (SUBREG_REG (operands[1]));
10614 if (regno >= FIRST_PSEUDO_REGISTER)
10616 cl = reg_preferred_class (regno);
10617 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10619 if (regno >= 0 && ! FP_REGNO_P (regno))
10621 mode = SDmode;
10622 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10623 operands[1] = SUBREG_REG (operands[1]);
10626 if (lra_in_progress
10627 && mode == SDmode
10628 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10629 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10630 && (REG_P (operands[1])
10631 || (GET_CODE (operands[1]) == SUBREG
10632 && REG_P (SUBREG_REG (operands[1])))))
10634 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10635 ? SUBREG_REG (operands[1]) : operands[1]);
10636 enum reg_class cl;
10638 if (regno >= FIRST_PSEUDO_REGISTER)
10640 cl = reg_preferred_class (regno);
10641 gcc_assert (cl != NO_REGS);
10642 regno = ira_class_hard_regs[cl][0];
10644 if (FP_REGNO_P (regno))
10646 if (GET_MODE (operands[0]) != DDmode)
10647 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10648 emit_insn (gen_movsd_store (operands[0], operands[1]));
10650 else if (INT_REGNO_P (regno))
10651 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10652 else
10653 gcc_unreachable();
10654 return;
10656 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10657 p:DD)) if p0 is not of floating point class and p1 is spilled, since
10658 we have no analogous movsd_load for this case. */
10659 if (lra_in_progress && mode == DDmode
10660 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10661 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10662 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10663 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10665 enum reg_class cl;
10666 int regno = REGNO (SUBREG_REG (operands[0]));
10668 if (regno >= FIRST_PSEUDO_REGISTER)
10670 cl = reg_preferred_class (regno);
10671 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10673 if (regno >= 0 && ! FP_REGNO_P (regno))
10675 mode = SDmode;
10676 operands[0] = SUBREG_REG (operands[0]);
10677 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10680 if (lra_in_progress
10681 && mode == SDmode
10682 && (REG_P (operands[0])
10683 || (GET_CODE (operands[0]) == SUBREG
10684 && REG_P (SUBREG_REG (operands[0]))))
10685 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10686 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10688 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10689 ? SUBREG_REG (operands[0]) : operands[0]);
10690 enum reg_class cl;
10692 if (regno >= FIRST_PSEUDO_REGISTER)
10694 cl = reg_preferred_class (regno);
10695 gcc_assert (cl != NO_REGS);
10696 regno = ira_class_hard_regs[cl][0];
10698 if (FP_REGNO_P (regno))
10700 if (GET_MODE (operands[1]) != DDmode)
10701 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10702 emit_insn (gen_movsd_load (operands[0], operands[1]));
10704 else if (INT_REGNO_P (regno))
10705 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10706 else
10707 gcc_unreachable();
10708 return;
10711 /* FIXME: In the long term, this switch statement should go away
10712 and be replaced by a sequence of tests based on things like
10713 mode == Pmode. */
10714 switch (mode)
10716 case HImode:
10717 case QImode:
10718 if (CONSTANT_P (operands[1])
10719 && GET_CODE (operands[1]) != CONST_INT)
10720 operands[1] = force_const_mem (mode, operands[1]);
10721 break;
10723 case TFmode:
10724 case TDmode:
10725 case IFmode:
10726 case KFmode:
10727 if (FLOAT128_2REG_P (mode))
10728 rs6000_eliminate_indexed_memrefs (operands);
10729 /* fall through */
10731 case DFmode:
10732 case DDmode:
10733 case SFmode:
10734 case SDmode:
10735 if (CONSTANT_P (operands[1])
10736 && ! easy_fp_constant (operands[1], mode))
10737 operands[1] = force_const_mem (mode, operands[1]);
10738 break;
10740 case V16QImode:
10741 case V8HImode:
10742 case V4SFmode:
10743 case V4SImode:
10744 case V2SFmode:
10745 case V2SImode:
10746 case V2DFmode:
10747 case V2DImode:
10748 case V1TImode:
10749 if (CONSTANT_P (operands[1])
10750 && !easy_vector_constant (operands[1], mode))
10751 operands[1] = force_const_mem (mode, operands[1]);
10752 break;
10754 case SImode:
10755 case DImode:
10756 /* Use the default pattern for the address of ELF small data. */
10757 if (TARGET_ELF
10758 && mode == Pmode
10759 && DEFAULT_ABI == ABI_V4
10760 && (GET_CODE (operands[1]) == SYMBOL_REF
10761 || GET_CODE (operands[1]) == CONST)
10762 && small_data_operand (operands[1], mode))
10764 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10765 return;
10768 if (DEFAULT_ABI == ABI_V4
10769 && mode == Pmode && mode == SImode
10770 && flag_pic == 1 && got_operand (operands[1], mode))
10772 emit_insn (gen_movsi_got (operands[0], operands[1]));
10773 return;
10776 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10777 && TARGET_NO_TOC
10778 && ! flag_pic
10779 && mode == Pmode
10780 && CONSTANT_P (operands[1])
10781 && GET_CODE (operands[1]) != HIGH
10782 && GET_CODE (operands[1]) != CONST_INT)
10784 rtx target = (!can_create_pseudo_p ()
10785 ? operands[0]
10786 : gen_reg_rtx (mode));
10788 /* If this is a function address on -mcall-aixdesc,
10789 convert it to the address of the descriptor. */
10790 if (DEFAULT_ABI == ABI_AIX
10791 && GET_CODE (operands[1]) == SYMBOL_REF
10792 && XSTR (operands[1], 0)[0] == '.')
10794 const char *name = XSTR (operands[1], 0);
10795 rtx new_ref;
10796 while (*name == '.')
10797 name++;
10798 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10799 CONSTANT_POOL_ADDRESS_P (new_ref)
10800 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10801 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10802 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10803 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10804 operands[1] = new_ref;
10807 if (DEFAULT_ABI == ABI_DARWIN)
10809 #if TARGET_MACHO
10810 if (MACHO_DYNAMIC_NO_PIC_P)
10812 /* Take care of any required data indirection. */
10813 operands[1] = rs6000_machopic_legitimize_pic_address (
10814 operands[1], mode, operands[0]);
10815 if (operands[0] != operands[1])
10816 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10817 return;
10819 #endif
10820 emit_insn (gen_macho_high (target, operands[1]));
10821 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10822 return;
10825 emit_insn (gen_elf_high (target, operands[1]));
10826 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10827 return;
10830 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10831 and we have put it in the TOC, we just need to make a TOC-relative
10832 reference to it. */
10833 if (TARGET_TOC
10834 && GET_CODE (operands[1]) == SYMBOL_REF
10835 && use_toc_relative_ref (operands[1], mode))
10836 operands[1] = create_TOC_reference (operands[1], operands[0]);
10837 else if (mode == Pmode
10838 && CONSTANT_P (operands[1])
10839 && GET_CODE (operands[1]) != HIGH
10840 && ((GET_CODE (operands[1]) != CONST_INT
10841 && ! easy_fp_constant (operands[1], mode))
10842 || (GET_CODE (operands[1]) == CONST_INT
10843 && (num_insns_constant (operands[1], mode)
10844 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10845 || (GET_CODE (operands[0]) == REG
10846 && FP_REGNO_P (REGNO (operands[0]))))
10847 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10848 && (TARGET_CMODEL == CMODEL_SMALL
10849 || can_create_pseudo_p ()
10850 || (REG_P (operands[0])
10851 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10854 #if TARGET_MACHO
10855 /* Darwin uses a special PIC legitimizer. */
10856 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10858 operands[1] =
10859 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10860 operands[0]);
10861 if (operands[0] != operands[1])
10862 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10863 return;
10865 #endif
10867 /* If we are to limit the number of things we put in the TOC and
10868 this is a symbol plus a constant we can add in one insn,
10869 just put the symbol in the TOC and add the constant. */
10870 if (GET_CODE (operands[1]) == CONST
10871 && TARGET_NO_SUM_IN_TOC
10872 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10873 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10874 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10875 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10876 && ! side_effects_p (operands[0]))
10878 rtx sym =
10879 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10880 rtx other = XEXP (XEXP (operands[1], 0), 1);
10882 sym = force_reg (mode, sym);
10883 emit_insn (gen_add3_insn (operands[0], sym, other));
10884 return;
10887 operands[1] = force_const_mem (mode, operands[1]);
10889 if (TARGET_TOC
10890 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10891 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10893 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10894 operands[0]);
10895 operands[1] = gen_const_mem (mode, tocref);
10896 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10899 break;
10901 case TImode:
10902 if (!VECTOR_MEM_VSX_P (TImode))
10903 rs6000_eliminate_indexed_memrefs (operands);
10904 break;
10906 case PTImode:
10907 rs6000_eliminate_indexed_memrefs (operands);
10908 break;
10910 default:
10911 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10914 /* Above, we may have called force_const_mem which may have returned
10915 an invalid address. If we can, fix this up; otherwise, reload will
10916 have to deal with it. */
10917 if (GET_CODE (operands[1]) == MEM)
10918 operands[1] = validize_mem (operands[1]);
10920 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10923 /* Nonzero if we can use a floating-point register to pass this arg. */
10924 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10925 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10926 && (CUM)->fregno <= FP_ARG_MAX_REG \
10927 && TARGET_HARD_FLOAT)
10929 /* Nonzero if we can use an AltiVec register to pass this arg. */
10930 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10931 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10932 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10933 && TARGET_ALTIVEC_ABI \
10934 && (NAMED))
10936 /* Walk down the type tree of TYPE counting consecutive base elements.
10937 If *MODEP is VOIDmode, then set it to the first valid floating point
10938 or vector type. If a non-floating point or vector type is found, or
10939 if a floating point or vector type that doesn't match a non-VOIDmode
10940 *MODEP is found, then return -1, otherwise return the count in the
10941 sub-tree. */
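/* For example (illustrative): struct { double x; double y[3]; } gives
   *MODEP = DFmode and a count of 4, whereas struct { double d; int i; }
   returns -1 because int is neither a floating point nor a vector type.  */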
10943 static int
10944 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10946 machine_mode mode;
10947 HOST_WIDE_INT size;
10949 switch (TREE_CODE (type))
10951 case REAL_TYPE:
10952 mode = TYPE_MODE (type);
10953 if (!SCALAR_FLOAT_MODE_P (mode))
10954 return -1;
10956 if (*modep == VOIDmode)
10957 *modep = mode;
10959 if (*modep == mode)
10960 return 1;
10962 break;
10964 case COMPLEX_TYPE:
10965 mode = TYPE_MODE (TREE_TYPE (type));
10966 if (!SCALAR_FLOAT_MODE_P (mode))
10967 return -1;
10969 if (*modep == VOIDmode)
10970 *modep = mode;
10972 if (*modep == mode)
10973 return 2;
10975 break;
10977 case VECTOR_TYPE:
10978 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10979 return -1;
10981 /* Use V4SImode as representative of all 128-bit vector types. */
10982 size = int_size_in_bytes (type);
10983 switch (size)
10985 case 16:
10986 mode = V4SImode;
10987 break;
10988 default:
10989 return -1;
10992 if (*modep == VOIDmode)
10993 *modep = mode;
10995 /* Vector modes are considered to be opaque: two vectors are
10996 equivalent for the purposes of being homogeneous aggregates
10997 if they are the same size. */
10998 if (*modep == mode)
10999 return 1;
11001 break;
11003 case ARRAY_TYPE:
11005 int count;
11006 tree index = TYPE_DOMAIN (type);
11008 /* Can't handle incomplete types or sizes that are not
11009 fixed. */
11010 if (!COMPLETE_TYPE_P (type)
11011 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11012 return -1;
11014 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
11015 if (count == -1
11016 || !index
11017 || !TYPE_MAX_VALUE (index)
11018 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
11019 || !TYPE_MIN_VALUE (index)
11020 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
11021 || count < 0)
11022 return -1;
11024 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
11025 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
11027 /* There must be no padding. */
11028 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11029 return -1;
11031 return count;
11034 case RECORD_TYPE:
11036 int count = 0;
11037 int sub_count;
11038 tree field;
11040 /* Can't handle incomplete types or sizes that are not
11041 fixed. */
11042 if (!COMPLETE_TYPE_P (type)
11043 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11044 return -1;
11046 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11048 if (TREE_CODE (field) != FIELD_DECL)
11049 continue;
11051 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11052 if (sub_count < 0)
11053 return -1;
11054 count += sub_count;
11057 /* There must be no padding. */
11058 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11059 return -1;
11061 return count;
11064 case UNION_TYPE:
11065 case QUAL_UNION_TYPE:
11067 /* These aren't very interesting except in a degenerate case. */
11068 int count = 0;
11069 int sub_count;
11070 tree field;
11072 /* Can't handle incomplete types or sizes that are not
11073 fixed. */
11074 if (!COMPLETE_TYPE_P (type)
11075 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11076 return -1;
11078 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11080 if (TREE_CODE (field) != FIELD_DECL)
11081 continue;
11083 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11084 if (sub_count < 0)
11085 return -1;
11086 count = count > sub_count ? count : sub_count;
11089 /* There must be no padding. */
11090 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11091 return -1;
11093 return count;
11096 default:
11097 break;
11100 return -1;
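/* Illustrative examples (not part of GCC) of what the walker above
   computes, assuming 64-bit doubles and 128-bit AltiVec vectors:

       struct a { double x, y; };            -> *modep = DFmode, count 2
       struct b { _Complex double z; };      -> *modep = DFmode, count 2
       struct c { double d[3]; };            -> *modep = DFmode, count 3
       union  u { double x; double y[2]; };  -> *modep = DFmode, count 2
       struct m { double x; float y; };      -> mixed modes, returns -1
       struct p { double x; int i; };        -> non-FP field, returns -1  */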
11103 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11104 float or vector aggregate that shall be passed in FP/vector registers
11105 according to the ELFv2 ABI, return the homogeneous element mode in
11106 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11108 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11110 static bool
11111 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11112 machine_mode *elt_mode,
11113 int *n_elts)
11115 /* Note that we do not accept complex types at the top level as
11116 homogeneous aggregates; these types are handled via the
11117 targetm.calls.split_complex_arg mechanism. Complex types
11118 can be elements of homogeneous aggregates, however. */
11119 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
11121 machine_mode field_mode = VOIDmode;
11122 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11124 if (field_count > 0)
11126 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11127 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11129 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11130 up to AGGR_ARG_NUM_REG registers. */
11131 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11133 if (elt_mode)
11134 *elt_mode = field_mode;
11135 if (n_elts)
11136 *n_elts = field_count;
11137 return true;
11142 if (elt_mode)
11143 *elt_mode = mode;
11144 if (n_elts)
11145 *n_elts = 1;
11146 return false;
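/* Illustrative sketch (not part of GCC): under ELFv2 a hypothetical
   struct hfa { double a, b, c; } is discovered as a homogeneous
   aggregate with *elt_mode = DFmode and *n_elts = 3; each DFmode
   element needs (8 + 7) >> 3 = 1 FP register, and 3 * 1 is within
   AGGR_ARG_NUM_REG (8), so the struct travels in FPRs.  A struct of
   nine doubles fails the AGGR_ARG_NUM_REG test and is passed like an
   ordinary aggregate.  */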
11149 /* Return a nonzero value to say to return the function value in
11150 memory, just as large structures are always returned. TYPE will be
11151 the data type of the value, and FNTYPE will be the type of the
11152 function doing the returning, or @code{NULL} for libcalls.
11154 The AIX ABI for the RS/6000 specifies that all structures are
11155 returned in memory. The Darwin ABI does the same.
11157 For the Darwin 64 Bit ABI, a function result can be returned in
11158 registers or in memory, depending on the size of the return data
11159 type. If it is returned in registers, the value occupies the same
11160 registers as it would if it were the first and only function
11161 argument. Otherwise, the function places its result in memory at
11162 the location pointed to by GPR3.
11164 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11165 but a draft put them in memory, and GCC used to implement the draft
11166 instead of the final standard. Therefore, aix_struct_return
11167 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11168 compatibility can change DRAFT_V4_STRUCT_RET to override the
11169 default, and -m switches get the final word. See
11170 rs6000_option_override_internal for more details.
11172 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11173 long double support is enabled. These values are returned in memory.
11175 int_size_in_bytes returns -1 for variable size objects, which go in
11176 memory always. The cast to unsigned makes -1 > 8. */
11178 static bool
11179 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11181 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11182 if (TARGET_MACHO
11183 && rs6000_darwin64_abi
11184 && TREE_CODE (type) == RECORD_TYPE
11185 && int_size_in_bytes (type) > 0)
11187 CUMULATIVE_ARGS valcum;
11188 rtx valret;
11190 valcum.words = 0;
11191 valcum.fregno = FP_ARG_MIN_REG;
11192 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11193 /* Do a trial code generation as if this were going to be passed
11194 as an argument; if any part goes in memory, we return NULL. */
11195 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11196 if (valret)
11197 return false;
11198 /* Otherwise fall through to more conventional ABI rules. */
11201 /* The ELFv2 ABI returns homogeneous floating-point and vector aggregates in registers.  */
11202 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11203 NULL, NULL))
11204 return false;
11206 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
11207 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11208 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11209 return false;
11211 if (AGGREGATE_TYPE_P (type)
11212 && (aix_struct_return
11213 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11214 return true;
11216 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11217 modes only exist for GCC vector types if -maltivec. */
11218 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11219 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11220 return false;
11222 /* Return synthetic vectors in memory. */
11223 if (TREE_CODE (type) == VECTOR_TYPE
11224 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11226 static bool warned_for_return_big_vectors = false;
11227 if (!warned_for_return_big_vectors)
11229 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11230 "non-standard ABI extension with no compatibility guarantee");
11231 warned_for_return_big_vectors = true;
11233 return true;
11236 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11237 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11238 return true;
11240 return false;
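/* Worked examples (illustrative, not part of GCC) of the rules above
   on a 64-bit ELFv2 target:

       struct { double a, b; }   -> homogeneous aggregate: registers
       struct { char c[16]; }    -> aggregate <= 16 bytes: registers
       struct { char c[24]; }    -> aggregate > 16 bytes: memory

   Under the AIX ABI, aix_struct_return sends all three to memory.  */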
11243 /* Specify whether values returned in registers should be at the most
11244 significant end of a register. We want aggregates returned by
11245 value to match the way aggregates are passed to functions. */
11247 static bool
11248 rs6000_return_in_msb (const_tree valtype)
11250 return (DEFAULT_ABI == ABI_ELFv2
11251 && BYTES_BIG_ENDIAN
11252 && AGGREGATE_TYPE_P (valtype)
11253 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
11256 #ifdef HAVE_AS_GNU_ATTRIBUTE
11257 /* Return TRUE if a call to function FNDECL may be one that
11258 potentially affects the function calling ABI of the object file. */
11260 static bool
11261 call_ABI_of_interest (tree fndecl)
11263 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11265 struct cgraph_node *c_node;
11267 /* Libcalls are always interesting. */
11268 if (fndecl == NULL_TREE)
11269 return true;
11271 /* Any call to an external function is interesting. */
11272 if (DECL_EXTERNAL (fndecl))
11273 return true;
11275 /* Interesting functions that we are emitting in this object file. */
11276 c_node = cgraph_node::get (fndecl);
11277 c_node = c_node->ultimate_alias_target ();
11278 return !c_node->only_called_directly_p ();
11280 return false;
11282 #endif
11284 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11285 for a call to a function whose data type is FNTYPE.
11286 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11288 For incoming args we set the number of arguments in the prototype large
11289 so we never return a PARALLEL. */
11291 void
11292 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11293 rtx libname ATTRIBUTE_UNUSED, int incoming,
11294 int libcall, int n_named_args,
11295 tree fndecl ATTRIBUTE_UNUSED,
11296 machine_mode return_mode ATTRIBUTE_UNUSED)
11298 static CUMULATIVE_ARGS zero_cumulative;
11300 *cum = zero_cumulative;
11301 cum->words = 0;
11302 cum->fregno = FP_ARG_MIN_REG;
11303 cum->vregno = ALTIVEC_ARG_MIN_REG;
11304 cum->prototype = (fntype && prototype_p (fntype));
11305 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11306 ? CALL_LIBCALL : CALL_NORMAL);
11307 cum->sysv_gregno = GP_ARG_MIN_REG;
11308 cum->stdarg = stdarg_p (fntype);
11309 cum->libcall = libcall;
11311 cum->nargs_prototype = 0;
11312 if (incoming || cum->prototype)
11313 cum->nargs_prototype = n_named_args;
11315 /* Check for a longcall attribute. */
11316 if ((!fntype && rs6000_default_long_calls)
11317 || (fntype
11318 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11319 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11320 cum->call_cookie |= CALL_LONG;
11322 if (TARGET_DEBUG_ARG)
11324 fprintf (stderr, "\ninit_cumulative_args:");
11325 if (fntype)
11327 tree ret_type = TREE_TYPE (fntype);
11328 fprintf (stderr, " ret code = %s,",
11329 get_tree_code_name (TREE_CODE (ret_type)));
11332 if (cum->call_cookie & CALL_LONG)
11333 fprintf (stderr, " longcall,");
11335 fprintf (stderr, " proto = %d, nargs = %d\n",
11336 cum->prototype, cum->nargs_prototype);
11339 #ifdef HAVE_AS_GNU_ATTRIBUTE
11340 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11342 cum->escapes = call_ABI_of_interest (fndecl);
11343 if (cum->escapes)
11345 tree return_type;
11347 if (fntype)
11349 return_type = TREE_TYPE (fntype);
11350 return_mode = TYPE_MODE (return_type);
11352 else
11353 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11355 if (return_type != NULL)
11357 if (TREE_CODE (return_type) == RECORD_TYPE
11358 && TYPE_TRANSPARENT_AGGR (return_type))
11360 return_type = TREE_TYPE (first_field (return_type));
11361 return_mode = TYPE_MODE (return_type);
11363 if (AGGREGATE_TYPE_P (return_type)
11364 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11365 <= 8))
11366 rs6000_returns_struct = true;
11368 if (SCALAR_FLOAT_MODE_P (return_mode))
11370 rs6000_passes_float = true;
11371 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11372 && (FLOAT128_IBM_P (return_mode)
11373 || FLOAT128_IEEE_P (return_mode)
11374 || (return_type != NULL
11375 && (TYPE_MAIN_VARIANT (return_type)
11376 == long_double_type_node))))
11377 rs6000_passes_long_double = true;
11379 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11380 || PAIRED_VECTOR_MODE (return_mode))
11381 rs6000_passes_vector = true;
11384 #endif
11386 if (fntype
11387 && !TARGET_ALTIVEC
11388 && TARGET_ALTIVEC_ABI
11389 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11391 error ("cannot return value in vector register because"
11392 " altivec instructions are disabled, use -maltivec"
11393 " to enable them");
11397 /* The mode the ABI uses for a word. This is not the same as word_mode
11398 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11400 static machine_mode
11401 rs6000_abi_word_mode (void)
11403 return TARGET_32BIT ? SImode : DImode;
11406 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11407 static char *
11408 rs6000_offload_options (void)
11410 if (TARGET_64BIT)
11411 return xstrdup ("-foffload-abi=lp64");
11412 else
11413 return xstrdup ("-foffload-abi=ilp32");
11416 /* On rs6000, function arguments are promoted, as are function return
11417 values. */
11419 static machine_mode
11420 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11421 machine_mode mode,
11422 int *punsignedp ATTRIBUTE_UNUSED,
11423 const_tree, int)
11425 PROMOTE_MODE (mode, *punsignedp, type);
11427 return mode;
11430 /* Return true if TYPE must be passed on the stack and not in registers. */
11432 static bool
11433 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11435 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11436 return must_pass_in_stack_var_size (mode, type);
11437 else
11438 return must_pass_in_stack_var_size_or_pad (mode, type);
11441 static inline bool
11442 is_complex_IBM_long_double (machine_mode mode)
11444 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
11447 /* Whether ABI_V4 passes MODE args to a function in floating point
11448 registers. */
11450 static bool
11451 abi_v4_pass_in_fpr (machine_mode mode)
11453 if (!TARGET_HARD_FLOAT)
11454 return false;
11455 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11456 return true;
11457 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11458 return true;
11459 /* ABI_V4 passes complex IBM long double in 8 gprs.
11460 Stupid, but we can't change the ABI now. */
11461 if (is_complex_IBM_long_double (mode))
11462 return false;
11463 if (FLOAT128_2REG_P (mode))
11464 return true;
11465 if (DECIMAL_FLOAT_MODE_P (mode))
11466 return true;
11467 return false;
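/* Summary (illustrative, not part of GCC): on a hard-float ABI_V4
   target, SFmode and DFmode scalars go in FPRs (given the matching
   single/double float support), as do IBM extended double
   (FLOAT128_2REG_P) and the decimal float modes; complex IBM long
   double is the historical exception and travels in eight GPRs.  */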
11470 /* If defined, a C expression which determines whether, and in which
11471 direction, to pad out an argument with extra space. The value
11472 should be of type `enum direction': either `upward' to pad above
11473 the argument, `downward' to pad below, or `none' to inhibit
11474 padding.
11476 For the AIX ABI, structs are always stored left-shifted in their
11477 argument slot. */
11479 enum direction
11480 function_arg_padding (machine_mode mode, const_tree type)
11482 #ifndef AGGREGATE_PADDING_FIXED
11483 #define AGGREGATE_PADDING_FIXED 0
11484 #endif
11485 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11486 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11487 #endif
11489 if (!AGGREGATE_PADDING_FIXED)
11491 /* GCC used to pass structures of the same size as integer types as
11492 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
11493 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11494 passed padded downward, except that -mstrict-align further
11495 muddied the water in that multi-component structures of 2 and 4
11496 bytes in size were passed padded upward.
11498 The following arranges for best compatibility with previous
11499 versions of gcc, but removes the -mstrict-align dependency. */
11500 if (BYTES_BIG_ENDIAN)
11502 HOST_WIDE_INT size = 0;
11504 if (mode == BLKmode)
11506 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11507 size = int_size_in_bytes (type);
11509 else
11510 size = GET_MODE_SIZE (mode);
11512 if (size == 1 || size == 2 || size == 4)
11513 return downward;
11515 return upward;
11518 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11520 if (type != 0 && AGGREGATE_TYPE_P (type))
11521 return upward;
11524 /* Fall back to the default. */
11525 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
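/* Examples (illustrative, not part of GCC) of the big-endian rule
   above: a 2-byte struct is padded downward, sitting in the low-order
   end of its slot as a short would, while a 3-byte or 5-byte struct
   is padded upward and starts at the low address of its slot.  */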
11528 /* If defined, a C expression that gives the alignment boundary, in bits,
11529 of an argument with the specified mode and type. If it is not defined,
11530 PARM_BOUNDARY is used for all arguments.
11532 V.4 wants long longs and doubles to be double word aligned. Just
11533 testing the mode size is a boneheaded way to do this as it means
11534 that other types such as complex int are also double word aligned.
11535 However, we're stuck with this because changing the ABI might break
11536 existing library interfaces.
11538 Quadword align Altivec/VSX vectors.
11539 Quadword align large synthetic vector types. */
11541 static unsigned int
11542 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11544 machine_mode elt_mode;
11545 int n_elts;
11547 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11549 if (DEFAULT_ABI == ABI_V4
11550 && (GET_MODE_SIZE (mode) == 8
11551 || (TARGET_HARD_FLOAT
11552 && !is_complex_IBM_long_double (mode)
11553 && FLOAT128_2REG_P (mode))))
11554 return 64;
11555 else if (FLOAT128_VECTOR_P (mode))
11556 return 128;
11557 else if (PAIRED_VECTOR_MODE (mode)
11558 || (type && TREE_CODE (type) == VECTOR_TYPE
11559 && int_size_in_bytes (type) >= 8
11560 && int_size_in_bytes (type) < 16))
11561 return 64;
11562 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11563 || (type && TREE_CODE (type) == VECTOR_TYPE
11564 && int_size_in_bytes (type) >= 16))
11565 return 128;
11567 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11568 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11569 -mcompat-align-parm is used. */
11570 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11571 || DEFAULT_ABI == ABI_ELFv2)
11572 && type && TYPE_ALIGN (type) > 64)
11574 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11575 or homogeneous float/vector aggregates here. We already handled
11576 vector aggregates above, but still need to check for float here. */
11577 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11578 && !SCALAR_FLOAT_MODE_P (elt_mode));
11580 /* We used to check for BLKmode instead of the above aggregate type
11581 check. Warn when this results in any difference to the ABI. */
11582 if (aggregate_p != (mode == BLKmode))
11584 static bool warned;
11585 if (!warned && warn_psabi)
11587 warned = true;
11588 inform (input_location,
11589 "the ABI of passing aggregates with %d-byte alignment"
11590 " has changed in GCC 5",
11591 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11595 if (aggregate_p)
11596 return 128;
11599 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11600 implement the "aggregate type" check as a BLKmode check here; this
11601 means certain aggregate types are in fact not aligned. */
11602 if (TARGET_MACHO && rs6000_darwin64_abi
11603 && mode == BLKmode
11604 && type && TYPE_ALIGN (type) > 64)
11605 return 128;
11607 return PARM_BOUNDARY;
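/* Illustrative results (not part of GCC) of the computation above:
   under ABI_V4 an 8-byte scalar such as long long or double gets
   64-bit alignment; AltiVec/VSX vectors and IEEE 128-bit floats get
   128; under ELFv2 an aggregate declared with
   __attribute__ ((aligned (16))) gets 128, while a plain
   struct { double a, b; } is a homogeneous DFmode aggregate and
   stays at PARM_BOUNDARY.  */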
11610 /* The offset in words to the start of the parameter save area. */
11612 static unsigned int
11613 rs6000_parm_offset (void)
11615 return (DEFAULT_ABI == ABI_V4 ? 2
11616 : DEFAULT_ABI == ABI_ELFv2 ? 4
11617 : 6);
11620 /* For a function parm of MODE and TYPE, return the starting word in
11621 the parameter area. NWORDS of the parameter area are already used. */
11623 static unsigned int
11624 rs6000_parm_start (machine_mode mode, const_tree type,
11625 unsigned int nwords)
11627 unsigned int align;
11629 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11630 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11633 /* Compute the size (in words) of a function argument. */
11635 static unsigned long
11636 rs6000_arg_size (machine_mode mode, const_tree type)
11638 unsigned long size;
11640 if (mode != BLKmode)
11641 size = GET_MODE_SIZE (mode);
11642 else
11643 size = int_size_in_bytes (type);
11645 if (TARGET_32BIT)
11646 return (size + 3) >> 2;
11647 else
11648 return (size + 7) >> 3;
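/* Worked example (illustrative, not part of GCC): on a 64-bit ELFv2
   target rs6000_parm_offset () is 4, so a 16-byte-aligned argument
   (align = 1) arriving with nwords = 3 starts at
   3 + (-(4 + 3) & 1) = 4, i.e. it is bumped to an even doubleword.
   Likewise rs6000_arg_size rounds a 10-byte BLKmode argument up to
   (10 + 7) >> 3 = 2 words.  */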
11651 /* Use this to flush pending int fields. */
11653 static void
11654 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11655 HOST_WIDE_INT bitpos, int final)
11657 unsigned int startbit, endbit;
11658 int intregs, intoffset;
11659 machine_mode mode;
11661 /* Handle the situations where a float is taking up the first half
11662 of the GPR, and the other half is empty (typically due to
11663 alignment restrictions). We can detect this by an 8-byte-aligned
11664 int field, or by seeing that this is the final flush for this
11665 argument. Count the word and continue on. */
11666 if (cum->floats_in_gpr == 1
11667 && (cum->intoffset % 64 == 0
11668 || (cum->intoffset == -1 && final)))
11670 cum->words++;
11671 cum->floats_in_gpr = 0;
11674 if (cum->intoffset == -1)
11675 return;
11677 intoffset = cum->intoffset;
11678 cum->intoffset = -1;
11679 cum->floats_in_gpr = 0;
11681 if (intoffset % BITS_PER_WORD != 0)
11683 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
11684 MODE_INT, 0);
11685 if (mode == BLKmode)
11687 /* We couldn't find an appropriate mode, which happens,
11688 e.g., in packed structs when there are 3 bytes to load.
11689 Move intoffset back to the beginning of the word in this
11690 case. */
11691 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11695 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11696 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11697 intregs = (endbit - startbit) / BITS_PER_WORD;
11698 cum->words += intregs;
11699 /* words should be unsigned. */
11700 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11702 int pad = (endbit/BITS_PER_WORD) - cum->words;
11703 cum->words += pad;
11707 /* The darwin64 ABI calls for us to recurse down through structs,
11708 looking for elements passed in registers. Unfortunately, we have
11709 to track int register count here also because of misalignments
11710 in powerpc alignment mode. */
11712 static void
11713 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11714 const_tree type,
11715 HOST_WIDE_INT startbitpos)
11717 tree f;
11719 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11720 if (TREE_CODE (f) == FIELD_DECL)
11722 HOST_WIDE_INT bitpos = startbitpos;
11723 tree ftype = TREE_TYPE (f);
11724 machine_mode mode;
11725 if (ftype == error_mark_node)
11726 continue;
11727 mode = TYPE_MODE (ftype);
11729 if (DECL_SIZE (f) != 0
11730 && tree_fits_uhwi_p (bit_position (f)))
11731 bitpos += int_bit_position (f);
11733 /* ??? FIXME: else assume zero offset. */
11735 if (TREE_CODE (ftype) == RECORD_TYPE)
11736 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11737 else if (USE_FP_FOR_ARG_P (cum, mode))
11739 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11740 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11741 cum->fregno += n_fpregs;
11742 /* Single-precision floats present a special problem for
11743 us, because they are smaller than an 8-byte GPR, and so
11744 the structure-packing rules combined with the standard
11745 varargs behavior mean that we want to pack float/float
11746 and float/int combinations into a single register's
11747 space. This is complicated by the arg advance flushing,
11748 which works on arbitrarily large groups of int-type
11749 fields. */
11750 if (mode == SFmode)
11752 if (cum->floats_in_gpr == 1)
11754 /* Two floats in a word; count the word and reset
11755 the float count. */
11756 cum->words++;
11757 cum->floats_in_gpr = 0;
11759 else if (bitpos % 64 == 0)
11761 /* A float at the beginning of an 8-byte word;
11762 count it and put off adjusting cum->words until
11763 we see if an arg advance flush is going to do it
11764 for us. */
11765 cum->floats_in_gpr++;
11767 else
11769 /* The float is at the end of a word, preceded
11770 by integer fields, so the arg advance flush
11771 just above has already set cum->words and
11772 everything is taken care of. */
11775 else
11776 cum->words += n_fpregs;
11778 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11780 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11781 cum->vregno++;
11782 cum->words += 2;
11784 else if (cum->intoffset == -1)
11785 cum->intoffset = bitpos;
11789 /* Check for an item that needs to be considered specially under the Darwin
11790 64-bit ABI. These are record types where the mode is BLK or the structure is
11791 8 bytes in size. */
11792 static int
11793 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11795 return rs6000_darwin64_abi
11796 && ((mode == BLKmode
11797 && TREE_CODE (type) == RECORD_TYPE
11798 && int_size_in_bytes (type) > 0)
11799 || (type && TREE_CODE (type) == RECORD_TYPE
11800 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11803 /* Update the data in CUM to advance over an argument
11804 of mode MODE and data type TYPE.
11805 (TYPE is null for libcalls where that information may not be available.)
11807 Note that for args passed by reference, function_arg will be called
11808 with MODE and TYPE set to that of the pointer to the arg, not the arg
11809 itself. */
11811 static void
11812 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11813 const_tree type, bool named, int depth)
11815 machine_mode elt_mode;
11816 int n_elts;
11818 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11820 /* Only tick off an argument if we're not recursing. */
11821 if (depth == 0)
11822 cum->nargs_prototype--;
11824 #ifdef HAVE_AS_GNU_ATTRIBUTE
11825 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11826 && cum->escapes)
11828 if (SCALAR_FLOAT_MODE_P (mode))
11830 rs6000_passes_float = true;
11831 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11832 && (FLOAT128_IBM_P (mode)
11833 || FLOAT128_IEEE_P (mode)
11834 || (type != NULL
11835 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11836 rs6000_passes_long_double = true;
11838 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11839 || (PAIRED_VECTOR_MODE (mode)
11840 && !cum->stdarg
11841 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11842 rs6000_passes_vector = true;
11844 #endif
11846 if (TARGET_ALTIVEC_ABI
11847 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11848 || (type && TREE_CODE (type) == VECTOR_TYPE
11849 && int_size_in_bytes (type) == 16)))
11851 bool stack = false;
11853 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11855 cum->vregno += n_elts;
11857 if (!TARGET_ALTIVEC)
11858 error ("cannot pass argument in vector register because"
11859 " altivec instructions are disabled, use -maltivec"
11860 " to enable them");
11862 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11863 even if it is going to be passed in a vector register.
11864 Darwin does the same for variable-argument functions. */
11865 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11866 && TARGET_64BIT)
11867 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11868 stack = true;
11870 else
11871 stack = true;
11873 if (stack)
11875 int align;
11877 /* Vector parameters must be 16-byte aligned. In 32-bit
11878 mode this means we need to take into account the offset
11879 to the parameter save area. In 64-bit mode, they just
11880 have to start on an even word, since the parameter save
11881 area is 16-byte aligned. */
11882 if (TARGET_32BIT)
11883 align = -(rs6000_parm_offset () + cum->words) & 3;
11884 else
11885 align = cum->words & 1;
11886 cum->words += align + rs6000_arg_size (mode, type);
11888 if (TARGET_DEBUG_ARG)
11890 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11891 cum->words, align);
11892 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11893 cum->nargs_prototype, cum->prototype,
11894 GET_MODE_NAME (mode));
11898 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11900 int size = int_size_in_bytes (type);
11901 /* Variable sized types have size == -1 and are
11902 treated as if consisting entirely of ints.
11903 Pad to 16 byte boundary if needed. */
11904 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11905 && (cum->words % 2) != 0)
11906 cum->words++;
11907 /* For varargs, we can just go up by the size of the struct. */
11908 if (!named)
11909 cum->words += (size + 7) / 8;
11910 else
11912 /* It is tempting to say int register count just goes up by
11913 sizeof(type)/8, but this is wrong in a case such as
11914 { int; double; int; } [powerpc alignment]. We have to
11915 grovel through the fields for these too. */
11916 cum->intoffset = 0;
11917 cum->floats_in_gpr = 0;
11918 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11919 rs6000_darwin64_record_arg_advance_flush (cum,
11920 size * BITS_PER_UNIT, 1);
11922 if (TARGET_DEBUG_ARG)
11924 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11925 cum->words, TYPE_ALIGN (type), size);
11926 fprintf (stderr,
11927 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11928 cum->nargs_prototype, cum->prototype,
11929 GET_MODE_NAME (mode));
11932 else if (DEFAULT_ABI == ABI_V4)
11934 if (abi_v4_pass_in_fpr (mode))
11936 /* _Decimal128 must use an even/odd register pair. This assumes
11937 that the register number is odd when fregno is odd. */
11938 if (mode == TDmode && (cum->fregno % 2) == 1)
11939 cum->fregno++;
11941 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11942 <= FP_ARG_V4_MAX_REG)
11943 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11944 else
11946 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11947 if (mode == DFmode || FLOAT128_IBM_P (mode)
11948 || mode == DDmode || mode == TDmode)
11949 cum->words += cum->words & 1;
11950 cum->words += rs6000_arg_size (mode, type);
11953 else
11955 int n_words = rs6000_arg_size (mode, type);
11956 int gregno = cum->sysv_gregno;
11958 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11959 As is any other 2 word item such as complex int, due to a
11960 historical mistake. */
11961 if (n_words == 2)
11962 gregno += (1 - gregno) & 1;
11964 /* Multi-reg args are not split between registers and stack. */
11965 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11967 /* Long long is aligned on the stack. So are other 2 word
11968 items such as complex int due to a historical mistake. */
11969 if (n_words == 2)
11970 cum->words += cum->words & 1;
11971 cum->words += n_words;
11974 /* Note: we continue to accumulate gregno even after we have started
11975 spilling to the stack; this is how expand_builtin_saveregs learns
11976 that spilling has started. */
11977 cum->sysv_gregno = gregno + n_words;
11980 if (TARGET_DEBUG_ARG)
11982 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11983 cum->words, cum->fregno);
11984 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11985 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11986 fprintf (stderr, "mode = %4s, named = %d\n",
11987 GET_MODE_NAME (mode), named);
11990 else
11992 int n_words = rs6000_arg_size (mode, type);
11993 int start_words = cum->words;
11994 int align_words = rs6000_parm_start (mode, type, start_words);
11996 cum->words = align_words + n_words;
11998 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
12000 /* _Decimal128 must be passed in an even/odd float register pair.
12001 This assumes that the register number is odd when fregno is
12002 odd. */
12003 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12004 cum->fregno++;
12005 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
12008 if (TARGET_DEBUG_ARG)
12010 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
12011 cum->words, cum->fregno);
12012 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
12013 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
12014 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
12015 named, align_words - start_words, depth);
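/* Worked example (illustrative, not part of GCC) of the ABI_V4 GPR
   pairing above: sysv_gregno holds hard register numbers, with
   GP_ARG_MIN_REG being r3.  For a 2-word argument with gregno == 4,
   (1 - 4) & 1 == 1 bumps it to the (r5,r6) pair; with gregno == 3 the
   adjustment is 0 and it lands in (r3,r4).  A 2-word argument that
   would overflow GP_ARG_MAX_REG (r10) goes entirely on the stack,
   never split.  */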
12020 static void
12021 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
12022 const_tree type, bool named)
12024 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
12028 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
12029 structure between cum->intoffset and bitpos to integer registers. */
12031 static void
12032 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12033 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12035 machine_mode mode;
12036 unsigned int regno;
12037 unsigned int startbit, endbit;
12038 int this_regno, intregs, intoffset;
12039 rtx reg;
12041 if (cum->intoffset == -1)
12042 return;
12044 intoffset = cum->intoffset;
12045 cum->intoffset = -1;
12047 /* If this is the trailing part of a word, try to only load that
12048 much into the register. Otherwise load the whole register. Note
12049 that in the latter case we may pick up unwanted bits. It's not a
12050 problem at the moment, but we may wish to revisit this. */
12052 if (intoffset % BITS_PER_WORD != 0)
12054 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
12055 MODE_INT, 0);
12056 if (mode == BLKmode)
12058 /* We couldn't find an appropriate mode, which happens,
12059 e.g., in packed structs when there are 3 bytes to load.
12060 Move intoffset back to the beginning of the word in this
12061 case. */
12062 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12063 mode = word_mode;
12066 else
12067 mode = word_mode;
12069 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12070 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12071 intregs = (endbit - startbit) / BITS_PER_WORD;
12072 this_regno = cum->words + intoffset / BITS_PER_WORD;
12074 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12075 cum->use_stack = 1;
12077 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12078 if (intregs <= 0)
12079 return;
12081 intoffset /= BITS_PER_UNIT;
12084 regno = GP_ARG_MIN_REG + this_regno;
12085 reg = gen_rtx_REG (mode, regno);
12086 rvec[(*k)++] =
12087 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12089 this_regno += 1;
12090 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12091 mode = word_mode;
12092 intregs -= 1;
12094 while (intregs > 0);
12097 /* Recursive workhorse for the following. */
12099 static void
12100 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12101 HOST_WIDE_INT startbitpos, rtx rvec[],
12102 int *k)
12104 tree f;
12106 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12107 if (TREE_CODE (f) == FIELD_DECL)
12109 HOST_WIDE_INT bitpos = startbitpos;
12110 tree ftype = TREE_TYPE (f);
12111 machine_mode mode;
12112 if (ftype == error_mark_node)
12113 continue;
12114 mode = TYPE_MODE (ftype);
12116 if (DECL_SIZE (f) != 0
12117 && tree_fits_uhwi_p (bit_position (f)))
12118 bitpos += int_bit_position (f);
12120 /* ??? FIXME: else assume zero offset. */
12122 if (TREE_CODE (ftype) == RECORD_TYPE)
12123 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12124 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12126 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12127 #if 0
12128 switch (mode)
12130 case SCmode: mode = SFmode; break;
12131 case DCmode: mode = DFmode; break;
12132 case TCmode: mode = TFmode; break;
12133 default: break;
12135 #endif
12136 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12137 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12139 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12140 && (mode == TFmode || mode == TDmode));
12141 /* Long double or _Decimal128 split over regs and memory. */
12142 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12143 cum->use_stack = 1;
12145 rvec[(*k)++]
12146 = gen_rtx_EXPR_LIST (VOIDmode,
12147 gen_rtx_REG (mode, cum->fregno++),
12148 GEN_INT (bitpos / BITS_PER_UNIT));
12149 if (FLOAT128_2REG_P (mode))
12150 cum->fregno++;
12152 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12154 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12155 rvec[(*k)++]
12156 = gen_rtx_EXPR_LIST (VOIDmode,
12157 gen_rtx_REG (mode, cum->vregno++),
12158 GEN_INT (bitpos / BITS_PER_UNIT));
12160 else if (cum->intoffset == -1)
12161 cum->intoffset = bitpos;
12165 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12166 the register(s) to be used for each field and subfield of a struct
12167 being passed by value, along with the offset of where the
12168 register's value may be found in the block. FP fields go in FP
12169 register, vector fields go in vector registers, and everything
12170 else goes in int registers, packed as in memory.
12172 This code is also used for function return values. RETVAL indicates
12173 whether this is the case.
12175 Much of this is taken from the SPARC V9 port, which has a similar
12176 calling convention. */
12178 static rtx
12179 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12180 bool named, bool retval)
12182 rtx rvec[FIRST_PSEUDO_REGISTER];
12183 int k = 1, kbase = 1;
12184 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12185 /* This is a copy; modifications are not visible to our caller. */
12186 CUMULATIVE_ARGS copy_cum = *orig_cum;
12187 CUMULATIVE_ARGS *cum = &copy_cum;
12189 /* Pad to 16 byte boundary if needed. */
12190 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12191 && (cum->words % 2) != 0)
12192 cum->words++;
12194 cum->intoffset = 0;
12195 cum->use_stack = 0;
12196 cum->named = named;
12198 /* Put entries into rvec[] for individual FP and vector fields, and
12199 for the chunks of memory that go in int regs. Note we start at
12200 element 1; 0 is reserved for an indication of using memory, and
12201 may or may not be filled in below. */
12202 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12203 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12205 /* If any part of the struct went on the stack put all of it there.
12206 This hack is because the generic code for
12207 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12208 parts of the struct are not at the beginning. */
12209 if (cum->use_stack)
12211 if (retval)
12212 return NULL_RTX; /* doesn't go in registers at all */
12213 kbase = 0;
12214 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12216 if (k > 1 || cum->use_stack)
12217 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12218 else
12219 return NULL_RTX;
12222 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12224 static rtx
12225 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12226 int align_words)
12228 int n_units;
12229 int i, k;
12230 rtx rvec[GP_ARG_NUM_REG + 1];
12232 if (align_words >= GP_ARG_NUM_REG)
12233 return NULL_RTX;
12235 n_units = rs6000_arg_size (mode, type);
12237 /* Optimize the simple case where the arg fits in one gpr, except in
12238 the case of BLKmode due to assign_parms assuming that registers are
12239 BITS_PER_WORD wide. */
12240 if (n_units == 0
12241 || (n_units == 1 && mode != BLKmode))
12242 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12244 k = 0;
12245 if (align_words + n_units > GP_ARG_NUM_REG)
12246 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12247 using a magic NULL_RTX component.
12248 This is not strictly correct. Only some of the arg belongs in
12249 memory, not all of it. However, the normal scheme using
12250 function_arg_partial_nregs can result in unusual subregs, e.g.
12251 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12252 store the whole arg to memory is often more efficient than code
12253 to store pieces, and we know that space is available in the right
12254 place for the whole arg. */
12255 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12257 i = 0;
12260 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12261 rtx off = GEN_INT (i++ * 4);
12262 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12264 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12266 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
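/* Illustrative sketch (not part of GCC): with -m32 -mpowerpc64, an
   8-byte DFmode argument starting at align_words == 7 comes back
   roughly as

       (parallel:DF [(expr_list (nil) (const_int 0))
                     (expr_list (reg:SI 10) (const_int 0))])

   i.e. the first half of the value in r10 and the magic NULL_RTX
   element recording that the rest lives in memory.  */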
12269 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12270 but must also be copied into the parameter save area starting at
12271 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12272 to the GPRs and/or memory. Return the number of elements used. */
12274 static int
12275 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12276 int align_words, rtx *rvec)
12278 int k = 0;
12280 if (align_words < GP_ARG_NUM_REG)
12282 int n_words = rs6000_arg_size (mode, type);
12284 if (align_words + n_words > GP_ARG_NUM_REG
12285 || mode == BLKmode
12286 || (TARGET_32BIT && TARGET_POWERPC64))
12288 /* If this is partially on the stack, then we only
12289 include the portion actually in registers here. */
12290 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12291 int i = 0;
12293 if (align_words + n_words > GP_ARG_NUM_REG)
12295 /* Not all of the arg fits in gprs. Say that it goes in memory
12296 too, using a magic NULL_RTX component. Also see comment in
12297 rs6000_mixed_function_arg for why the normal
12298 function_arg_partial_nregs scheme doesn't work in this case. */
12299 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12304 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12305 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12306 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12308 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12310 else
12312 /* The whole arg fits in gprs. */
12313 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12314 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12317 else
12319 /* It's entirely in memory. */
12320 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12323 return k;
12326 /* RVEC is a vector of K components of an argument of mode MODE.
12327 Construct the final function_arg return value from it. */
12329 static rtx
12330 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12332 gcc_assert (k >= 1);
12334 /* Avoid returning a PARALLEL in the trivial cases. */
12335 if (k == 1)
12337 if (XEXP (rvec[0], 0) == NULL_RTX)
12338 return NULL_RTX;
12340 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12341 return XEXP (rvec[0], 0);
12344 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12347 /* Determine where to put an argument to a function.
12348 Value is zero to push the argument on the stack,
12349 or a hard register in which to store the argument.
12351 MODE is the argument's machine mode.
12352 TYPE is the data type of the argument (as a tree).
12353 This is null for libcalls where that information may
12354 not be available.
12355 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12356 the preceding args and about the function being called. It is
12357 not modified in this routine.
12358 NAMED is nonzero if this argument is a named parameter
12359 (otherwise it is an extra parameter matching an ellipsis).
12361 On RS/6000 the first eight words of non-FP are normally in registers
12362 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12363 Under V.4, the first 8 FP args are in registers.
12365 If this is floating-point and no prototype is specified, we use
12366 both an FP and integer register (or possibly FP reg and stack). Library
12367 functions (when CALL_LIBCALL is set) always have the proper types for args,
12368 so we can pass the FP value just in one register. emit_library_call
12369 doesn't support PARALLEL anyway.
12371 Note that for args passed by reference, function_arg will be called
12372 with MODE and TYPE set to that of the pointer to the arg, not the arg
12373 itself. */
12375 static rtx
12376 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12377 const_tree type, bool named)
12379 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12380 enum rs6000_abi abi = DEFAULT_ABI;
12381 machine_mode elt_mode;
12382 int n_elts;
12384 /* Return a marker to indicate whether the bit that V.4 uses to say
12385 fp args were passed in registers needs to be set or cleared in CR1.
12386 Assume that we don't need the marker for software floating point,
12387 or compiler generated library calls. */
12388 if (mode == VOIDmode)
12390 if (abi == ABI_V4
12391 && (cum->call_cookie & CALL_LIBCALL) == 0
12392 && (cum->stdarg
12393 || (cum->nargs_prototype < 0
12394 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12395 && TARGET_HARD_FLOAT)
12396 return GEN_INT (cum->call_cookie
12397 | ((cum->fregno == FP_ARG_MIN_REG)
12398 ? CALL_V4_SET_FP_ARGS
12399 : CALL_V4_CLEAR_FP_ARGS));
12401 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12404 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12406 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12408 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12409 if (rslt != NULL_RTX)
12410 return rslt;
12411 /* Else fall through to usual handling. */
12414 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12416 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12417 rtx r, off;
12418 int i, k = 0;
12420 /* Do we also need to pass this argument in the parameter save area?
12421 Library support functions for IEEE 128-bit are assumed to not need the
12422 value passed both in GPRs and in vector registers. */
12423 if (TARGET_64BIT && !cum->prototype
12424 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12426 int align_words = ROUND_UP (cum->words, 2);
12427 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12430 /* Describe where this argument goes in the vector registers. */
12431 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12433 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12434 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12435 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12438 return rs6000_finish_function_arg (mode, rvec, k);
12440 else if (TARGET_ALTIVEC_ABI
12441 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12442 || (type && TREE_CODE (type) == VECTOR_TYPE
12443 && int_size_in_bytes (type) == 16)))
12445 if (named || abi == ABI_V4)
12446 return NULL_RTX;
12447 else
12449 /* Vector parameters to varargs functions under AIX or Darwin
12450 get passed in memory and possibly also in GPRs. */
12451 int align, align_words, n_words;
12452 machine_mode part_mode;
12454 /* Vector parameters must be 16-byte aligned. In 32-bit
12455 mode this means we need to take into account the offset
12456 to the parameter save area. In 64-bit mode, they just
12457 have to start on an even word, since the parameter save
12458 area is 16-byte aligned. */
12459 if (TARGET_32BIT)
12460 align = -(rs6000_parm_offset () + cum->words) & 3;
12461 else
12462 align = cum->words & 1;
12463 align_words = cum->words + align;
12465 /* Out of registers? Memory, then. */
12466 if (align_words >= GP_ARG_NUM_REG)
12467 return NULL_RTX;
12469 if (TARGET_32BIT && TARGET_POWERPC64)
12470 return rs6000_mixed_function_arg (mode, type, align_words);
12472 /* The vector value goes in GPRs. Only the part of the
12473 value in GPRs is reported here. */
12474 part_mode = mode;
12475 n_words = rs6000_arg_size (mode, type);
12476 if (align_words + n_words > GP_ARG_NUM_REG)
12477 /* Fortunately, there are only two possibilities, the value
12478 is either wholly in GPRs or half in GPRs and half not. */
12479 part_mode = DImode;
12481 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12485 else if (abi == ABI_V4)
12487 if (abi_v4_pass_in_fpr (mode))
12489 /* _Decimal128 must use an even/odd register pair. This assumes
12490 that the register number is odd when fregno is odd. */
12491 if (mode == TDmode && (cum->fregno % 2) == 1)
12492 cum->fregno++;
12494 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12495 <= FP_ARG_V4_MAX_REG)
12496 return gen_rtx_REG (mode, cum->fregno);
12497 else
12498 return NULL_RTX;
12500 else
12502 int n_words = rs6000_arg_size (mode, type);
12503 int gregno = cum->sysv_gregno;
12505 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12506 As is any other 2 word item such as complex int, due to a
12507 historical mistake. */
12508 if (n_words == 2)
12509 gregno += (1 - gregno) & 1;
12511 /* Multi-reg args are not split between registers and stack. */
12512 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12513 return NULL_RTX;
12515 if (TARGET_32BIT && TARGET_POWERPC64)
12516 return rs6000_mixed_function_arg (mode, type,
12517 gregno - GP_ARG_MIN_REG);
12518 return gen_rtx_REG (mode, gregno);
12521 else
12523 int align_words = rs6000_parm_start (mode, type, cum->words);
12525 /* _Decimal128 must be passed in an even/odd float register pair.
12526 This assumes that the register number is odd when fregno is odd. */
12527 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12528 cum->fregno++;
12530 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12532 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12533 rtx r, off;
12534 int i, k = 0;
12535 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12536 int fpr_words;
12538 /* Do we also need to pass this argument in the parameter
12539 save area? */
12540 if (type && (cum->nargs_prototype <= 0
12541 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12542 && TARGET_XL_COMPAT
12543 && align_words >= GP_ARG_NUM_REG)))
12544 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12546 /* Describe where this argument goes in the fprs. */
12547 for (i = 0; i < n_elts
12548 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12550 /* Check if the argument is split over registers and memory.
12551 This can only ever happen for long double or _Decimal128;
12552 complex types are handled via split_complex_arg. */
12553 machine_mode fmode = elt_mode;
12554 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12556 gcc_assert (FLOAT128_2REG_P (fmode));
12557 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12560 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12561 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12562 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12565 /* If there were not enough FPRs to hold the argument, the rest
12566 usually goes into memory. However, if the current position
12567 is still within the register parameter area, a portion may
12568 actually have to go into GPRs.
12570 Note that it may happen that the portion of the argument
12571 passed in the first "half" of the first GPR was already
12572 passed in the last FPR as well.
12574 For unnamed arguments, we already set up GPRs to cover the
12575 whole argument in rs6000_psave_function_arg, so there is
12576 nothing further to do at this point. */
12577 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12578 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12579 && cum->nargs_prototype > 0)
12581 static bool warned;
12583 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12584 int n_words = rs6000_arg_size (mode, type);
12586 align_words += fpr_words;
12587 n_words -= fpr_words;
12591 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12592 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12593 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12595 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12597 if (!warned && warn_psabi)
12599 warned = true;
12600 inform (input_location,
12601 "the ABI of passing homogeneous float aggregates"
12602 " has changed in GCC 5");
12606 return rs6000_finish_function_arg (mode, rvec, k);
12608 else if (align_words < GP_ARG_NUM_REG)
12610 if (TARGET_32BIT && TARGET_POWERPC64)
12611 return rs6000_mixed_function_arg (mode, type, align_words);
12613 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12615 else
12616 return NULL_RTX;
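/* Illustrative example (not part of GCC): a DFmode argument to an
   unprototyped function under the 64-bit AIX/ELFv2 rules, arriving
   with align_words == 0 and fregno == FP_ARG_MIN_REG, is described
   roughly as

       (parallel:DF [(expr_list (reg:DF 3) (const_int 0))
                     (expr_list (reg:DF 33) (const_int 0))])

   i.e. the value is passed both in r3 and in f1, since the callee
   may look for it in either place.  */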
12620 /* For an arg passed partly in registers and partly in memory, this is
12621 the number of bytes passed in registers. For args passed entirely in
12622 registers or entirely in memory, zero. When an arg is described by a
12623 PARALLEL, perhaps using more than one register type, this function
12624 returns the number of bytes used by the first element of the PARALLEL. */
12626 static int
12627 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12628 tree type, bool named)
12630 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12631 bool passed_in_gprs = true;
12632 int ret = 0;
12633 int align_words;
12634 machine_mode elt_mode;
12635 int n_elts;
12637 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12639 if (DEFAULT_ABI == ABI_V4)
12640 return 0;
12642 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12644 /* If we are passing this arg in the fixed parameter save area (gprs or
12645 memory) as well as VRs, we do not use the partial bytes mechanism;
12646 instead, rs6000_function_arg will return a PARALLEL including a memory
12647 element as necessary. Library support functions for IEEE 128-bit are
12648 assumed to not need the value passed both in GPRs and in vector
12649 registers. */
12650 if (TARGET_64BIT && !cum->prototype
12651 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12652 return 0;
12654 /* Otherwise, we pass in VRs only. Check for partial copies. */
12655 passed_in_gprs = false;
12656 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12657 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12660 /* In this complicated case we just disable the partial_nregs code. */
12661 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12662 return 0;
12664 align_words = rs6000_parm_start (mode, type, cum->words);
12666 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12668 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12670 /* If we are passing this arg in the fixed parameter save area
12671 (gprs or memory) as well as FPRs, we do not use the partial
12672 bytes mechanism; instead, rs6000_function_arg will return a
12673 PARALLEL including a memory element as necessary. */
12674 if (type
12675 && (cum->nargs_prototype <= 0
12676 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12677 && TARGET_XL_COMPAT
12678 && align_words >= GP_ARG_NUM_REG)))
12679 return 0;
12681 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12682 passed_in_gprs = false;
12683 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12685 /* Compute number of bytes / words passed in FPRs. If there
12686 is still space available in the register parameter area
12687 *after* that amount, a part of the argument will be passed
12688 in GPRs. In that case, the total amount passed in any
12689 registers is equal to the amount that would have been passed
12690 in GPRs if everything were passed there, so we fall back to
12691 the GPR code below to compute the appropriate value. */
12692 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12693 * MIN (8, GET_MODE_SIZE (elt_mode)));
12694 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12696 if (align_words + fpr_words < GP_ARG_NUM_REG)
12697 passed_in_gprs = true;
12698 else
12699 ret = fpr;
12703 if (passed_in_gprs
12704 && align_words < GP_ARG_NUM_REG
12705 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12706 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12708 if (ret != 0 && TARGET_DEBUG_ARG)
12709 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12711 return ret;
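/* Worked example (illustrative, not part of GCC): on 64-bit AIX, an
   IBM long double (TFmode, two FPRs) arriving with only f13 free
   (fregno == FP_ARG_MAX_REG) and align_words == 7: fpr = 1 * 8 = 8
   bytes fit in FPRs, align_words + fpr_words equals GP_ARG_NUM_REG
   so no GPRs can help, and the function returns 8, the remaining 8
   bytes going to memory.  */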
12714 /* A C expression that indicates when an argument must be passed by
12715 reference. If nonzero for an argument, a copy of that argument is
12716 made in memory and a pointer to the argument is passed instead of
12717 the argument itself. The pointer is passed in whatever way is
12718 appropriate for passing a pointer to that type.
12720 Under V.4, aggregates and long double are passed by reference.
12722 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12723 reference unless the AltiVec vector extension ABI is in force.
12725 As an extension to all ABIs, variable sized types are passed by
12726 reference. */
12728 static bool
12729 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12730 machine_mode mode, const_tree type,
12731 bool named ATTRIBUTE_UNUSED)
12733 if (!type)
12734 return 0;
12736 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12737 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12739 if (TARGET_DEBUG_ARG)
12740 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12741 return 1;
12744 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12746 if (TARGET_DEBUG_ARG)
12747 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12748 return 1;
12751 if (int_size_in_bytes (type) < 0)
12753 if (TARGET_DEBUG_ARG)
12754 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12755 return 1;
12758 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12759 modes only exist for GCC vector types if -maltivec. */
12760 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12762 if (TARGET_DEBUG_ARG)
12763 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12764 return 1;
12767 /* Pass synthetic vectors in memory. */
12768 if (TREE_CODE (type) == VECTOR_TYPE
12769 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12771 static bool warned_for_pass_big_vectors = false;
12772 if (TARGET_DEBUG_ARG)
12773 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12774 if (!warned_for_pass_big_vectors)
12776 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12777 "non-standard ABI extension with no compatibility guarantee");
12778 warned_for_pass_big_vectors = true;
12780 return 1;
12783 return 0;
12786 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12787 already processed. Return true if the parameter must be passed
12788 (fully or partially) on the stack. */
12790 static bool
12791 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12793 machine_mode mode;
12794 int unsignedp;
12795 rtx entry_parm;
12797 /* Catch errors. */
12798 if (type == NULL || type == error_mark_node)
12799 return true;
12801 /* Handle types with no storage requirement. */
12802 if (TYPE_MODE (type) == VOIDmode)
12803 return false;
12805 /* Handle complex types. */
12806 if (TREE_CODE (type) == COMPLEX_TYPE)
12807 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12808 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12810 /* Handle transparent aggregates. */
12811 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12812 && TYPE_TRANSPARENT_AGGR (type))
12813 type = TREE_TYPE (first_field (type));
12815 /* See if this arg was passed by invisible reference. */
12816 if (pass_by_reference (get_cumulative_args (args_so_far),
12817 TYPE_MODE (type), type, true))
12818 type = build_pointer_type (type);
12820 /* Find mode as it is passed by the ABI. */
12821 unsignedp = TYPE_UNSIGNED (type);
12822 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12824 /* If we must pass in stack, we need a stack. */
12825 if (rs6000_must_pass_in_stack (mode, type))
12826 return true;
12828 /* If there is no incoming register, we need a stack. */
12829 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12830 if (entry_parm == NULL)
12831 return true;
12833 /* Likewise if we need to pass both in registers and on the stack. */
12834 if (GET_CODE (entry_parm) == PARALLEL
12835 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12836 return true;
12838 /* Also true if we're partially in registers and partially not. */
12839 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12840 return true;
12842 /* Update info on where next arg arrives in registers. */
12843 rs6000_function_arg_advance (args_so_far, mode, type, true);
12844 return false;
12847 /* Return true if FUN has no prototype, has a variable argument
12848 list, or passes any parameter in memory. */
12850 static bool
12851 rs6000_function_parms_need_stack (tree fun, bool incoming)
12853 tree fntype, result;
12854 CUMULATIVE_ARGS args_so_far_v;
12855 cumulative_args_t args_so_far;
12857 if (!fun)
12858 /* Must be a libcall; libcalls use only reg parms. */
12859 return false;
12861 fntype = fun;
12862 if (!TYPE_P (fun))
12863 fntype = TREE_TYPE (fun);
12865 /* Varargs functions need the parameter save area. */
12866 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12867 return true;
12869 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12870 args_so_far = pack_cumulative_args (&args_so_far_v);
12872 /* When incoming, we will have been passed the function decl.
12873 It is necessary to use the decl to handle K&R style functions,
12874 where TYPE_ARG_TYPES may not be available. */
12875 if (incoming)
12877 gcc_assert (DECL_P (fun));
12878 result = DECL_RESULT (fun);
12880 else
12881 result = TREE_TYPE (fntype);
12883 if (result && aggregate_value_p (result, fntype))
12885 if (!TYPE_P (result))
12886 result = TREE_TYPE (result);
12887 result = build_pointer_type (result);
12888 rs6000_parm_needs_stack (args_so_far, result);
12891 if (incoming)
12893 tree parm;
12895 for (parm = DECL_ARGUMENTS (fun);
12896 parm && parm != void_list_node;
12897 parm = TREE_CHAIN (parm))
12898 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12899 return true;
12901 else
12903 function_args_iterator args_iter;
12904 tree arg_type;
12906 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12907 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12908 return true;
12911 return false;
12914 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12915 usually a constant depending on the ABI. However, in the ELFv2 ABI
12916 the register parameter area is optional when calling a function that
12917 has a prototype in scope, has no variable argument list, and passes
12918 all parameters in registers. */
12920 int
12921 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12923 int reg_parm_stack_space;
12925 switch (DEFAULT_ABI)
12927 default:
12928 reg_parm_stack_space = 0;
12929 break;
12931 case ABI_AIX:
12932 case ABI_DARWIN:
12933 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12934 break;
12936 case ABI_ELFv2:
12937 /* ??? Recomputing this every time is a bit expensive. Is there
12938 a place to cache this information? */
12939 if (rs6000_function_parms_need_stack (fun, incoming))
12940 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12941 else
12942 reg_parm_stack_space = 0;
12943 break;
12946 return reg_parm_stack_space;
12949 static void
12950 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12952 int i;
12953 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12955 if (nregs == 0)
12956 return;
12958 for (i = 0; i < nregs; i++)
12960 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12961 if (reload_completed)
12963 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12964 tem = NULL_RTX;
12965 else
12966 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12967 i * GET_MODE_SIZE (reg_mode));
12969 else
12970 tem = replace_equiv_address (tem, XEXP (tem, 0));
12972 gcc_assert (tem);
12974 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12978 /* Perform any actions needed for a function that is receiving a
12979 variable number of arguments.
12981 CUM is as above.
12983 MODE and TYPE are the mode and type of the current parameter.
12985 PRETEND_SIZE is a variable that should be set to the amount of stack
12986 that must be pushed by the prolog to pretend that our caller pushed it.
12989 Normally, this macro will push all remaining incoming registers on the
12990 stack and set PRETEND_SIZE to the length of the registers pushed. */
12992 static void
12993 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12994 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12995 int no_rtl)
12997 CUMULATIVE_ARGS next_cum;
12998 int reg_size = TARGET_32BIT ? 4 : 8;
12999 rtx save_area = NULL_RTX, mem;
13000 int first_reg_offset;
13001 alias_set_type set;
13003 /* Skip the last named argument. */
13004 next_cum = *get_cumulative_args (cum);
13005 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
13007 if (DEFAULT_ABI == ABI_V4)
13009 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
13011 if (! no_rtl)
13013 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
13014 HOST_WIDE_INT offset = 0;
13016 /* Try to optimize the size of the varargs save area.
13017 The ABI requires that ap.reg_save_area is doubleword
13018 aligned, but we don't need to allocate space for all
13019 the bytes, only those to which we actually will save
13020 anything. */
13021 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
13022 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
13023 if (TARGET_HARD_FLOAT
13024 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13025 && cfun->va_list_fpr_size)
13027 if (gpr_reg_num)
13028 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
13029 * UNITS_PER_FP_WORD;
13030 if (cfun->va_list_fpr_size
13031 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13032 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13033 else
13034 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13035 * UNITS_PER_FP_WORD;
13037 if (gpr_reg_num)
13039 offset = -((first_reg_offset * reg_size) & ~7);
13040 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13042 gpr_reg_num = cfun->va_list_gpr_size;
13043 if (reg_size == 4 && (first_reg_offset & 1))
13044 gpr_reg_num++;
13046 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13048 else if (fpr_size)
13049 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13050 * UNITS_PER_FP_WORD
13051 - (int) (GP_ARG_NUM_REG * reg_size);
13053 if (gpr_size + fpr_size)
13055 rtx reg_save_area
13056 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13057 gcc_assert (GET_CODE (reg_save_area) == MEM);
13058 reg_save_area = XEXP (reg_save_area, 0);
13059 if (GET_CODE (reg_save_area) == PLUS)
13061 gcc_assert (XEXP (reg_save_area, 0)
13062 == virtual_stack_vars_rtx);
13063 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13064 offset += INTVAL (XEXP (reg_save_area, 1));
13066 else
13067 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13070 cfun->machine->varargs_save_offset = offset;
13071 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13074 else
13076 first_reg_offset = next_cum.words;
13077 save_area = crtl->args.internal_arg_pointer;
13079 if (targetm.calls.must_pass_in_stack (mode, type))
13080 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13083 set = get_varargs_alias_set ();
13084 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13085 && cfun->va_list_gpr_size)
13087 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13089 if (va_list_gpr_counter_field)
13090 /* V4 va_list_gpr_size counts number of registers needed. */
13091 n_gpr = cfun->va_list_gpr_size;
13092 else
13093 /* char * va_list instead counts number of bytes needed. */
13094 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13096 if (nregs > n_gpr)
13097 nregs = n_gpr;
13099 mem = gen_rtx_MEM (BLKmode,
13100 plus_constant (Pmode, save_area,
13101 first_reg_offset * reg_size));
13102 MEM_NOTRAP_P (mem) = 1;
13103 set_mem_alias_set (mem, set);
13104 set_mem_align (mem, BITS_PER_WORD);
13106 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13107 nregs);
13110 /* Save FP registers if needed. */
13111 if (DEFAULT_ABI == ABI_V4
13112 && TARGET_HARD_FLOAT
13113 && ! no_rtl
13114 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13115 && cfun->va_list_fpr_size)
13117 int fregno = next_cum.fregno, nregs;
13118 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13119 rtx lab = gen_label_rtx ();
13120 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13121 * UNITS_PER_FP_WORD);
13123 emit_jump_insn
13124 (gen_rtx_SET (pc_rtx,
13125 gen_rtx_IF_THEN_ELSE (VOIDmode,
13126 gen_rtx_NE (VOIDmode, cr1,
13127 const0_rtx),
13128 gen_rtx_LABEL_REF (VOIDmode, lab),
13129 pc_rtx)));
13131 for (nregs = 0;
13132 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13133 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13135 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13136 ? DFmode : SFmode,
13137 plus_constant (Pmode, save_area, off));
13138 MEM_NOTRAP_P (mem) = 1;
13139 set_mem_alias_set (mem, set);
13140 set_mem_align (mem, GET_MODE_ALIGNMENT (
13141 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13142 ? DFmode : SFmode));
13143 emit_move_insn (mem, gen_rtx_REG (
13144 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13145 ? DFmode : SFmode, fregno));
13148 emit_label (lab);
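/* Layout sketch (illustration only): under ABI_V4 the register save
   area assembled above is

       reg_save_area + 0:                          r3..r10 spill slots
       reg_save_area + GP_ARG_NUM_REG * reg_size:  f1..f8 spill slots

   at virtual_stack_vars_rtx + varargs_save_offset; the FP stores are
   guarded by the runtime test of CR1 emitted just before the loop.  */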
13152 /* Create the va_list data type. */
13154 static tree
13155 rs6000_build_builtin_va_list (void)
13157 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13159 /* For AIX, prefer 'char *' because that's what the system
13160 header files like. */
13161 if (DEFAULT_ABI != ABI_V4)
13162 return build_pointer_type (char_type_node);
13164 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13165 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13166 get_identifier ("__va_list_tag"), record);
13168 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13169 unsigned_char_type_node);
13170 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13171 unsigned_char_type_node);
13172 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13173 every user file. */
13174 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13175 get_identifier ("reserved"), short_unsigned_type_node);
13176 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13177 get_identifier ("overflow_arg_area"),
13178 ptr_type_node);
13179 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13180 get_identifier ("reg_save_area"),
13181 ptr_type_node);
13183 va_list_gpr_counter_field = f_gpr;
13184 va_list_fpr_counter_field = f_fpr;
13186 DECL_FIELD_CONTEXT (f_gpr) = record;
13187 DECL_FIELD_CONTEXT (f_fpr) = record;
13188 DECL_FIELD_CONTEXT (f_res) = record;
13189 DECL_FIELD_CONTEXT (f_ovf) = record;
13190 DECL_FIELD_CONTEXT (f_sav) = record;
13192 TYPE_STUB_DECL (record) = type_decl;
13193 TYPE_NAME (record) = type_decl;
13194 TYPE_FIELDS (record) = f_gpr;
13195 DECL_CHAIN (f_gpr) = f_fpr;
13196 DECL_CHAIN (f_fpr) = f_res;
13197 DECL_CHAIN (f_res) = f_ovf;
13198 DECL_CHAIN (f_ovf) = f_sav;
13200 layout_type (record);
13202 /* The correct type is an array type of one element. */
13203 return build_array_type (record, build_index_type (size_zero_node));
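/* Illustrative sketch (not part of the original source): the record
   built above corresponds roughly to the familiar V.4 declaration

       typedef struct __va_list_tag {
         unsigned char gpr;        -- next GP arg register to use
         unsigned char fpr;        -- next FP arg register to use
         unsigned short reserved;  -- padding, named to quiet -Wpadded
         void *overflow_arg_area;  -- args that spilled to the stack
         void *reg_save_area;      -- block saved by the prologue
       } __va_list[1];

   the typedef name and the field annotations are illustrative.  */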
13206 /* Implement va_start. */
13208 static void
13209 rs6000_va_start (tree valist, rtx nextarg)
13211 HOST_WIDE_INT words, n_gpr, n_fpr;
13212 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13213 tree gpr, fpr, ovf, sav, t;
13215 /* Only SVR4 needs something special. */
13216 if (DEFAULT_ABI != ABI_V4)
13218 std_expand_builtin_va_start (valist, nextarg);
13219 return;
13222 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13223 f_fpr = DECL_CHAIN (f_gpr);
13224 f_res = DECL_CHAIN (f_fpr);
13225 f_ovf = DECL_CHAIN (f_res);
13226 f_sav = DECL_CHAIN (f_ovf);
13228 valist = build_simple_mem_ref (valist);
13229 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13230 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13231 f_fpr, NULL_TREE);
13232 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13233 f_ovf, NULL_TREE);
13234 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13235 f_sav, NULL_TREE);
13237 /* Count number of gp and fp argument registers used. */
13238 words = crtl->args.info.words;
13239 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13240 GP_ARG_NUM_REG);
13241 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13242 FP_ARG_NUM_REG);
13244 if (TARGET_DEBUG_ARG)
13245 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13246 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13247 words, n_gpr, n_fpr);
13249 if (cfun->va_list_gpr_size)
13251 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13252 build_int_cst (NULL_TREE, n_gpr));
13253 TREE_SIDE_EFFECTS (t) = 1;
13254 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13257 if (cfun->va_list_fpr_size)
13259 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13260 build_int_cst (NULL_TREE, n_fpr));
13261 TREE_SIDE_EFFECTS (t) = 1;
13262 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13264 #ifdef HAVE_AS_GNU_ATTRIBUTE
13265 if (call_ABI_of_interest (cfun->decl))
13266 rs6000_passes_float = true;
13267 #endif
13270 /* Find the overflow area. */
13271 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13272 if (words != 0)
13273 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13274 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13275 TREE_SIDE_EFFECTS (t) = 1;
13276 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13278 /* If there were no va_arg invocations, don't set up the register
13279 save area. */
13280 if (!cfun->va_list_gpr_size
13281 && !cfun->va_list_fpr_size
13282 && n_gpr < GP_ARG_NUM_REG
13283 && n_fpr < FP_ARG_V4_MAX_REG)
13284 return;
13286 /* Find the register save area. */
13287 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13288 if (cfun->machine->varargs_save_offset)
13289 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13290 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13291 TREE_SIDE_EFFECTS (t) = 1;
13292 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
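/* Net-effect sketch (illustration only, ABI_V4): after va_start the
   tag fields hold

       gpr = MIN (sysv_gregno - GP_ARG_MIN_REG, GP_ARG_NUM_REG)
       fpr = MIN (fregno - FP_ARG_MIN_REG, FP_ARG_NUM_REG)
       overflow_arg_area = incoming arg ptr + words * MIN_UNITS_PER_WORD
       reg_save_area     = virtual_stack_vars + varargs_save_offset

   matching the assignments expanded above.  */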
13295 /* Implement va_arg. */
13297 static tree
13298 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13299 gimple_seq *post_p)
13301 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13302 tree gpr, fpr, ovf, sav, reg, t, u;
13303 int size, rsize, n_reg, sav_ofs, sav_scale;
13304 tree lab_false, lab_over, addr;
13305 int align;
13306 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13307 int regalign = 0;
13308 gimple *stmt;
13310 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13312 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13313 return build_va_arg_indirect_ref (t);
13316 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13317 earlier version of gcc, with the property that it always applied alignment
13318 adjustments to the va-args (even for zero-sized types). The cheapest way
13319 to deal with this is to replicate the effect of the part of
13320 std_gimplify_va_arg_expr that carries out the align adjust, for the case
13321 of relevance.
13322 We don't need to check for pass-by-reference because of the test above.
13323 We can return a simplified answer, since we know there's no offset to add. */
13325 if (((TARGET_MACHO
13326 && rs6000_darwin64_abi)
13327 || DEFAULT_ABI == ABI_ELFv2
13328 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13329 && integer_zerop (TYPE_SIZE (type)))
13331 unsigned HOST_WIDE_INT align, boundary;
13332 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13333 align = PARM_BOUNDARY / BITS_PER_UNIT;
13334 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13335 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13336 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13337 boundary /= BITS_PER_UNIT;
13338 if (boundary > align)
13340 tree t;
13341 /* This updates arg ptr by the amount that would be necessary
13342 to align the zero-sized (but not zero-alignment) item. */
13343 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13344 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13345 gimplify_and_add (t, pre_p);
13347 t = fold_convert (sizetype, valist_tmp);
13348 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13349 fold_convert (TREE_TYPE (valist),
13350 fold_build2 (BIT_AND_EXPR, sizetype, t,
13351 size_int (-boundary))));
13352 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13353 gimplify_and_add (t, pre_p);
13355 /* Since it is zero-sized there's no increment for the item itself. */
13356 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13357 return build_va_arg_indirect_ref (valist_tmp);
13360 if (DEFAULT_ABI != ABI_V4)
13362 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13364 tree elem_type = TREE_TYPE (type);
13365 machine_mode elem_mode = TYPE_MODE (elem_type);
13366 int elem_size = GET_MODE_SIZE (elem_mode);
13368 if (elem_size < UNITS_PER_WORD)
13370 tree real_part, imag_part;
13371 gimple_seq post = NULL;
13373 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13374 &post);
13375 /* Copy the value into a temporary, lest the formal temporary
13376 be reused out from under us. */
13377 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13378 gimple_seq_add_seq (pre_p, post);
13380 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13381 post_p);
13383 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13387 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13390 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13391 f_fpr = DECL_CHAIN (f_gpr);
13392 f_res = DECL_CHAIN (f_fpr);
13393 f_ovf = DECL_CHAIN (f_res);
13394 f_sav = DECL_CHAIN (f_ovf);
13396 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13397 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13398 f_fpr, NULL_TREE);
13399 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13400 f_ovf, NULL_TREE);
13401 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13402 f_sav, NULL_TREE);
13404 size = int_size_in_bytes (type);
13405 rsize = (size + 3) / 4;
13406 int pad = 4 * rsize - size;
13407 align = 1;
13409 machine_mode mode = TYPE_MODE (type);
13410 if (abi_v4_pass_in_fpr (mode))
13412 /* FP args go in FP registers, if present. */
13413 reg = fpr;
13414 n_reg = (size + 7) / 8;
13415 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13416 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13417 if (mode != SFmode && mode != SDmode)
13418 align = 8;
13420 else
13422 /* Otherwise into GP registers. */
13423 reg = gpr;
13424 n_reg = rsize;
13425 sav_ofs = 0;
13426 sav_scale = 4;
13427 if (n_reg == 2)
13428 align = 8;
13431 /* Pull the value out of the saved registers.... */
13433 lab_over = NULL;
13434 addr = create_tmp_var (ptr_type_node, "addr");
13436 /* AltiVec vectors never go in registers when -mabi=altivec. */
13437 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13438 align = 16;
13439 else
13441 lab_false = create_artificial_label (input_location);
13442 lab_over = create_artificial_label (input_location);
13444 /* Long long is aligned in the registers, as is any other 2-gpr
13445 item such as complex int, due to a historical mistake. */
13446 u = reg;
13447 if (n_reg == 2 && reg == gpr)
13449 regalign = 1;
13450 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13451 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13452 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13453 unshare_expr (reg), u);
13455 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13456 reg number is 0 for f1, so we want to make it odd. */
13457 else if (reg == fpr && mode == TDmode)
13459 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13460 build_int_cst (TREE_TYPE (reg), 1));
13461 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13464 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13465 t = build2 (GE_EXPR, boolean_type_node, u, t);
13466 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13467 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13468 gimplify_and_add (t, pre_p);
13470 t = sav;
13471 if (sav_ofs)
13472 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13474 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13475 build_int_cst (TREE_TYPE (reg), n_reg));
13476 u = fold_convert (sizetype, u);
13477 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13478 t = fold_build_pointer_plus (t, u);
13480 /* _Decimal32 varargs are located in the second word of the 64-bit
13481 FP register for 32-bit binaries. */
13482 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13483 t = fold_build_pointer_plus_hwi (t, size);
13485 /* Args are passed right-aligned. */
13486 if (BYTES_BIG_ENDIAN)
13487 t = fold_build_pointer_plus_hwi (t, pad);
13489 gimplify_assign (addr, t, pre_p);
13491 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13493 stmt = gimple_build_label (lab_false);
13494 gimple_seq_add_stmt (pre_p, stmt);
13496 if ((n_reg == 2 && !regalign) || n_reg > 2)
13498 /* Ensure that we don't find any more args in regs.
13499 Alignment has been taken care of for special cases. */
13500 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13504 /* ... otherwise out of the overflow area. */
13506 /* Care for on-stack alignment if needed. */
13507 t = ovf;
13508 if (align != 1)
13510 t = fold_build_pointer_plus_hwi (t, align - 1);
13511 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13512 build_int_cst (TREE_TYPE (t), -align));
13515 /* Args are passed right-aligned. */
13516 if (BYTES_BIG_ENDIAN)
13517 t = fold_build_pointer_plus_hwi (t, pad);
13519 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13521 gimplify_assign (unshare_expr (addr), t, pre_p);
13523 t = fold_build_pointer_plus_hwi (t, size);
13524 gimplify_assign (unshare_expr (ovf), t, pre_p);
13526 if (lab_over)
13528 stmt = gimple_build_label (lab_over);
13529 gimple_seq_add_stmt (pre_p, stmt);
13532 if (STRICT_ALIGNMENT
13533 && (TYPE_ALIGN (type)
13534 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13536 /* The value (of type complex double, for example) may not be
13537 aligned in memory in the saved registers, so copy via a
13538 temporary. (This is the same code as used for SPARC.) */
13539 tree tmp = create_tmp_var (type, "va_arg_tmp");
13540 tree dest_addr = build_fold_addr_expr (tmp);
13542 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13543 3, dest_addr, addr, size_int (rsize * 4));
13545 gimplify_and_add (copy, pre_p);
13546 addr = dest_addr;
13549 addr = fold_convert (ptrtype, addr);
13550 return build_va_arg_indirect_ref (addr);
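/* Worked example (illustration only): on a 32-bit hard-float V.4
   target, va_arg (ap, double) takes the FPR path above with size = 8,
   n_reg = 1, sav_ofs = 32 and sav_scale = 8.  If fpr < 8, the value is
   loaded from reg_save_area + 32 + fpr * 8 and fpr is bumped by one;
   otherwise it comes from the overflow area, first aligned up to 8
   bytes.  */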
13553 /* Builtins. */
13555 static void
13556 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13558 tree t;
13559 unsigned classify = rs6000_builtin_info[(int)code].attr;
13560 const char *attr_string = "";
13562 gcc_assert (name != NULL);
13563 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13565 if (rs6000_builtin_decls[(int)code])
13566 fatal_error (input_location,
13567 "internal error: builtin function %s already processed", name);
13569 rs6000_builtin_decls[(int)code] = t =
13570 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13572 /* Set any special attributes. */
13573 if ((classify & RS6000_BTC_CONST) != 0)
13575 /* const function, function only depends on the inputs. */
13576 TREE_READONLY (t) = 1;
13577 TREE_NOTHROW (t) = 1;
13578 attr_string = ", const";
13580 else if ((classify & RS6000_BTC_PURE) != 0)
13582 /* pure function, function can read global memory, but does not set any
13583 external state. */
13584 DECL_PURE_P (t) = 1;
13585 TREE_NOTHROW (t) = 1;
13586 attr_string = ", pure";
13588 else if ((classify & RS6000_BTC_FP) != 0)
13590 /* Function is a math function. If rounding mode is on, then treat the
13591 function as not reading global memory, but it can have arbitrary side
13592 effects. If it is off, then assume the function is a const function.
13593 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13594 builtin-attrs.def that is used for the math functions. */
13595 TREE_NOTHROW (t) = 1;
13596 if (flag_rounding_math)
13598 DECL_PURE_P (t) = 1;
13599 DECL_IS_NOVOPS (t) = 1;
13600 attr_string = ", fp, pure";
13602 else
13604 TREE_READONLY (t) = 1;
13605 attr_string = ", fp, const";
13608 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13609 gcc_unreachable ();
13611 if (TARGET_DEBUG_BUILTIN)
13612 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13613 (int)code, name, attr_string);
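/* Usage sketch (illustration only; the type-node variable below is
   hypothetical):

       def_builtin ("__builtin_altivec_vaddubm",
                    v16qi_ftype_v16qi_v16qi,
                    ALTIVEC_BUILTIN_VADDUBM);

   registers the builtin via add_builtin_function and, since its
   rs6000-builtin.def entry is classified RS6000_BTC_CONST, also marks
   the decl TREE_READONLY and TREE_NOTHROW as done above.  */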
13616 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13618 #undef RS6000_BUILTIN_0
13619 #undef RS6000_BUILTIN_1
13620 #undef RS6000_BUILTIN_2
13621 #undef RS6000_BUILTIN_3
13622 #undef RS6000_BUILTIN_A
13623 #undef RS6000_BUILTIN_D
13624 #undef RS6000_BUILTIN_H
13625 #undef RS6000_BUILTIN_P
13626 #undef RS6000_BUILTIN_Q
13627 #undef RS6000_BUILTIN_X
13629 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13630 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13631 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13632 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13633 { MASK, ICODE, NAME, ENUM },
13635 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13636 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13637 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13638 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13639 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13640 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13642 static const struct builtin_description bdesc_3arg[] =
13644 #include "rs6000-builtin.def"
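/* Expansion sketch (illustration only): each table collects one class
   of builtin by making exactly one RS6000_BUILTIN_* macro expand to an
   initializer.  Conceptually, a ternary entry in rs6000-builtin.def
   such as the AltiVec vmaddfp builtin expands here to

       { RS6000_BTM_ALTIVEC, CODE_FOR_fmav4sf4,
         "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },

   while expanding to nothing in every other bdesc_* table below.  */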
13647 /* DST operations: void foo (void *, const int, const char). */
13649 #undef RS6000_BUILTIN_0
13650 #undef RS6000_BUILTIN_1
13651 #undef RS6000_BUILTIN_2
13652 #undef RS6000_BUILTIN_3
13653 #undef RS6000_BUILTIN_A
13654 #undef RS6000_BUILTIN_D
13655 #undef RS6000_BUILTIN_H
13656 #undef RS6000_BUILTIN_P
13657 #undef RS6000_BUILTIN_Q
13658 #undef RS6000_BUILTIN_X
13660 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13661 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13662 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13663 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13664 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13665 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13666 { MASK, ICODE, NAME, ENUM },
13668 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13669 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13670 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13671 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13673 static const struct builtin_description bdesc_dst[] =
13675 #include "rs6000-builtin.def"
13678 /* Simple binary operations: VECc = foo (VECa, VECb). */
13680 #undef RS6000_BUILTIN_0
13681 #undef RS6000_BUILTIN_1
13682 #undef RS6000_BUILTIN_2
13683 #undef RS6000_BUILTIN_3
13684 #undef RS6000_BUILTIN_A
13685 #undef RS6000_BUILTIN_D
13686 #undef RS6000_BUILTIN_H
13687 #undef RS6000_BUILTIN_P
13688 #undef RS6000_BUILTIN_Q
13689 #undef RS6000_BUILTIN_X
13691 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13692 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13693 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13694 { MASK, ICODE, NAME, ENUM },
13696 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13697 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13698 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13699 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13700 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13701 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13702 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13704 static const struct builtin_description bdesc_2arg[] =
13706 #include "rs6000-builtin.def"
13709 #undef RS6000_BUILTIN_0
13710 #undef RS6000_BUILTIN_1
13711 #undef RS6000_BUILTIN_2
13712 #undef RS6000_BUILTIN_3
13713 #undef RS6000_BUILTIN_A
13714 #undef RS6000_BUILTIN_D
13715 #undef RS6000_BUILTIN_H
13716 #undef RS6000_BUILTIN_P
13717 #undef RS6000_BUILTIN_Q
13718 #undef RS6000_BUILTIN_X
13720 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13721 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13722 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13723 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13724 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13725 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13726 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13727 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13728 { MASK, ICODE, NAME, ENUM },
13730 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13731 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13733 /* AltiVec predicates. */
13735 static const struct builtin_description bdesc_altivec_preds[] =
13737 #include "rs6000-builtin.def"
13740 /* PAIRED predicates. */
13741 #undef RS6000_BUILTIN_0
13742 #undef RS6000_BUILTIN_1
13743 #undef RS6000_BUILTIN_2
13744 #undef RS6000_BUILTIN_3
13745 #undef RS6000_BUILTIN_A
13746 #undef RS6000_BUILTIN_D
13747 #undef RS6000_BUILTIN_H
13748 #undef RS6000_BUILTIN_P
13749 #undef RS6000_BUILTIN_Q
13750 #undef RS6000_BUILTIN_X
13752 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13753 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13754 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13755 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13756 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13757 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13758 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13759 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13760 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13761 { MASK, ICODE, NAME, ENUM },
13763 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13765 static const struct builtin_description bdesc_paired_preds[] =
13767 #include "rs6000-builtin.def"
13770 /* ABS* operations. */
13772 #undef RS6000_BUILTIN_0
13773 #undef RS6000_BUILTIN_1
13774 #undef RS6000_BUILTIN_2
13775 #undef RS6000_BUILTIN_3
13776 #undef RS6000_BUILTIN_A
13777 #undef RS6000_BUILTIN_D
13778 #undef RS6000_BUILTIN_H
13779 #undef RS6000_BUILTIN_P
13780 #undef RS6000_BUILTIN_Q
13781 #undef RS6000_BUILTIN_X
13783 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13784 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13785 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13786 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13787 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13788 { MASK, ICODE, NAME, ENUM },
13790 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13791 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13792 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13793 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13794 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13796 static const struct builtin_description bdesc_abs[] =
13798 #include "rs6000-builtin.def"
13801 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13802 foo (VECa). */
13804 #undef RS6000_BUILTIN_0
13805 #undef RS6000_BUILTIN_1
13806 #undef RS6000_BUILTIN_2
13807 #undef RS6000_BUILTIN_3
13808 #undef RS6000_BUILTIN_A
13809 #undef RS6000_BUILTIN_D
13810 #undef RS6000_BUILTIN_H
13811 #undef RS6000_BUILTIN_P
13812 #undef RS6000_BUILTIN_Q
13813 #undef RS6000_BUILTIN_X
13815 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13816 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13817 { MASK, ICODE, NAME, ENUM },
13819 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13820 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13821 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13822 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13823 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13824 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13825 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13826 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13828 static const struct builtin_description bdesc_1arg[] =
13830 #include "rs6000-builtin.def"
13833 /* Simple no-argument operations: result = __builtin_darn_32 () */
13835 #undef RS6000_BUILTIN_0
13836 #undef RS6000_BUILTIN_1
13837 #undef RS6000_BUILTIN_2
13838 #undef RS6000_BUILTIN_3
13839 #undef RS6000_BUILTIN_A
13840 #undef RS6000_BUILTIN_D
13841 #undef RS6000_BUILTIN_H
13842 #undef RS6000_BUILTIN_P
13843 #undef RS6000_BUILTIN_Q
13844 #undef RS6000_BUILTIN_X
13846 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13847 { MASK, ICODE, NAME, ENUM },
13849 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13850 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13851 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13852 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13853 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13854 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13855 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13856 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13857 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13859 static const struct builtin_description bdesc_0arg[] =
13861 #include "rs6000-builtin.def"
13864 /* HTM builtins. */
13865 #undef RS6000_BUILTIN_0
13866 #undef RS6000_BUILTIN_1
13867 #undef RS6000_BUILTIN_2
13868 #undef RS6000_BUILTIN_3
13869 #undef RS6000_BUILTIN_A
13870 #undef RS6000_BUILTIN_D
13871 #undef RS6000_BUILTIN_H
13872 #undef RS6000_BUILTIN_P
13873 #undef RS6000_BUILTIN_Q
13874 #undef RS6000_BUILTIN_X
13876 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13877 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13878 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13879 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13880 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13881 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13882 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13883 { MASK, ICODE, NAME, ENUM },
13885 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13886 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13887 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13889 static const struct builtin_description bdesc_htm[] =
13891 #include "rs6000-builtin.def"
13894 #undef RS6000_BUILTIN_0
13895 #undef RS6000_BUILTIN_1
13896 #undef RS6000_BUILTIN_2
13897 #undef RS6000_BUILTIN_3
13898 #undef RS6000_BUILTIN_A
13899 #undef RS6000_BUILTIN_D
13900 #undef RS6000_BUILTIN_H
13901 #undef RS6000_BUILTIN_P
13902 #undef RS6000_BUILTIN_Q
13904 /* Return true if a builtin function is overloaded. */
13905 bool
13906 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13908 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13911 const char *
13912 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13914 return rs6000_builtin_info[(int)fncode].name;
13917 /* Expand an expression EXP that calls a builtin without arguments. */
13918 static rtx
13919 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13921 rtx pat;
13922 machine_mode tmode = insn_data[icode].operand[0].mode;
13924 if (icode == CODE_FOR_nothing)
13925 /* Builtin not supported on this processor. */
13926 return 0;
13928 if (target == 0
13929 || GET_MODE (target) != tmode
13930 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13931 target = gen_reg_rtx (tmode);
13933 pat = GEN_FCN (icode) (target);
13934 if (! pat)
13935 return 0;
13936 emit_insn (pat);
13938 return target;
13942 static rtx
13943 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13945 rtx pat;
13946 tree arg0 = CALL_EXPR_ARG (exp, 0);
13947 tree arg1 = CALL_EXPR_ARG (exp, 1);
13948 rtx op0 = expand_normal (arg0);
13949 rtx op1 = expand_normal (arg1);
13950 machine_mode mode0 = insn_data[icode].operand[0].mode;
13951 machine_mode mode1 = insn_data[icode].operand[1].mode;
13953 if (icode == CODE_FOR_nothing)
13954 /* Builtin not supported on this processor. */
13955 return 0;
13957 /* If we got invalid arguments bail out before generating bad rtl. */
13958 if (arg0 == error_mark_node || arg1 == error_mark_node)
13959 return const0_rtx;
13961 if (GET_CODE (op0) != CONST_INT
13962 || INTVAL (op0) > 255
13963 || INTVAL (op0) < 0)
13965 error ("argument 1 must be an 8-bit field value");
13966 return const0_rtx;
13969 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13970 op0 = copy_to_mode_reg (mode0, op0);
13972 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13973 op1 = copy_to_mode_reg (mode1, op1);
13975 pat = GEN_FCN (icode) (op0, op1);
13976 if (! pat)
13977 return const0_rtx;
13978 emit_insn (pat);
13980 return NULL_RTX;
13983 static rtx
13984 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13986 rtx pat;
13987 tree arg0 = CALL_EXPR_ARG (exp, 0);
13988 rtx op0 = expand_normal (arg0);
13989 machine_mode tmode = insn_data[icode].operand[0].mode;
13990 machine_mode mode0 = insn_data[icode].operand[1].mode;
13992 if (icode == CODE_FOR_nothing)
13993 /* Builtin not supported on this processor. */
13994 return 0;
13996 /* If we got invalid arguments bail out before generating bad rtl. */
13997 if (arg0 == error_mark_node)
13998 return const0_rtx;
14000 if (icode == CODE_FOR_altivec_vspltisb
14001 || icode == CODE_FOR_altivec_vspltish
14002 || icode == CODE_FOR_altivec_vspltisw)
14004 /* Only allow 5-bit *signed* literals. */
14005 if (GET_CODE (op0) != CONST_INT
14006 || INTVAL (op0) > 15
14007 || INTVAL (op0) < -16)
14009 error ("argument 1 must be a 5-bit signed literal");
14010 return CONST0_RTX (tmode);
14014 if (target == 0
14015 || GET_MODE (target) != tmode
14016 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14017 target = gen_reg_rtx (tmode);
14019 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14020 op0 = copy_to_mode_reg (mode0, op0);
14022 pat = GEN_FCN (icode) (target, op0);
14023 if (! pat)
14024 return 0;
14025 emit_insn (pat);
14027 return target;
14030 static rtx
14031 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14033 rtx pat, scratch1, scratch2;
14034 tree arg0 = CALL_EXPR_ARG (exp, 0);
14035 rtx op0 = expand_normal (arg0);
14036 machine_mode tmode = insn_data[icode].operand[0].mode;
14037 machine_mode mode0 = insn_data[icode].operand[1].mode;
14039 /* If we have invalid arguments, bail out before generating bad rtl. */
14040 if (arg0 == error_mark_node)
14041 return const0_rtx;
14043 if (target == 0
14044 || GET_MODE (target) != tmode
14045 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14046 target = gen_reg_rtx (tmode);
14048 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14049 op0 = copy_to_mode_reg (mode0, op0);
14051 scratch1 = gen_reg_rtx (mode0);
14052 scratch2 = gen_reg_rtx (mode0);
14054 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14055 if (! pat)
14056 return 0;
14057 emit_insn (pat);
14059 return target;
14062 static rtx
14063 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14065 rtx pat;
14066 tree arg0 = CALL_EXPR_ARG (exp, 0);
14067 tree arg1 = CALL_EXPR_ARG (exp, 1);
14068 rtx op0 = expand_normal (arg0);
14069 rtx op1 = expand_normal (arg1);
14070 machine_mode tmode = insn_data[icode].operand[0].mode;
14071 machine_mode mode0 = insn_data[icode].operand[1].mode;
14072 machine_mode mode1 = insn_data[icode].operand[2].mode;
14074 if (icode == CODE_FOR_nothing)
14075 /* Builtin not supported on this processor. */
14076 return 0;
14078 /* If we got invalid arguments bail out before generating bad rtl. */
14079 if (arg0 == error_mark_node || arg1 == error_mark_node)
14080 return const0_rtx;
14082 if (icode == CODE_FOR_altivec_vcfux
14083 || icode == CODE_FOR_altivec_vcfsx
14084 || icode == CODE_FOR_altivec_vctsxs
14085 || icode == CODE_FOR_altivec_vctuxs
14086 || icode == CODE_FOR_altivec_vspltb
14087 || icode == CODE_FOR_altivec_vsplth
14088 || icode == CODE_FOR_altivec_vspltw)
14090 /* Only allow 5-bit unsigned literals. */
14091 STRIP_NOPS (arg1);
14092 if (TREE_CODE (arg1) != INTEGER_CST
14093 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14095 error ("argument 2 must be a 5-bit unsigned literal");
14096 return CONST0_RTX (tmode);
14099 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14100 || icode == CODE_FOR_dfptstsfi_lt_dd
14101 || icode == CODE_FOR_dfptstsfi_gt_dd
14102 || icode == CODE_FOR_dfptstsfi_unordered_dd
14103 || icode == CODE_FOR_dfptstsfi_eq_td
14104 || icode == CODE_FOR_dfptstsfi_lt_td
14105 || icode == CODE_FOR_dfptstsfi_gt_td
14106 || icode == CODE_FOR_dfptstsfi_unordered_td)
14108 /* Only allow 6-bit unsigned literals. */
14109 STRIP_NOPS (arg0);
14110 if (TREE_CODE (arg0) != INTEGER_CST
14111 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14113 error ("argument 1 must be a 6-bit unsigned literal");
14114 return CONST0_RTX (tmode);
14117 else if (icode == CODE_FOR_xststdcqp
14118 || icode == CODE_FOR_xststdcdp
14119 || icode == CODE_FOR_xststdcsp
14120 || icode == CODE_FOR_xvtstdcdp
14121 || icode == CODE_FOR_xvtstdcsp)
14123 /* Only allow 7-bit unsigned literals. */
14124 STRIP_NOPS (arg1);
14125 if (TREE_CODE (arg1) != INTEGER_CST
14126 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14128 error ("argument 2 must be a 7-bit unsigned literal");
14129 return CONST0_RTX (tmode);
14133 if (target == 0
14134 || GET_MODE (target) != tmode
14135 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14136 target = gen_reg_rtx (tmode);
14138 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14139 op0 = copy_to_mode_reg (mode0, op0);
14140 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14141 op1 = copy_to_mode_reg (mode1, op1);
14143 pat = GEN_FCN (icode) (target, op0, op1);
14144 if (! pat)
14145 return 0;
14146 emit_insn (pat);
14148 return target;
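/* For example (illustration only): in a call such as
       __builtin_altivec_vspltw (v, n)
   the checks above require N to be an integer constant satisfying
   (n & ~0x1f) == 0; anything else is diagnosed here at expand time
   rather than being allowed to generate bad RTL.  */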
14151 static rtx
14152 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14154 rtx pat, scratch;
14155 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14156 tree arg0 = CALL_EXPR_ARG (exp, 1);
14157 tree arg1 = CALL_EXPR_ARG (exp, 2);
14158 rtx op0 = expand_normal (arg0);
14159 rtx op1 = expand_normal (arg1);
14160 machine_mode tmode = SImode;
14161 machine_mode mode0 = insn_data[icode].operand[1].mode;
14162 machine_mode mode1 = insn_data[icode].operand[2].mode;
14163 int cr6_form_int;
14165 if (TREE_CODE (cr6_form) != INTEGER_CST)
14167 error ("argument 1 of __builtin_altivec_predicate must be a constant");
14168 return const0_rtx;
14170 else
14171 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14173 gcc_assert (mode0 == mode1);
14175 /* If we have invalid arguments, bail out before generating bad rtl. */
14176 if (arg0 == error_mark_node || arg1 == error_mark_node)
14177 return const0_rtx;
14179 if (target == 0
14180 || GET_MODE (target) != tmode
14181 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14182 target = gen_reg_rtx (tmode);
14184 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14185 op0 = copy_to_mode_reg (mode0, op0);
14186 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14187 op1 = copy_to_mode_reg (mode1, op1);
14189 /* Note that for many of the relevant operations (e.g. cmpne or
14190 cmpeq) with float or double operands, it makes more sense for the
14191 mode of the allocated scratch register to be a vector of
14192 integers. But the choice to copy the mode of operand 0 was made
14193 long ago and there are no plans to change it. */
14194 scratch = gen_reg_rtx (mode0);
14196 pat = GEN_FCN (icode) (scratch, op0, op1);
14197 if (! pat)
14198 return 0;
14199 emit_insn (pat);
14201 /* The vec_any* and vec_all* predicates use the same opcodes for two
14202 different operations, but the bits in CR6 will be different
14203 depending on what information we want. So we have to play tricks
14204 with CR6 to get the right bits out.
14206 If you think this is disgusting, look at the specs for the
14207 AltiVec predicates. */
14209 switch (cr6_form_int)
14211 case 0:
14212 emit_insn (gen_cr6_test_for_zero (target));
14213 break;
14214 case 1:
14215 emit_insn (gen_cr6_test_for_zero_reverse (target));
14216 break;
14217 case 2:
14218 emit_insn (gen_cr6_test_for_lt (target));
14219 break;
14220 case 3:
14221 emit_insn (gen_cr6_test_for_lt_reverse (target));
14222 break;
14223 default:
14224 error ("argument 1 of __builtin_altivec_predicate is out of range");
14225 break;
14228 return target;
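/* For illustration: vec_all_eq and vec_any_eq both funnel through the
   same vcmpequ* pattern via this routine; they differ only in the CR6
   form constant passed as argument 1, and hence in which
   gen_cr6_test_* sequence is emitted above.  */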
14231 static rtx
14232 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14234 rtx pat, addr;
14235 tree arg0 = CALL_EXPR_ARG (exp, 0);
14236 tree arg1 = CALL_EXPR_ARG (exp, 1);
14237 machine_mode tmode = insn_data[icode].operand[0].mode;
14238 machine_mode mode0 = Pmode;
14239 machine_mode mode1 = Pmode;
14240 rtx op0 = expand_normal (arg0);
14241 rtx op1 = expand_normal (arg1);
14243 if (icode == CODE_FOR_nothing)
14244 /* Builtin not supported on this processor. */
14245 return 0;
14247 /* If we got invalid arguments bail out before generating bad rtl. */
14248 if (arg0 == error_mark_node || arg1 == error_mark_node)
14249 return const0_rtx;
14251 if (target == 0
14252 || GET_MODE (target) != tmode
14253 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14254 target = gen_reg_rtx (tmode);
14256 op1 = copy_to_mode_reg (mode1, op1);
14258 if (op0 == const0_rtx)
14260 addr = gen_rtx_MEM (tmode, op1);
14262 else
14264 op0 = copy_to_mode_reg (mode0, op0);
14265 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14268 pat = GEN_FCN (icode) (target, addr);
14270 if (! pat)
14271 return 0;
14272 emit_insn (pat);
14274 return target;
14277 /* Return a constant vector for use as a little-endian permute control vector
14278 to reverse the order of elements of the given vector mode. */
14279 static rtx
14280 swap_selector_for_mode (machine_mode mode)
14282 /* These are little endian vectors, so their elements are reversed
14283 from what you would normally expect for a permute control vector. */
14284 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14285 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14286 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14287 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14288 unsigned int *swaparray, i;
14289 rtx perm[16];
14291 switch (mode)
14293 case V2DFmode:
14294 case V2DImode:
14295 swaparray = swap2;
14296 break;
14297 case V4SFmode:
14298 case V4SImode:
14299 swaparray = swap4;
14300 break;
14301 case V8HImode:
14302 swaparray = swap8;
14303 break;
14304 case V16QImode:
14305 swaparray = swap16;
14306 break;
14307 default:
14308 gcc_unreachable ();
14311 for (i = 0; i < 16; ++i)
14312 perm[i] = GEN_INT (swaparray[i]);
14314 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
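/* Worked example (illustration only): for V4SImode the routine builds
   the control vector {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}.  As
   the comment above notes, the constant is written in reversed (little
   endian) element order; the resulting vperm reverses the order of the
   four 4-byte elements of its input vector.  */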
14317 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
14318 with -maltivec=be specified. Issue the load followed by an element-
14319 reversing permute. */
14320 void
14321 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14323 rtx tmp = gen_reg_rtx (mode);
14324 rtx load = gen_rtx_SET (tmp, op1);
14325 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14326 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14327 rtx sel = swap_selector_for_mode (mode);
14328 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14330 gcc_assert (REG_P (op0));
14331 emit_insn (par);
14332 emit_insn (gen_rtx_SET (op0, vperm));
14335 /* Generate code for a "stvxl" built-in for a little endian target with
14336 -maltivec=be specified. Issue the store preceded by an element-reversing
14337 permute. */
14338 void
14339 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14341 rtx tmp = gen_reg_rtx (mode);
14342 rtx store = gen_rtx_SET (op0, tmp);
14343 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14344 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14345 rtx sel = swap_selector_for_mode (mode);
14346 rtx vperm;
14348 gcc_assert (REG_P (op1));
14349 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14350 emit_insn (gen_rtx_SET (tmp, vperm));
14351 emit_insn (par);
14354 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14355 specified. Issue the store preceded by an element-reversing permute. */
14356 void
14357 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14359 machine_mode inner_mode = GET_MODE_INNER (mode);
14360 rtx tmp = gen_reg_rtx (mode);
14361 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14362 rtx sel = swap_selector_for_mode (mode);
14363 rtx vperm;
14365 gcc_assert (REG_P (op1));
14366 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14367 emit_insn (gen_rtx_SET (tmp, vperm));
14368 emit_insn (gen_rtx_SET (op0, stvx));
14371 static rtx
14372 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14374 rtx pat, addr;
14375 tree arg0 = CALL_EXPR_ARG (exp, 0);
14376 tree arg1 = CALL_EXPR_ARG (exp, 1);
14377 machine_mode tmode = insn_data[icode].operand[0].mode;
14378 machine_mode mode0 = Pmode;
14379 machine_mode mode1 = Pmode;
14380 rtx op0 = expand_normal (arg0);
14381 rtx op1 = expand_normal (arg1);
14383 if (icode == CODE_FOR_nothing)
14384 /* Builtin not supported on this processor. */
14385 return 0;
14387 /* If we got invalid arguments bail out before generating bad rtl. */
14388 if (arg0 == error_mark_node || arg1 == error_mark_node)
14389 return const0_rtx;
14391 if (target == 0
14392 || GET_MODE (target) != tmode
14393 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14394 target = gen_reg_rtx (tmode);
14396 op1 = copy_to_mode_reg (mode1, op1);
14398 /* For LVX, express the RTL accurately by ANDing the address with -16.
14399 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14400 so the raw address is fine. */
14401 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14402 || icode == CODE_FOR_altivec_lvx_v2di_2op
14403 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14404 || icode == CODE_FOR_altivec_lvx_v4si_2op
14405 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14406 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14408 rtx rawaddr;
14409 if (op0 == const0_rtx)
14410 rawaddr = op1;
14411 else
14413 op0 = copy_to_mode_reg (mode0, op0);
14414 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14416 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14417 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14419 /* For -maltivec=be, emit the load and follow it up with a
14420 permute to swap the elements. */
14421 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14423 rtx temp = gen_reg_rtx (tmode);
14424 emit_insn (gen_rtx_SET (temp, addr));
14426 rtx sel = swap_selector_for_mode (tmode);
14427 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14428 UNSPEC_VPERM);
14429 emit_insn (gen_rtx_SET (target, vperm));
14431 else
14432 emit_insn (gen_rtx_SET (target, addr));
14434 else
14436 if (op0 == const0_rtx)
14437 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14438 else
14440 op0 = copy_to_mode_reg (mode0, op0);
14441 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14442 gen_rtx_PLUS (Pmode, op1, op0));
14445 pat = GEN_FCN (icode) (target, addr);
14446 if (! pat)
14447 return 0;
14448 emit_insn (pat);
14451 return target;
14454 static rtx
14455 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14457 tree arg0 = CALL_EXPR_ARG (exp, 0);
14458 tree arg1 = CALL_EXPR_ARG (exp, 1);
14459 tree arg2 = CALL_EXPR_ARG (exp, 2);
14460 rtx op0 = expand_normal (arg0);
14461 rtx op1 = expand_normal (arg1);
14462 rtx op2 = expand_normal (arg2);
14463 rtx pat, addr;
14464 machine_mode tmode = insn_data[icode].operand[0].mode;
14465 machine_mode mode1 = Pmode;
14466 machine_mode mode2 = Pmode;
14468 /* Invalid arguments. Bail before doing anything stoopid! */
14469 if (arg0 == error_mark_node
14470 || arg1 == error_mark_node
14471 || arg2 == error_mark_node)
14472 return const0_rtx;
14474 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14475 op0 = copy_to_mode_reg (tmode, op0);
14477 op2 = copy_to_mode_reg (mode2, op2);
14479 if (op1 == const0_rtx)
14481 addr = gen_rtx_MEM (tmode, op2);
14483 else
14485 op1 = copy_to_mode_reg (mode1, op1);
14486 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14489 pat = GEN_FCN (icode) (addr, op0);
14490 if (pat)
14491 emit_insn (pat);
14492 return NULL_RTX;
14495 static rtx
14496 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14498 rtx pat;
14499 tree arg0 = CALL_EXPR_ARG (exp, 0);
14500 tree arg1 = CALL_EXPR_ARG (exp, 1);
14501 tree arg2 = CALL_EXPR_ARG (exp, 2);
14502 rtx op0 = expand_normal (arg0);
14503 rtx op1 = expand_normal (arg1);
14504 rtx op2 = expand_normal (arg2);
14505 machine_mode mode0 = insn_data[icode].operand[0].mode;
14506 machine_mode mode1 = insn_data[icode].operand[1].mode;
14507 machine_mode mode2 = insn_data[icode].operand[2].mode;
14509 if (icode == CODE_FOR_nothing)
14510 /* Builtin not supported on this processor. */
14511 return NULL_RTX;
14513 /* If we got invalid arguments bail out before generating bad rtl. */
14514 if (arg0 == error_mark_node
14515 || arg1 == error_mark_node
14516 || arg2 == error_mark_node)
14517 return NULL_RTX;
14519 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14520 op0 = copy_to_mode_reg (mode0, op0);
14521 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14522 op1 = copy_to_mode_reg (mode1, op1);
14523 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14524 op2 = copy_to_mode_reg (mode2, op2);
14526 pat = GEN_FCN (icode) (op0, op1, op2);
14527 if (pat)
14528 emit_insn (pat);
14530 return NULL_RTX;
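/* The expander above feeds the ISA 3.0 stxvl ("store VSX vector with
   length") pattern.  At the source level it is normally reached via
   the vec_xst_len intrinsic; a hedged usage sketch (assumes
   -mcpu=power9 and <altivec.h>):

     #include <stddef.h>
     #include <altivec.h>

     // Store only the first n bytes (0..15) of V to BUF; the
     // remaining bytes of BUF are left untouched.
     void
     store_prefix (vector unsigned char v, unsigned char *buf, size_t n)
     {
       vec_xst_len (v, buf, n);
     }
*/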
14533 static rtx
14534 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14536 tree arg0 = CALL_EXPR_ARG (exp, 0);
14537 tree arg1 = CALL_EXPR_ARG (exp, 1);
14538 tree arg2 = CALL_EXPR_ARG (exp, 2);
14539 rtx op0 = expand_normal (arg0);
14540 rtx op1 = expand_normal (arg1);
14541 rtx op2 = expand_normal (arg2);
14542 rtx pat, addr, rawaddr;
14543 machine_mode tmode = insn_data[icode].operand[0].mode;
14544 machine_mode smode = insn_data[icode].operand[1].mode;
14545 machine_mode mode1 = Pmode;
14546 machine_mode mode2 = Pmode;
14548 /* Invalid arguments. Bail out before generating bad RTL. */
14549 if (arg0 == error_mark_node
14550 || arg1 == error_mark_node
14551 || arg2 == error_mark_node)
14552 return const0_rtx;
14554 op2 = copy_to_mode_reg (mode2, op2);
14556 /* For STVX, express the RTL accurately by ANDing the address with -16.
14557 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14558 so the raw address is fine. */
14559 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14560 || icode == CODE_FOR_altivec_stvx_v2di_2op
14561 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14562 || icode == CODE_FOR_altivec_stvx_v4si_2op
14563 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14564 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14566 if (op1 == const0_rtx)
14567 rawaddr = op2;
14568 else
14570 op1 = copy_to_mode_reg (mode1, op1);
14571 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14574 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14575 addr = gen_rtx_MEM (tmode, addr);
14577 op0 = copy_to_mode_reg (tmode, op0);
14579 /* For -maltivec=be, emit a permute to swap the elements, followed
14580 by the store. */
14581 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14583 rtx temp = gen_reg_rtx (tmode);
14584 rtx sel = swap_selector_for_mode (tmode);
14585 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14586 UNSPEC_VPERM);
14587 emit_insn (gen_rtx_SET (temp, vperm));
14588 emit_insn (gen_rtx_SET (addr, temp));
14590 else
14591 emit_insn (gen_rtx_SET (addr, op0));
14593 else
14595 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14596 op0 = copy_to_mode_reg (smode, op0);
14598 if (op1 == const0_rtx)
14599 addr = gen_rtx_MEM (tmode, op2);
14600 else
14602 op1 = copy_to_mode_reg (mode1, op1);
14603 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14606 pat = GEN_FCN (icode) (addr, op0);
14607 if (pat)
14608 emit_insn (pat);
14611 return NULL_RTX;
14614 /* Return the appropriate SPR number associated with the given builtin. */
14615 static inline HOST_WIDE_INT
14616 htm_spr_num (enum rs6000_builtins code)
14618 if (code == HTM_BUILTIN_GET_TFHAR
14619 || code == HTM_BUILTIN_SET_TFHAR)
14620 return TFHAR_SPR;
14621 else if (code == HTM_BUILTIN_GET_TFIAR
14622 || code == HTM_BUILTIN_SET_TFIAR)
14623 return TFIAR_SPR;
14624 else if (code == HTM_BUILTIN_GET_TEXASR
14625 || code == HTM_BUILTIN_SET_TEXASR)
14626 return TEXASR_SPR;
14627 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14628 || code == HTM_BUILTIN_SET_TEXASRU);
14629 return TEXASRU_SPR;
14632 /* Return the appropriate SPR regno associated with the given builtin. */
14633 static inline HOST_WIDE_INT
14634 htm_spr_regno (enum rs6000_builtins code)
14636 if (code == HTM_BUILTIN_GET_TFHAR
14637 || code == HTM_BUILTIN_SET_TFHAR)
14638 return TFHAR_REGNO;
14639 else if (code == HTM_BUILTIN_GET_TFIAR
14640 || code == HTM_BUILTIN_SET_TFIAR)
14641 return TFIAR_REGNO;
14642 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14643 || code == HTM_BUILTIN_SET_TEXASR
14644 || code == HTM_BUILTIN_GET_TEXASRU
14645 || code == HTM_BUILTIN_SET_TEXASRU);
14646 return TEXASR_REGNO;
14649 /* Return the correct ICODE value depending on whether we are
14650 setting or reading the HTM SPRs. */
14651 static inline enum insn_code
14652 rs6000_htm_spr_icode (bool nonvoid)
14654 if (nonvoid)
14655 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14656 else
14657 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
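/* In short: the "get" builtins (nonvoid) expand through the
   htm_mfspr_{si,di} patterns and the "set" builtins through
   htm_mtspr_{si,di}; htm_expand_builtin below appends the SPR number
   and regno chosen by the two helpers above.  A usage sketch (assumes
   -mhtm; __builtin_get_tfiar is one of the documented HTM builtins):

     unsigned long
     read_failure_address (void)
     {
       return __builtin_get_tfiar ();   // mfspr from TFIAR
                                        // (htm_mfspr_di on 64-bit)
     }
*/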
14660 /* Expand the HTM builtin in EXP and store the result in TARGET.
14661 Store true in *EXPANDEDP if we found a builtin to expand. */
14662 static rtx
14663 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14665 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14666 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14667 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14668 const struct builtin_description *d;
14669 size_t i;
14671 *expandedp = true;
14673 if (!TARGET_POWERPC64
14674 && (fcode == HTM_BUILTIN_TABORTDC
14675 || fcode == HTM_BUILTIN_TABORTDCI))
14677 size_t uns_fcode = (size_t) fcode;
14678 const char *name = rs6000_builtin_info[uns_fcode].name;
14679 error ("builtin %s is only valid in 64-bit mode", name);
14680 return const0_rtx;
14683 /* Expand the HTM builtins. */
14684 d = bdesc_htm;
14685 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14686 if (d->code == fcode)
14688 rtx op[MAX_HTM_OPERANDS], pat;
14689 int nopnds = 0;
14690 tree arg;
14691 call_expr_arg_iterator iter;
14692 unsigned attr = rs6000_builtin_info[fcode].attr;
14693 enum insn_code icode = d->icode;
14694 const struct insn_operand_data *insn_op;
14695 bool uses_spr = (attr & RS6000_BTC_SPR);
14696 rtx cr = NULL_RTX;
14698 if (uses_spr)
14699 icode = rs6000_htm_spr_icode (nonvoid);
14700 insn_op = &insn_data[icode].operand[0];
14702 if (nonvoid)
14704 machine_mode tmode = (uses_spr) ? insn_op->mode : SImode;
14705 if (!target
14706 || GET_MODE (target) != tmode
14707 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14708 target = gen_reg_rtx (tmode);
14709 if (uses_spr)
14710 op[nopnds++] = target;
14713 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14715 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14716 return const0_rtx;
14718 insn_op = &insn_data[icode].operand[nopnds];
14720 op[nopnds] = expand_normal (arg);
14722 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14724 if (!strcmp (insn_op->constraint, "n"))
14726 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14727 if (!CONST_INT_P (op[nopnds]))
14728 error ("argument %d must be an unsigned literal", arg_num);
14729 else
14730 error ("argument %d is an unsigned literal that is "
14731 "out of range", arg_num);
14732 return const0_rtx;
14734 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14737 nopnds++;
14740 /* Handle the builtins for extended mnemonics. These accept
14741 no arguments, but map to builtins that take arguments. */
14742 switch (fcode)
14744 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14745 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14746 op[nopnds++] = GEN_INT (1);
14747 if (flag_checking)
14748 attr |= RS6000_BTC_UNARY;
14749 break;
14750 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14751 op[nopnds++] = GEN_INT (0);
14752 if (flag_checking)
14753 attr |= RS6000_BTC_UNARY;
14754 break;
14755 default:
14756 break;
14759 /* If this builtin accesses SPRs, then pass in the appropriate
14760 SPR number and SPR regno as the last two operands. */
14761 if (uses_spr)
14763 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14764 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14765 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14767 /* If this builtin accesses a CR, then pass in a scratch
14768 CR as the last operand. */
14769 else if (attr & RS6000_BTC_CR)
14770 cr = gen_reg_rtx (CCmode);
14771 op[nopnds++] = cr;
14774 if (flag_checking)
14776 int expected_nopnds = 0;
14777 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14778 expected_nopnds = 1;
14779 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14780 expected_nopnds = 2;
14781 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14782 expected_nopnds = 3;
14783 if (!(attr & RS6000_BTC_VOID))
14784 expected_nopnds += 1;
14785 if (uses_spr)
14786 expected_nopnds += 2;
14788 gcc_assert (nopnds == expected_nopnds
14789 && nopnds <= MAX_HTM_OPERANDS);
14792 switch (nopnds)
14794 case 1:
14795 pat = GEN_FCN (icode) (op[0]);
14796 break;
14797 case 2:
14798 pat = GEN_FCN (icode) (op[0], op[1]);
14799 break;
14800 case 3:
14801 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14802 break;
14803 case 4:
14804 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14805 break;
14806 default:
14807 gcc_unreachable ();
14809 if (!pat)
14810 return NULL_RTX;
14811 emit_insn (pat);
14813 if (attr & RS6000_BTC_CR)
14815 if (fcode == HTM_BUILTIN_TBEGIN)
14817 /* Emit code to set TARGET to true or false depending on
14818 whether the tbegin. instruction succeeded or failed
14819 to start a transaction. We do this by placing the one's
14820 complement of CR's EQ bit into TARGET. */
14821 rtx scratch = gen_reg_rtx (SImode);
14822 emit_insn (gen_rtx_SET (scratch,
14823 gen_rtx_EQ (SImode, cr,
14824 const0_rtx)));
14825 emit_insn (gen_rtx_SET (target,
14826 gen_rtx_XOR (SImode, scratch,
14827 GEN_INT (1))));
14829 else
14831 /* Emit code to copy the 4-bit condition register field
14832 CR into the least significant end of register TARGET. */
14833 rtx scratch1 = gen_reg_rtx (SImode);
14834 rtx scratch2 = gen_reg_rtx (SImode);
14835 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14836 emit_insn (gen_movcc (subreg, cr));
14837 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14838 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14842 if (nonvoid)
14843 return target;
14844 return const0_rtx;
14847 *expandedp = false;
14848 return NULL_RTX;
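/* The CR handling above is what makes the usual user-level idiom
   work: __builtin_tbegin returns 1 when the transaction starts and 0
   when it fails, i.e. the complement of the EQ bit that tbegin. sets.
   A hedged sketch of typical use (assumes -mhtm; the failure path
   here is purely illustrative):

     int counter;

     void
     transactional_increment (void)
     {
       if (__builtin_tbegin (0))
         {
           counter++;                 // transactional path
           __builtin_tend (0);
         }
       else
         counter++;                   // failure path; a real fallback
                                      // would take a lock instead
     }
*/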
14851 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14853 static rtx
14854 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14855 rtx target)
14857 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14858 if (fcode == RS6000_BUILTIN_CPU_INIT)
14859 return const0_rtx;
14861 if (target == 0 || GET_MODE (target) != SImode)
14862 target = gen_reg_rtx (SImode);
14864 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14865 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14866 /* The target_clones attribute can produce an ARRAY_REF of a STRING_CST
14867 instead of a bare STRING_CST; convert it back to a STRING_CST. */
14868 if (TREE_CODE (arg) == ARRAY_REF
14869 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14870 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14871 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14872 arg = TREE_OPERAND (arg, 0);
14874 if (TREE_CODE (arg) != STRING_CST)
14876 error ("builtin %s only accepts a string argument",
14877 rs6000_builtin_info[(size_t) fcode].name);
14878 return const0_rtx;
14881 if (fcode == RS6000_BUILTIN_CPU_IS)
14883 const char *cpu = TREE_STRING_POINTER (arg);
14884 rtx cpuid = NULL_RTX;
14885 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14886 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14888 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14889 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14890 break;
14892 if (cpuid == NULL_RTX)
14894 /* Invalid CPU argument. */
14895 error ("cpu %s is an invalid argument to builtin %s",
14896 cpu, rs6000_builtin_info[(size_t) fcode].name);
14897 return const0_rtx;
14900 rtx platform = gen_reg_rtx (SImode);
14901 rtx tcbmem = gen_const_mem (SImode,
14902 gen_rtx_PLUS (Pmode,
14903 gen_rtx_REG (Pmode, TLS_REGNUM),
14904 GEN_INT (TCB_PLATFORM_OFFSET)));
14905 emit_move_insn (platform, tcbmem);
14906 emit_insn (gen_eqsi3 (target, platform, cpuid));
14908 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14910 const char *hwcap = TREE_STRING_POINTER (arg);
14911 rtx mask = NULL_RTX;
14912 int hwcap_offset;
14913 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14914 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14916 mask = GEN_INT (cpu_supports_info[i].mask);
14917 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14918 break;
14920 if (mask == NULL_RTX)
14922 /* Invalid HWCAP argument. */
14923 error ("hwcap %s is an invalid argument to builtin %s",
14924 hwcap, rs6000_builtin_info[(size_t) fcode].name);
14925 return const0_rtx;
14928 rtx tcb_hwcap = gen_reg_rtx (SImode);
14929 rtx tcbmem = gen_const_mem (SImode,
14930 gen_rtx_PLUS (Pmode,
14931 gen_rtx_REG (Pmode, TLS_REGNUM),
14932 GEN_INT (hwcap_offset)));
14933 emit_move_insn (tcb_hwcap, tcbmem);
14934 rtx scratch1 = gen_reg_rtx (SImode);
14935 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14936 rtx scratch2 = gen_reg_rtx (SImode);
14937 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14938 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14940 else
14941 gcc_unreachable ();
14943 /* Record that we have expanded a CPU builtin, so that we can later
14944 emit a reference to the special symbol exported by LIBC to ensure we
14945 do not link against an old LIBC that doesn't support this feature. */
14946 cpu_builtin_p = true;
14948 #else
14949 warning (0, "%s requires GLIBC 2.23 or newer, which exports the "
14950 "hardware capability bits", rs6000_builtin_info[(size_t) fcode].name);
14952 /* For old LIBCs, always return FALSE. */
14953 emit_move_insn (target, GEN_INT (0));
14954 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14956 return target;
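/* Usage sketch for the expander above (assumes glibc 2.23 or newer,
   which exports the TCB fields read here; the cpu and hwcap strings
   come from the cpu_is_info/cpu_supports_info tables):

     #include <stdio.h>

     void
     report (void)
     {
       __builtin_cpu_init ();                // a nop on powerpc, as above
       if (__builtin_cpu_is ("power9"))
         puts ("running on POWER9");
       if (__builtin_cpu_supports ("vsx"))
         puts ("VSX available");
     }

   Both predicates compile to a load from the thread control block
   followed by a compare or mask, exactly as emitted above; no system
   call is involved.  */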
14959 static rtx
14960 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14962 rtx pat;
14963 tree arg0 = CALL_EXPR_ARG (exp, 0);
14964 tree arg1 = CALL_EXPR_ARG (exp, 1);
14965 tree arg2 = CALL_EXPR_ARG (exp, 2);
14966 rtx op0 = expand_normal (arg0);
14967 rtx op1 = expand_normal (arg1);
14968 rtx op2 = expand_normal (arg2);
14969 machine_mode tmode = insn_data[icode].operand[0].mode;
14970 machine_mode mode0 = insn_data[icode].operand[1].mode;
14971 machine_mode mode1 = insn_data[icode].operand[2].mode;
14972 machine_mode mode2 = insn_data[icode].operand[3].mode;
14974 if (icode == CODE_FOR_nothing)
14975 /* Builtin not supported on this processor. */
14976 return 0;
14978 /* If we got invalid arguments bail out before generating bad rtl. */
14979 if (arg0 == error_mark_node
14980 || arg1 == error_mark_node
14981 || arg2 == error_mark_node)
14982 return const0_rtx;
14984 /* Check and prepare the argument depending on the instruction code.
14986 Note that a switch statement instead of this sequence of tests
14987 would be incorrect, as many of the CODE_FOR values could be
14988 CODE_FOR_nothing, which would yield multiple case labels with
14989 identical values. We would never reach here at runtime in that
14990 situation anyway. */
14991 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14992 || icode == CODE_FOR_altivec_vsldoi_v2df
14993 || icode == CODE_FOR_altivec_vsldoi_v4si
14994 || icode == CODE_FOR_altivec_vsldoi_v8hi
14995 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14997 /* Only allow 4-bit unsigned literals. */
14998 STRIP_NOPS (arg2);
14999 if (TREE_CODE (arg2) != INTEGER_CST
15000 || TREE_INT_CST_LOW (arg2) & ~0xf)
15002 error ("argument 3 must be a 4-bit unsigned literal");
15003 return CONST0_RTX (tmode);
15006 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15007 || icode == CODE_FOR_vsx_xxpermdi_v2di
15008 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15009 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15010 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15011 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15012 || icode == CODE_FOR_vsx_xxpermdi_v4si
15013 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15014 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15015 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15016 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15017 || icode == CODE_FOR_vsx_xxsldwi_v4si
15018 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15019 || icode == CODE_FOR_vsx_xxsldwi_v2di
15020 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15022 /* Only allow 2-bit unsigned literals. */
15023 STRIP_NOPS (arg2);
15024 if (TREE_CODE (arg2) != INTEGER_CST
15025 || TREE_INT_CST_LOW (arg2) & ~0x3)
15027 error ("argument 3 must be a 2-bit unsigned literal");
15028 return CONST0_RTX (tmode);
15031 else if (icode == CODE_FOR_vsx_set_v2df
15032 || icode == CODE_FOR_vsx_set_v2di
15033 || icode == CODE_FOR_bcdadd
15034 || icode == CODE_FOR_bcdadd_lt
15035 || icode == CODE_FOR_bcdadd_eq
15036 || icode == CODE_FOR_bcdadd_gt
15037 || icode == CODE_FOR_bcdsub
15038 || icode == CODE_FOR_bcdsub_lt
15039 || icode == CODE_FOR_bcdsub_eq
15040 || icode == CODE_FOR_bcdsub_gt)
15042 /* Only allow 1-bit unsigned literals. */
15043 STRIP_NOPS (arg2);
15044 if (TREE_CODE (arg2) != INTEGER_CST
15045 || TREE_INT_CST_LOW (arg2) & ~0x1)
15047 error ("argument 3 must be a 1-bit unsigned literal");
15048 return CONST0_RTX (tmode);
15051 else if (icode == CODE_FOR_dfp_ddedpd_dd
15052 || icode == CODE_FOR_dfp_ddedpd_td)
15054 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15055 STRIP_NOPS (arg0);
15056 if (TREE_CODE (arg0) != INTEGER_CST
15057 || TREE_INT_CST_LOW (arg0) & ~0x3)
15059 error ("argument 1 must be 0 or 2");
15060 return CONST0_RTX (tmode);
15063 else if (icode == CODE_FOR_dfp_denbcd_dd
15064 || icode == CODE_FOR_dfp_denbcd_td)
15066 /* Only allow 1-bit unsigned literals. */
15067 STRIP_NOPS (arg0);
15068 if (TREE_CODE (arg0) != INTEGER_CST
15069 || TREE_INT_CST_LOW (arg0) & ~0x1)
15071 error ("argument 1 must be a 1-bit unsigned literal");
15072 return CONST0_RTX (tmode);
15075 else if (icode == CODE_FOR_dfp_dscli_dd
15076 || icode == CODE_FOR_dfp_dscli_td
15077 || icode == CODE_FOR_dfp_dscri_dd
15078 || icode == CODE_FOR_dfp_dscri_td)
15080 /* Only allow 6-bit unsigned literals. */
15081 STRIP_NOPS (arg1);
15082 if (TREE_CODE (arg1) != INTEGER_CST
15083 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15085 error ("argument 2 must be a 6-bit unsigned literal");
15086 return CONST0_RTX (tmode);
15089 else if (icode == CODE_FOR_crypto_vshasigmaw
15090 || icode == CODE_FOR_crypto_vshasigmad)
15092 /* Check whether the 2nd and 3rd arguments are integer constants and in
15093 range and prepare arguments. */
15094 STRIP_NOPS (arg1);
15095 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
15097 error ("argument 2 must be 0 or 1");
15098 return CONST0_RTX (tmode);
15101 STRIP_NOPS (arg2);
15102 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
15104 error ("argument 3 must be in the range 0..15");
15105 return CONST0_RTX (tmode);
15109 if (target == 0
15110 || GET_MODE (target) != tmode
15111 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15112 target = gen_reg_rtx (tmode);
15114 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15115 op0 = copy_to_mode_reg (mode0, op0);
15116 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15117 op1 = copy_to_mode_reg (mode1, op1);
15118 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15119 op2 = copy_to_mode_reg (mode2, op2);
15121 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15122 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15123 else
15124 pat = GEN_FCN (icode) (target, op0, op1, op2);
15125 if (! pat)
15126 return 0;
15127 emit_insn (pat);
15129 return target;
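/* All of the literal checks above share one pattern: a value fits an
   N-bit unsigned literal iff masking away its low N bits leaves zero.
   A self-contained sketch of that test (illustrative helper, not part
   of this file):

     #include <stdbool.h>

     // True iff VAL is representable as an N-bit unsigned literal
     // (valid for 0 < n < 64).
     static bool
     fits_unsigned_literal (unsigned long long val, unsigned n)
     {
       return (val & ~((1ull << n) - 1)) == 0;
     }

   e.g. fits_unsigned_literal (x, 4) mirrors the "& ~0xf" test applied
   to vsldoi's third argument.  */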
15132 /* Expand the lvx builtins. */
15133 static rtx
15134 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15136 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15137 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15138 tree arg0;
15139 machine_mode tmode, mode0;
15140 rtx pat, op0;
15141 enum insn_code icode;
15143 switch (fcode)
15145 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15146 icode = CODE_FOR_vector_altivec_load_v16qi;
15147 break;
15148 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15149 icode = CODE_FOR_vector_altivec_load_v8hi;
15150 break;
15151 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15152 icode = CODE_FOR_vector_altivec_load_v4si;
15153 break;
15154 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15155 icode = CODE_FOR_vector_altivec_load_v4sf;
15156 break;
15157 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15158 icode = CODE_FOR_vector_altivec_load_v2df;
15159 break;
15160 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15161 icode = CODE_FOR_vector_altivec_load_v2di;
15162 break;
15163 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15164 icode = CODE_FOR_vector_altivec_load_v1ti;
15165 break;
15166 default:
15167 *expandedp = false;
15168 return NULL_RTX;
15171 *expandedp = true;
15173 arg0 = CALL_EXPR_ARG (exp, 0);
15174 op0 = expand_normal (arg0);
15175 tmode = insn_data[icode].operand[0].mode;
15176 mode0 = insn_data[icode].operand[1].mode;
15178 if (target == 0
15179 || GET_MODE (target) != tmode
15180 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15181 target = gen_reg_rtx (tmode);
15183 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15184 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15186 pat = GEN_FCN (icode) (target, op0);
15187 if (! pat)
15188 return 0;
15189 emit_insn (pat);
15190 return target;
15193 /* Expand the stvx builtins. */
15194 static rtx
15195 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15196 bool *expandedp)
15198 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15199 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15200 tree arg0, arg1;
15201 machine_mode mode0, mode1;
15202 rtx pat, op0, op1;
15203 enum insn_code icode;
15205 switch (fcode)
15207 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15208 icode = CODE_FOR_vector_altivec_store_v16qi;
15209 break;
15210 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15211 icode = CODE_FOR_vector_altivec_store_v8hi;
15212 break;
15213 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15214 icode = CODE_FOR_vector_altivec_store_v4si;
15215 break;
15216 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15217 icode = CODE_FOR_vector_altivec_store_v4sf;
15218 break;
15219 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15220 icode = CODE_FOR_vector_altivec_store_v2df;
15221 break;
15222 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15223 icode = CODE_FOR_vector_altivec_store_v2di;
15224 break;
15225 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15226 icode = CODE_FOR_vector_altivec_store_v1ti;
15227 break;
15228 default:
15229 *expandedp = false;
15230 return NULL_RTX;
15233 arg0 = CALL_EXPR_ARG (exp, 0);
15234 arg1 = CALL_EXPR_ARG (exp, 1);
15235 op0 = expand_normal (arg0);
15236 op1 = expand_normal (arg1);
15237 mode0 = insn_data[icode].operand[0].mode;
15238 mode1 = insn_data[icode].operand[1].mode;
15240 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15241 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15242 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15243 op1 = copy_to_mode_reg (mode1, op1);
15245 pat = GEN_FCN (icode) (op0, op1);
15246 if (pat)
15247 emit_insn (pat);
15249 *expandedp = true;
15250 return NULL_RTX;
15253 /* Expand the dst builtins. */
15254 static rtx
15255 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15256 bool *expandedp)
15258 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15259 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15260 tree arg0, arg1, arg2;
15261 machine_mode mode0, mode1;
15262 rtx pat, op0, op1, op2;
15263 const struct builtin_description *d;
15264 size_t i;
15266 *expandedp = false;
15268 /* Handle DST variants. */
15269 d = bdesc_dst;
15270 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15271 if (d->code == fcode)
15273 arg0 = CALL_EXPR_ARG (exp, 0);
15274 arg1 = CALL_EXPR_ARG (exp, 1);
15275 arg2 = CALL_EXPR_ARG (exp, 2);
15276 op0 = expand_normal (arg0);
15277 op1 = expand_normal (arg1);
15278 op2 = expand_normal (arg2);
15279 mode0 = insn_data[d->icode].operand[0].mode;
15280 mode1 = insn_data[d->icode].operand[1].mode;
15282 /* Invalid arguments, bail out before generating bad rtl. */
15283 if (arg0 == error_mark_node
15284 || arg1 == error_mark_node
15285 || arg2 == error_mark_node)
15286 return const0_rtx;
15288 *expandedp = true;
15289 STRIP_NOPS (arg2);
15290 if (TREE_CODE (arg2) != INTEGER_CST
15291 || TREE_INT_CST_LOW (arg2) & ~0x3)
15293 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15294 return const0_rtx;
15297 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15298 op0 = copy_to_mode_reg (Pmode, op0);
15299 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15300 op1 = copy_to_mode_reg (mode1, op1);
15302 pat = GEN_FCN (d->icode) (op0, op1, op2);
15303 if (pat != 0)
15304 emit_insn (pat);
15306 return NULL_RTX;
15309 return NULL_RTX;
15312 /* Expand vec_init builtin. */
15313 static rtx
15314 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15316 machine_mode tmode = TYPE_MODE (type);
15317 machine_mode inner_mode = GET_MODE_INNER (tmode);
15318 int i, n_elt = GET_MODE_NUNITS (tmode);
15320 gcc_assert (VECTOR_MODE_P (tmode));
15321 gcc_assert (n_elt == call_expr_nargs (exp));
15323 if (!target || !register_operand (target, tmode))
15324 target = gen_reg_rtx (tmode);
15326 /* If we have a vector composed of a single element, such as V1TImode, do
15327 the initialization directly. */
15328 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15330 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15331 emit_move_insn (target, gen_lowpart (tmode, x));
15333 else
15335 rtvec v = rtvec_alloc (n_elt);
15337 for (i = 0; i < n_elt; ++i)
15339 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15340 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15343 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15346 return target;
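/* Vector constructors are the usual source-level way to reach
   rs6000_expand_vector_init, the helper used above; e.g. (assumes
   -maltivec):

     vector int
     make_vec (int a, int b, int c, int d)
     {
       return (vector int) { a, b, c, d };   // n_elt == 4 path
     }

   For a single-element vector such as V1TImode the expander skips the
   PARALLEL and emits one move, per the n_elt == 1 branch above.  */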
15349 /* Return the integer constant in ARG. Constrain it to be in the range
15350 of the subparts of VEC_TYPE; issue an error if not. */
15352 static int
15353 get_element_number (tree vec_type, tree arg)
15355 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15357 if (!tree_fits_uhwi_p (arg)
15358 || (elt = tree_to_uhwi (arg), elt > max))
15360 error ("selector must be an integer constant in the range 0..%wi", max);
15361 return 0;
15364 return elt;
15367 /* Expand vec_set builtin. */
15368 static rtx
15369 altivec_expand_vec_set_builtin (tree exp)
15371 machine_mode tmode, mode1;
15372 tree arg0, arg1, arg2;
15373 int elt;
15374 rtx op0, op1;
15376 arg0 = CALL_EXPR_ARG (exp, 0);
15377 arg1 = CALL_EXPR_ARG (exp, 1);
15378 arg2 = CALL_EXPR_ARG (exp, 2);
15380 tmode = TYPE_MODE (TREE_TYPE (arg0));
15381 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15382 gcc_assert (VECTOR_MODE_P (tmode));
15384 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15385 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15386 elt = get_element_number (TREE_TYPE (arg0), arg2);
15388 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15389 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15391 op0 = force_reg (tmode, op0);
15392 op1 = force_reg (mode1, op1);
15394 rs6000_expand_vector_set (op0, op1, elt);
15396 return op0;
15399 /* Expand vec_ext builtin. */
15400 static rtx
15401 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15403 machine_mode tmode, mode0;
15404 tree arg0, arg1;
15405 rtx op0;
15406 rtx op1;
15408 arg0 = CALL_EXPR_ARG (exp, 0);
15409 arg1 = CALL_EXPR_ARG (exp, 1);
15411 op0 = expand_normal (arg0);
15412 op1 = expand_normal (arg1);
15414 /* Call get_element_number to validate arg1 if it is a constant. */
15415 if (TREE_CODE (arg1) == INTEGER_CST)
15416 (void) get_element_number (TREE_TYPE (arg0), arg1);
15418 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15419 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15420 gcc_assert (VECTOR_MODE_P (mode0));
15422 op0 = force_reg (mode0, op0);
15424 if (optimize || !target || !register_operand (target, tmode))
15425 target = gen_reg_rtx (tmode);
15427 rs6000_expand_vector_extract (target, op0, op1);
15429 return target;
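/* At the source level these two expanders correspond to the
   vec_insert and vec_extract AltiVec intrinsics (sketch assumes
   -maltivec and <altivec.h>):

     #include <altivec.h>

     int
     get_lane (vector int v, int i)
     {
       return vec_extract (v, i);    // altivec_expand_vec_ext_builtin
     }

     vector int
     set_lane (vector int v, int x, int i)
     {
       return vec_insert (x, v, i);  // altivec_expand_vec_set_builtin
     }

   A constant selector is range-checked by get_element_number above; a
   variable selector is interpreted modulo the element count.  */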
15432 /* Expand the builtin in EXP and store the result in TARGET. Store
15433 true in *EXPANDEDP if we found a builtin to expand. */
15434 static rtx
15435 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15437 const struct builtin_description *d;
15438 size_t i;
15439 enum insn_code icode;
15440 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15441 tree arg0, arg1, arg2;
15442 rtx op0, pat;
15443 machine_mode tmode, mode0;
15444 enum rs6000_builtins fcode
15445 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15447 if (rs6000_overloaded_builtin_p (fcode))
15449 *expandedp = true;
15450 error ("unresolved overload for Altivec builtin %qF", fndecl);
15452 /* Given it is invalid, just generate a normal call. */
15453 return expand_call (exp, target, false);
15456 target = altivec_expand_ld_builtin (exp, target, expandedp);
15457 if (*expandedp)
15458 return target;
15460 target = altivec_expand_st_builtin (exp, target, expandedp);
15461 if (*expandedp)
15462 return target;
15464 target = altivec_expand_dst_builtin (exp, target, expandedp);
15465 if (*expandedp)
15466 return target;
15468 *expandedp = true;
15470 switch (fcode)
15472 case ALTIVEC_BUILTIN_STVX_V2DF:
15473 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15474 case ALTIVEC_BUILTIN_STVX_V2DI:
15475 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15476 case ALTIVEC_BUILTIN_STVX_V4SF:
15477 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15478 case ALTIVEC_BUILTIN_STVX:
15479 case ALTIVEC_BUILTIN_STVX_V4SI:
15480 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15481 case ALTIVEC_BUILTIN_STVX_V8HI:
15482 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15483 case ALTIVEC_BUILTIN_STVX_V16QI:
15484 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15485 case ALTIVEC_BUILTIN_STVEBX:
15486 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15487 case ALTIVEC_BUILTIN_STVEHX:
15488 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15489 case ALTIVEC_BUILTIN_STVEWX:
15490 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15491 case ALTIVEC_BUILTIN_STVXL_V2DF:
15492 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15493 case ALTIVEC_BUILTIN_STVXL_V2DI:
15494 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15495 case ALTIVEC_BUILTIN_STVXL_V4SF:
15496 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15497 case ALTIVEC_BUILTIN_STVXL:
15498 case ALTIVEC_BUILTIN_STVXL_V4SI:
15499 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15500 case ALTIVEC_BUILTIN_STVXL_V8HI:
15501 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15502 case ALTIVEC_BUILTIN_STVXL_V16QI:
15503 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15505 case ALTIVEC_BUILTIN_STVLX:
15506 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15507 case ALTIVEC_BUILTIN_STVLXL:
15508 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15509 case ALTIVEC_BUILTIN_STVRX:
15510 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15511 case ALTIVEC_BUILTIN_STVRXL:
15512 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15514 case P9V_BUILTIN_STXVL:
15515 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15517 case VSX_BUILTIN_STXVD2X_V1TI:
15518 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15519 case VSX_BUILTIN_STXVD2X_V2DF:
15520 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15521 case VSX_BUILTIN_STXVD2X_V2DI:
15522 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15523 case VSX_BUILTIN_STXVW4X_V4SF:
15524 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15525 case VSX_BUILTIN_STXVW4X_V4SI:
15526 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15527 case VSX_BUILTIN_STXVW4X_V8HI:
15528 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15529 case VSX_BUILTIN_STXVW4X_V16QI:
15530 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15532 /* For the following on big endian, it's ok to use any appropriate
15533 unaligned-supporting store, so use a generic expander. For
15534 little-endian, the exact element-reversing instruction must
15535 be used. */
15536 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15538 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15539 : CODE_FOR_vsx_st_elemrev_v2df);
15540 return altivec_expand_stv_builtin (code, exp);
15542 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15544 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15545 : CODE_FOR_vsx_st_elemrev_v2di);
15546 return altivec_expand_stv_builtin (code, exp);
15548 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15550 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15551 : CODE_FOR_vsx_st_elemrev_v4sf);
15552 return altivec_expand_stv_builtin (code, exp);
15554 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15556 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15557 : CODE_FOR_vsx_st_elemrev_v4si);
15558 return altivec_expand_stv_builtin (code, exp);
15560 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15562 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15563 : CODE_FOR_vsx_st_elemrev_v8hi);
15564 return altivec_expand_stv_builtin (code, exp);
15566 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15568 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15569 : CODE_FOR_vsx_st_elemrev_v16qi);
15570 return altivec_expand_stv_builtin (code, exp);
15573 case ALTIVEC_BUILTIN_MFVSCR:
15574 icode = CODE_FOR_altivec_mfvscr;
15575 tmode = insn_data[icode].operand[0].mode;
15577 if (target == 0
15578 || GET_MODE (target) != tmode
15579 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15580 target = gen_reg_rtx (tmode);
15582 pat = GEN_FCN (icode) (target);
15583 if (! pat)
15584 return 0;
15585 emit_insn (pat);
15586 return target;
15588 case ALTIVEC_BUILTIN_MTVSCR:
15589 icode = CODE_FOR_altivec_mtvscr;
15590 arg0 = CALL_EXPR_ARG (exp, 0);
15591 op0 = expand_normal (arg0);
15592 mode0 = insn_data[icode].operand[0].mode;
15594 /* If we got invalid arguments bail out before generating bad rtl. */
15595 if (arg0 == error_mark_node)
15596 return const0_rtx;
15598 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15599 op0 = copy_to_mode_reg (mode0, op0);
15601 pat = GEN_FCN (icode) (op0);
15602 if (pat)
15603 emit_insn (pat);
15604 return NULL_RTX;
15606 case ALTIVEC_BUILTIN_DSSALL:
15607 emit_insn (gen_altivec_dssall ());
15608 return NULL_RTX;
15610 case ALTIVEC_BUILTIN_DSS:
15611 icode = CODE_FOR_altivec_dss;
15612 arg0 = CALL_EXPR_ARG (exp, 0);
15613 STRIP_NOPS (arg0);
15614 op0 = expand_normal (arg0);
15615 mode0 = insn_data[icode].operand[0].mode;
15617 /* If we got invalid arguments bail out before generating bad rtl. */
15618 if (arg0 == error_mark_node)
15619 return const0_rtx;
15621 if (TREE_CODE (arg0) != INTEGER_CST
15622 || TREE_INT_CST_LOW (arg0) & ~0x3)
15624 error ("argument to dss must be a 2-bit unsigned literal");
15625 return const0_rtx;
15628 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15629 op0 = copy_to_mode_reg (mode0, op0);
15631 emit_insn (gen_altivec_dss (op0));
15632 return NULL_RTX;
15634 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15635 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15636 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15637 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15638 case VSX_BUILTIN_VEC_INIT_V2DF:
15639 case VSX_BUILTIN_VEC_INIT_V2DI:
15640 case VSX_BUILTIN_VEC_INIT_V1TI:
15641 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15643 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15644 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15645 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15646 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15647 case VSX_BUILTIN_VEC_SET_V2DF:
15648 case VSX_BUILTIN_VEC_SET_V2DI:
15649 case VSX_BUILTIN_VEC_SET_V1TI:
15650 return altivec_expand_vec_set_builtin (exp);
15652 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15653 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15654 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15655 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15656 case VSX_BUILTIN_VEC_EXT_V2DF:
15657 case VSX_BUILTIN_VEC_EXT_V2DI:
15658 case VSX_BUILTIN_VEC_EXT_V1TI:
15659 return altivec_expand_vec_ext_builtin (exp, target);
15661 case P9V_BUILTIN_VEXTRACT4B:
15662 case P9V_BUILTIN_VEC_VEXTRACT4B:
15663 arg1 = CALL_EXPR_ARG (exp, 1);
15664 STRIP_NOPS (arg1);
15666 /* If the argument is invalid, generate a normal call instead. */
15667 if (arg1 == error_mark_node)
15668 return expand_call (exp, target, false);
15670 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15672 error ("second argument to vec_vextract4b must be 0..12");
15673 return expand_call (exp, target, false);
15675 break;
15677 case P9V_BUILTIN_VINSERT4B:
15678 case P9V_BUILTIN_VINSERT4B_DI:
15679 case P9V_BUILTIN_VEC_VINSERT4B:
15680 arg2 = CALL_EXPR_ARG (exp, 2);
15681 STRIP_NOPS (arg2);
15683 /* If the argument is invalid, generate a normal call instead. */
15684 if (arg2 == error_mark_node)
15685 return expand_call (exp, target, false);
15687 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15689 error ("third argument to vec_vinsert4b must be 0..12");
15690 return expand_call (exp, target, false);
15692 break;
15694 default:
15695 break;
15696 /* Fall through. */
15699 /* Expand abs* operations. */
15700 d = bdesc_abs;
15701 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15702 if (d->code == fcode)
15703 return altivec_expand_abs_builtin (d->icode, exp, target);
15705 /* Expand the AltiVec predicates. */
15706 d = bdesc_altivec_preds;
15707 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15708 if (d->code == fcode)
15709 return altivec_expand_predicate_builtin (d->icode, exp, target);
15711 /* The LV* builtins are funky; they were initialized differently, so expand them explicitly here. */
15712 switch (fcode)
15714 case ALTIVEC_BUILTIN_LVSL:
15715 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15716 exp, target, false);
15717 case ALTIVEC_BUILTIN_LVSR:
15718 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15719 exp, target, false);
15720 case ALTIVEC_BUILTIN_LVEBX:
15721 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15722 exp, target, false);
15723 case ALTIVEC_BUILTIN_LVEHX:
15724 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15725 exp, target, false);
15726 case ALTIVEC_BUILTIN_LVEWX:
15727 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15728 exp, target, false);
15729 case ALTIVEC_BUILTIN_LVXL_V2DF:
15730 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15731 exp, target, false);
15732 case ALTIVEC_BUILTIN_LVXL_V2DI:
15733 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15734 exp, target, false);
15735 case ALTIVEC_BUILTIN_LVXL_V4SF:
15736 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15737 exp, target, false);
15738 case ALTIVEC_BUILTIN_LVXL:
15739 case ALTIVEC_BUILTIN_LVXL_V4SI:
15740 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15741 exp, target, false);
15742 case ALTIVEC_BUILTIN_LVXL_V8HI:
15743 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15744 exp, target, false);
15745 case ALTIVEC_BUILTIN_LVXL_V16QI:
15746 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15747 exp, target, false);
15748 case ALTIVEC_BUILTIN_LVX_V2DF:
15749 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
15750 exp, target, false);
15751 case ALTIVEC_BUILTIN_LVX_V2DI:
15752 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
15753 exp, target, false);
15754 case ALTIVEC_BUILTIN_LVX_V4SF:
15755 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
15756 exp, target, false);
15757 case ALTIVEC_BUILTIN_LVX:
15758 case ALTIVEC_BUILTIN_LVX_V4SI:
15759 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
15760 exp, target, false);
15761 case ALTIVEC_BUILTIN_LVX_V8HI:
15762 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
15763 exp, target, false);
15764 case ALTIVEC_BUILTIN_LVX_V16QI:
15765 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
15766 exp, target, false);
15767 case ALTIVEC_BUILTIN_LVLX:
15768 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15769 exp, target, true);
15770 case ALTIVEC_BUILTIN_LVLXL:
15771 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15772 exp, target, true);
15773 case ALTIVEC_BUILTIN_LVRX:
15774 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15775 exp, target, true);
15776 case ALTIVEC_BUILTIN_LVRXL:
15777 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15778 exp, target, true);
15779 case VSX_BUILTIN_LXVD2X_V1TI:
15780 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15781 exp, target, false);
15782 case VSX_BUILTIN_LXVD2X_V2DF:
15783 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15784 exp, target, false);
15785 case VSX_BUILTIN_LXVD2X_V2DI:
15786 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15787 exp, target, false);
15788 case VSX_BUILTIN_LXVW4X_V4SF:
15789 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15790 exp, target, false);
15791 case VSX_BUILTIN_LXVW4X_V4SI:
15792 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15793 exp, target, false);
15794 case VSX_BUILTIN_LXVW4X_V8HI:
15795 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15796 exp, target, false);
15797 case VSX_BUILTIN_LXVW4X_V16QI:
15798 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15799 exp, target, false);
15800 /* For the following on big endian, it's ok to use any appropriate
15801 unaligned-supporting load, so use a generic expander. For
15802 little-endian, the exact element-reversing instruction must
15803 be used. */
15804 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15806 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15807 : CODE_FOR_vsx_ld_elemrev_v2df);
15808 return altivec_expand_lv_builtin (code, exp, target, false);
15810 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15812 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15813 : CODE_FOR_vsx_ld_elemrev_v2di);
15814 return altivec_expand_lv_builtin (code, exp, target, false);
15816 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15818 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15819 : CODE_FOR_vsx_ld_elemrev_v4sf);
15820 return altivec_expand_lv_builtin (code, exp, target, false);
15822 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15824 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15825 : CODE_FOR_vsx_ld_elemrev_v4si);
15826 return altivec_expand_lv_builtin (code, exp, target, false);
15828 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15830 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15831 : CODE_FOR_vsx_ld_elemrev_v8hi);
15832 return altivec_expand_lv_builtin (code, exp, target, false);
15834 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15836 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15837 : CODE_FOR_vsx_ld_elemrev_v16qi);
15838 return altivec_expand_lv_builtin (code, exp, target, false);
15840 break;
15841 default:
15842 break;
15843 /* Fall through. */
15846 *expandedp = false;
15847 return NULL_RTX;
15850 /* Expand the builtin in EXP and store the result in TARGET. Store
15851 true in *EXPANDEDP if we found a builtin to expand. */
15852 static rtx
15853 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15855 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15856 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15857 const struct builtin_description *d;
15858 size_t i;
15860 *expandedp = true;
15862 switch (fcode)
15864 case PAIRED_BUILTIN_STX:
15865 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15866 case PAIRED_BUILTIN_LX:
15867 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15868 default:
15869 break;
15870 /* Fall through. */
15873 /* Expand the paired predicates. */
15874 d = bdesc_paired_preds;
15875 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15876 if (d->code == fcode)
15877 return paired_expand_predicate_builtin (d->icode, exp, target);
15879 *expandedp = false;
15880 return NULL_RTX;
15883 static rtx
15884 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15886 rtx pat, scratch, tmp;
15887 tree form = CALL_EXPR_ARG (exp, 0);
15888 tree arg0 = CALL_EXPR_ARG (exp, 1);
15889 tree arg1 = CALL_EXPR_ARG (exp, 2);
15890 rtx op0 = expand_normal (arg0);
15891 rtx op1 = expand_normal (arg1);
15892 machine_mode mode0 = insn_data[icode].operand[1].mode;
15893 machine_mode mode1 = insn_data[icode].operand[2].mode;
15894 int form_int;
15895 enum rtx_code code;
15897 if (TREE_CODE (form) != INTEGER_CST)
15899 error ("argument 1 of __builtin_paired_predicate must be a constant");
15900 return const0_rtx;
15902 else
15903 form_int = TREE_INT_CST_LOW (form);
15905 gcc_assert (mode0 == mode1);
15907 if (arg0 == error_mark_node || arg1 == error_mark_node)
15908 return const0_rtx;
15910 if (target == 0
15911 || GET_MODE (target) != SImode
15912 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15913 target = gen_reg_rtx (SImode);
15914 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15915 op0 = copy_to_mode_reg (mode0, op0);
15916 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15917 op1 = copy_to_mode_reg (mode1, op1);
15919 scratch = gen_reg_rtx (CCFPmode);
15921 pat = GEN_FCN (icode) (scratch, op0, op1);
15922 if (!pat)
15923 return const0_rtx;
15925 emit_insn (pat);
15927 switch (form_int)
15929 /* LT bit. */
15930 case 0:
15931 code = LT;
15932 break;
15933 /* GT bit. */
15934 case 1:
15935 code = GT;
15936 break;
15937 /* EQ bit. */
15938 case 2:
15939 code = EQ;
15940 break;
15941 /* UN bit. */
15942 case 3:
15943 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15944 return target;
15945 default:
15946 error ("argument 1 of __builtin_paired_predicate is out of range");
15947 return const0_rtx;
15950 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15951 emit_move_insn (target, tmp);
15952 return target;
15955 /* Raise an error message for a builtin function that is called without the
15956 appropriate target options being set. */
15958 static void
15959 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15961 size_t uns_fncode = (size_t) fncode;
15962 const char *name = rs6000_builtin_info[uns_fncode].name;
15963 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15965 gcc_assert (name != NULL);
15966 if ((fnmask & RS6000_BTM_CELL) != 0)
15967 error ("Builtin function %s is only valid for the cell processor", name);
15968 else if ((fnmask & RS6000_BTM_VSX) != 0)
15969 error ("Builtin function %s requires the -mvsx option", name);
15970 else if ((fnmask & RS6000_BTM_HTM) != 0)
15971 error ("Builtin function %s requires the -mhtm option", name);
15972 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15973 error ("Builtin function %s requires the -maltivec option", name);
15974 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
15975 error ("Builtin function %s requires the -mpaired option", name);
15976 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15977 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15978 error ("Builtin function %s requires the -mhard-dfp and"
15979 " -mpower8-vector options", name);
15980 else if ((fnmask & RS6000_BTM_DFP) != 0)
15981 error ("Builtin function %s requires the -mhard-dfp option", name);
15982 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15983 error ("Builtin function %s requires the -mpower8-vector option", name);
15984 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15985 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15986 error ("Builtin function %s requires the -mcpu=power9 and"
15987 " -m64 options", name);
15988 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15989 error ("Builtin function %s requires the -mcpu=power9 option", name);
15990 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15991 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15992 error ("Builtin function %s requires the -mcpu=power9 and"
15993 " -m64 options", name);
15994 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15995 error ("Builtin function %s requires the -mcpu=power9 option", name);
15996 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15997 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15998 error ("Builtin function %s requires the -mhard-float and"
15999 " -mlong-double-128 options", name);
16000 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16001 error ("Builtin function %s requires the -mhard-float option", name);
16002 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16003 error ("Builtin function %s requires the -mfloat128 option", name);
16004 else
16005 error ("Builtin function %s is not supported with the current options",
16006 name);
16009 /* Target hook for early folding of built-ins, shamelessly stolen
16010 from ia64.c. */
16012 static tree
16013 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
16014 tree *args, bool ignore ATTRIBUTE_UNUSED)
16016 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
16018 enum rs6000_builtins fn_code
16019 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16020 switch (fn_code)
16022 case RS6000_BUILTIN_NANQ:
16023 case RS6000_BUILTIN_NANSQ:
16025 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16026 const char *str = c_getstr (*args);
16027 int quiet = fn_code == RS6000_BUILTIN_NANQ;
16028 REAL_VALUE_TYPE real;
16030 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
16031 return build_real (type, real);
16032 return NULL_TREE;
16034 case RS6000_BUILTIN_INFQ:
16035 case RS6000_BUILTIN_HUGE_VALQ:
16037 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16038 REAL_VALUE_TYPE inf;
16039 real_inf (&inf);
16040 return build_real (type, inf);
16042 default:
16043 break;
16046 #ifdef SUBTARGET_FOLD_BUILTIN
16047 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16048 #else
16049 return NULL_TREE;
16050 #endif
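/* Effect of the folding above: calls with foldable constant arguments
   never survive to expansion.  For example (assumes -mfloat128):

     __float128 q = __builtin_nanq ("");   // folded to a quiet-NaN REAL_CST
     __float128 i = __builtin_infq ();     // folded to a +Inf REAL_CST

   so no runtime code is generated for either initializer.  */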
16053 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16054 a constant, use rs6000_fold_builtin.) */
16056 bool
16057 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16059 gimple *stmt = gsi_stmt (*gsi);
16060 tree fndecl = gimple_call_fndecl (stmt);
16061 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16062 enum rs6000_builtins fn_code
16063 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16064 tree arg0, arg1, lhs;
16066 /* Generic solution to prevent gimple folding of code without an LHS. */
16067 if (!gimple_call_lhs (stmt))
16068 return false;
16070 switch (fn_code)
16072 /* Flavors of vec_add. We deliberately don't expand
16073 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16074 TImode, resulting in much poorer code generation. */
16075 case ALTIVEC_BUILTIN_VADDUBM:
16076 case ALTIVEC_BUILTIN_VADDUHM:
16077 case ALTIVEC_BUILTIN_VADDUWM:
16078 case P8V_BUILTIN_VADDUDM:
16079 case ALTIVEC_BUILTIN_VADDFP:
16080 case VSX_BUILTIN_XVADDDP:
16082 arg0 = gimple_call_arg (stmt, 0);
16083 arg1 = gimple_call_arg (stmt, 1);
16084 lhs = gimple_call_lhs (stmt);
16085 gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16086 gimple_set_location (g, gimple_location (stmt));
16087 gsi_replace (gsi, g, true);
16088 return true;
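/* For example, with -maltivec the call
     D.2345 = __builtin_altivec_vadduwm (a_1, b_2);
   is replaced in place by the GIMPLE assignment
     D.2345 = a_1 + b_2;
   letting later GIMPLE passes see through the builtin.  (Sketch of
   the transform; the SSA names are illustrative.)  */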
16090 /* Flavors of vec_sub. We deliberately don't expand
16091 P8V_BUILTIN_VSUBUQM. */
16092 case ALTIVEC_BUILTIN_VSUBUBM:
16093 case ALTIVEC_BUILTIN_VSUBUHM:
16094 case ALTIVEC_BUILTIN_VSUBUWM:
16095 case P8V_BUILTIN_VSUBUDM:
16096 case ALTIVEC_BUILTIN_VSUBFP:
16097 case VSX_BUILTIN_XVSUBDP:
16099 arg0 = gimple_call_arg (stmt, 0);
16100 arg1 = gimple_call_arg (stmt, 1);
16101 lhs = gimple_call_lhs (stmt);
16102 gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16103 gimple_set_location (g, gimple_location (stmt));
16104 gsi_replace (gsi, g, true);
16105 return true;
16107 case VSX_BUILTIN_XVMULSP:
16108 case VSX_BUILTIN_XVMULDP:
16110 arg0 = gimple_call_arg (stmt, 0);
16111 arg1 = gimple_call_arg (stmt, 1);
16112 lhs = gimple_call_lhs (stmt);
16113 gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16114 gimple_set_location (g, gimple_location (stmt));
16115 gsi_replace (gsi, g, true);
16116 return true;
16118 /* Even element flavors of vec_mul (signed). */
16119 case ALTIVEC_BUILTIN_VMULESB:
16120 case ALTIVEC_BUILTIN_VMULESH:
16121 /* Even element flavors of vec_mul (unsigned). */
16122 case ALTIVEC_BUILTIN_VMULEUB:
16123 case ALTIVEC_BUILTIN_VMULEUH:
16125 arg0 = gimple_call_arg (stmt, 0);
16126 arg1 = gimple_call_arg (stmt, 1);
16127 lhs = gimple_call_lhs (stmt);
16128 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16129 gimple_set_location (g, gimple_location (stmt));
16130 gsi_replace (gsi, g, true);
16131 return true;
16133 /* Odd element flavors of vec_mul (signed). */
16134 case ALTIVEC_BUILTIN_VMULOSB:
16135 case ALTIVEC_BUILTIN_VMULOSH:
16136 /* Odd element flavors of vec_mul (unsigned). */
16137 case ALTIVEC_BUILTIN_VMULOUB:
16138 case ALTIVEC_BUILTIN_VMULOUH:
16140 arg0 = gimple_call_arg (stmt, 0);
16141 arg1 = gimple_call_arg (stmt, 1);
16142 lhs = gimple_call_lhs (stmt);
16143 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16144 gimple_set_location (g, gimple_location (stmt));
16145 gsi_replace (gsi, g, true);
16146 return true;
16148 /* Flavors of vec_div (integer). */
16149 case VSX_BUILTIN_DIV_V2DI:
16150 case VSX_BUILTIN_UDIV_V2DI:
16152 arg0 = gimple_call_arg (stmt, 0);
16153 arg1 = gimple_call_arg (stmt, 1);
16154 lhs = gimple_call_lhs (stmt);
16155 gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16156 gimple_set_location (g, gimple_location (stmt));
16157 gsi_replace (gsi, g, true);
16158 return true;
16160 /* Flavors of vec_div (float). */
16161 case VSX_BUILTIN_XVDIVSP:
16162 case VSX_BUILTIN_XVDIVDP:
16164 arg0 = gimple_call_arg (stmt, 0);
16165 arg1 = gimple_call_arg (stmt, 1);
16166 lhs = gimple_call_lhs (stmt);
16167 gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16168 gimple_set_location (g, gimple_location (stmt));
16169 gsi_replace (gsi, g, true);
16170 return true;
16172 /* Flavors of vec_and. */
16173 case ALTIVEC_BUILTIN_VAND:
16175 arg0 = gimple_call_arg (stmt, 0);
16176 arg1 = gimple_call_arg (stmt, 1);
16177 lhs = gimple_call_lhs (stmt);
16178 gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16179 gimple_set_location (g, gimple_location (stmt));
16180 gsi_replace (gsi, g, true);
16181 return true;
16183 /* Flavors of vec_andc. */
16184 case ALTIVEC_BUILTIN_VANDC:
16186 arg0 = gimple_call_arg (stmt, 0);
16187 arg1 = gimple_call_arg (stmt, 1);
16188 lhs = gimple_call_lhs (stmt);
16189 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16190 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16191 gimple_set_location (g, gimple_location (stmt));
16192 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16193 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16194 gimple_set_location (g, gimple_location (stmt));
16195 gsi_replace (gsi, g, true);
16196 return true;
16198 /* Flavors of vec_nand. */
16199 case P8V_BUILTIN_VEC_NAND:
16200 case P8V_BUILTIN_NAND_V16QI:
16201 case P8V_BUILTIN_NAND_V8HI:
16202 case P8V_BUILTIN_NAND_V4SI:
16203 case P8V_BUILTIN_NAND_V4SF:
16204 case P8V_BUILTIN_NAND_V2DF:
16205 case P8V_BUILTIN_NAND_V2DI:
16207 arg0 = gimple_call_arg (stmt, 0);
16208 arg1 = gimple_call_arg (stmt, 1);
16209 lhs = gimple_call_lhs (stmt);
16210 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16211 gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16212 gimple_set_location (g, gimple_location (stmt));
16213 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16214 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16215 gimple_set_location (g, gimple_location (stmt));
16216 gsi_replace (gsi, g, true);
16217 return true;
16219 /* Flavors of vec_or. */
16220 case ALTIVEC_BUILTIN_VOR:
16222 arg0 = gimple_call_arg (stmt, 0);
16223 arg1 = gimple_call_arg (stmt, 1);
16224 lhs = gimple_call_lhs (stmt);
16225 gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16226 gimple_set_location (g, gimple_location (stmt));
16227 gsi_replace (gsi, g, true);
16228 return true;
16230 /* Flavors of vec_orc. */
16231 case P8V_BUILTIN_ORC_V16QI:
16232 case P8V_BUILTIN_ORC_V8HI:
16233 case P8V_BUILTIN_ORC_V4SI:
16234 case P8V_BUILTIN_ORC_V4SF:
16235 case P8V_BUILTIN_ORC_V2DF:
16236 case P8V_BUILTIN_ORC_V2DI:
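16237 /* Sketch of the fold: vec_orc (a, b) becomes t = ~b; lhs = a | t.  */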
16238 arg0 = gimple_call_arg (stmt, 0);
16239 arg1 = gimple_call_arg (stmt, 1);
16240 lhs = gimple_call_lhs (stmt);
16241 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16242 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16243 gimple_set_location (g, gimple_location (stmt));
16244 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16245 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16246 gimple_set_location (g, gimple_location (stmt));
16247 gsi_replace (gsi, g, true);
16248 return true;
16250 /* Flavors of vec_xor. */
16251 case ALTIVEC_BUILTIN_VXOR:
16253 arg0 = gimple_call_arg (stmt, 0);
16254 arg1 = gimple_call_arg (stmt, 1);
16255 lhs = gimple_call_lhs (stmt);
16256 gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16257 gimple_set_location (g, gimple_location (stmt));
16258 gsi_replace (gsi, g, true);
16259 return true;
16261 /* Flavors of vec_nor. */
16262 case ALTIVEC_BUILTIN_VNOR:
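16263 /* Sketch of the fold: vec_nor (a, b) becomes t = a | b; lhs = ~t.  */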
16264 arg0 = gimple_call_arg (stmt, 0);
16265 arg1 = gimple_call_arg (stmt, 1);
16266 lhs = gimple_call_lhs (stmt);
16267 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16268 gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16269 gimple_set_location (g, gimple_location (stmt));
16270 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16271 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16272 gimple_set_location (g, gimple_location (stmt));
16273 gsi_replace (gsi, g, true);
16274 return true;
16276 /* Flavors of vec_abs. */
16277 case ALTIVEC_BUILTIN_ABS_V16QI:
16278 case ALTIVEC_BUILTIN_ABS_V8HI:
16279 case ALTIVEC_BUILTIN_ABS_V4SI:
16280 case ALTIVEC_BUILTIN_ABS_V4SF:
16281 case P8V_BUILTIN_ABS_V2DI:
16282 case VSX_BUILTIN_XVABSDP:
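16283 /* Folding to ABS_EXPR is only safe when signed overflow wraps; otherwise ABS of the most negative element would be undefined, so punt.  */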
16284 arg0 = gimple_call_arg (stmt, 0);
16285 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16286 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16287 return false;
16288 lhs = gimple_call_lhs (stmt);
16289 gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16290 gimple_set_location (g, gimple_location (stmt));
16291 gsi_replace (gsi, g, true);
16292 return true;
16294 /* Flavors of vec_min. */
16295 case VSX_BUILTIN_XVMINDP:
16296 case P8V_BUILTIN_VMINSD:
16297 case P8V_BUILTIN_VMINUD:
16298 case ALTIVEC_BUILTIN_VMINSB:
16299 case ALTIVEC_BUILTIN_VMINSH:
16300 case ALTIVEC_BUILTIN_VMINSW:
16301 case ALTIVEC_BUILTIN_VMINUB:
16302 case ALTIVEC_BUILTIN_VMINUH:
16303 case ALTIVEC_BUILTIN_VMINUW:
16304 case ALTIVEC_BUILTIN_VMINFP:
16306 arg0 = gimple_call_arg (stmt, 0);
16307 arg1 = gimple_call_arg (stmt, 1);
16308 lhs = gimple_call_lhs (stmt);
16309 gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16310 gimple_set_location (g, gimple_location (stmt));
16311 gsi_replace (gsi, g, true);
16312 return true;
16314 /* Flavors of vec_max. */
16315 case VSX_BUILTIN_XVMAXDP:
16316 case P8V_BUILTIN_VMAXSD:
16317 case P8V_BUILTIN_VMAXUD:
16318 case ALTIVEC_BUILTIN_VMAXSB:
16319 case ALTIVEC_BUILTIN_VMAXSH:
16320 case ALTIVEC_BUILTIN_VMAXSW:
16321 case ALTIVEC_BUILTIN_VMAXUB:
16322 case ALTIVEC_BUILTIN_VMAXUH:
16323 case ALTIVEC_BUILTIN_VMAXUW:
16324 case ALTIVEC_BUILTIN_VMAXFP:
16326 arg0 = gimple_call_arg (stmt, 0);
16327 arg1 = gimple_call_arg (stmt, 1);
16328 lhs = gimple_call_lhs (stmt);
16329 gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16330 gimple_set_location (g, gimple_location (stmt));
16331 gsi_replace (gsi, g, true);
16332 return true;
16334 /* Flavors of vec_eqv. */
16335 case P8V_BUILTIN_EQV_V16QI:
16336 case P8V_BUILTIN_EQV_V8HI:
16337 case P8V_BUILTIN_EQV_V4SI:
16338 case P8V_BUILTIN_EQV_V4SF:
16339 case P8V_BUILTIN_EQV_V2DF:
16340 case P8V_BUILTIN_EQV_V2DI:
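16341 /* Sketch of the fold: vec_eqv (a, b) becomes t = a ^ b; lhs = ~t.  */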
16342 arg0 = gimple_call_arg (stmt, 0);
16343 arg1 = gimple_call_arg (stmt, 1);
16344 lhs = gimple_call_lhs (stmt);
16345 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16346 gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16347 gimple_set_location (g, gimple_location (stmt));
16348 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16349 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16350 gimple_set_location (g, gimple_location (stmt));
16351 gsi_replace (gsi, g, true);
16352 return true;
16354 /* Flavors of vec_rotate_left. */
16355 case ALTIVEC_BUILTIN_VRLB:
16356 case ALTIVEC_BUILTIN_VRLH:
16357 case ALTIVEC_BUILTIN_VRLW:
16358 case P8V_BUILTIN_VRLD:
16360 arg0 = gimple_call_arg (stmt, 0);
16361 arg1 = gimple_call_arg (stmt, 1);
16362 lhs = gimple_call_lhs (stmt);
16363 gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16364 gimple_set_location (g, gimple_location (stmt));
16365 gsi_replace (gsi, g, true);
16366 return true;
16368 /* Flavors of vector shift right algebraic.
16369 vec_sra{b,h,w} -> vsra{b,h,w}. */
16370 case ALTIVEC_BUILTIN_VSRAB:
16371 case ALTIVEC_BUILTIN_VSRAH:
16372 case ALTIVEC_BUILTIN_VSRAW:
16373 case P8V_BUILTIN_VSRAD:
16375 arg0 = gimple_call_arg (stmt, 0);
16376 arg1 = gimple_call_arg (stmt, 1);
16377 lhs = gimple_call_lhs (stmt);
16378 gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16379 gimple_set_location (g, gimple_location (stmt));
16380 gsi_replace (gsi, g, true);
16381 return true;
16383 /* Flavors of vector shift left.
16384 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16385 case ALTIVEC_BUILTIN_VSLB:
16386 case ALTIVEC_BUILTIN_VSLH:
16387 case ALTIVEC_BUILTIN_VSLW:
16388 case P8V_BUILTIN_VSLD:
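16389 /* As with vec_abs above, punt when signed overflow does not wrap, since the left shift could then overflow with undefined behavior.  */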
16390 arg0 = gimple_call_arg (stmt, 0);
16391 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16392 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16393 return false;
16394 arg1 = gimple_call_arg (stmt, 1);
16395 lhs = gimple_call_lhs (stmt);
16396 gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16397 gimple_set_location (g, gimple_location (stmt));
16398 gsi_replace (gsi, g, true);
16399 return true;
16401 /* Flavors of vector shift right. */
16402 case ALTIVEC_BUILTIN_VSRB:
16403 case ALTIVEC_BUILTIN_VSRH:
16404 case ALTIVEC_BUILTIN_VSRW:
16405 case P8V_BUILTIN_VSRD:
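16406 /* These are logical shifts, but RSHIFT_EXPR on a signed type is arithmetic, so shift an unsigned view of arg0 and convert the result back.  */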
16407 arg0 = gimple_call_arg (stmt, 0);
16408 arg1 = gimple_call_arg (stmt, 1);
16409 lhs = gimple_call_lhs (stmt);
16410 gimple_seq stmts = NULL;
16411 /* Convert arg0 to unsigned. */
16412 tree arg0_unsigned
16413 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16414 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16415 tree res
16416 = gimple_build (&stmts, RSHIFT_EXPR,
16417 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16418 /* Convert result back to the lhs type. */
16419 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16420 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16421 update_call_from_tree (gsi, res);
16422 return true;
16424 default:
16425 break;
16428 return false;
16431 /* Expand an expression EXP that calls a built-in function,
16432 with result going to TARGET if that's convenient
16433 (and in mode MODE if that's convenient).
16434 SUBTARGET may be used as the target for computing one of EXP's operands.
16435 IGNORE is nonzero if the value is to be ignored. */
16437 static rtx
16438 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16439 machine_mode mode ATTRIBUTE_UNUSED,
16440 int ignore ATTRIBUTE_UNUSED)
16442 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16443 enum rs6000_builtins fcode
16444 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16445 size_t uns_fcode = (size_t)fcode;
16446 const struct builtin_description *d;
16447 size_t i;
16448 rtx ret;
16449 bool success;
16450 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16451 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16453 if (TARGET_DEBUG_BUILTIN)
16455 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16456 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16457 const char *name2 = ((icode != CODE_FOR_nothing)
16458 ? get_insn_name ((int)icode)
16459 : "nothing");
16460 const char *name3;
16462 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16464 default: name3 = "unknown"; break;
16465 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16466 case RS6000_BTC_UNARY: name3 = "unary"; break;
16467 case RS6000_BTC_BINARY: name3 = "binary"; break;
16468 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16469 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16470 case RS6000_BTC_ABS: name3 = "abs"; break;
16471 case RS6000_BTC_DST: name3 = "dst"; break;
16475 fprintf (stderr,
16476 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16477 (name1) ? name1 : "---", fcode,
16478 (name2) ? name2 : "---", (int)icode,
16479 name3,
16480 func_valid_p ? "" : ", not valid");
16483 if (!func_valid_p)
16485 rs6000_invalid_builtin (fcode);
16487 /* Given that it is invalid, just generate a normal call. */
16488 return expand_call (exp, target, ignore);
16491 switch (fcode)
16493 case RS6000_BUILTIN_RECIP:
16494 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16496 case RS6000_BUILTIN_RECIPF:
16497 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16499 case RS6000_BUILTIN_RSQRTF:
16500 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16502 case RS6000_BUILTIN_RSQRT:
16503 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16505 case POWER7_BUILTIN_BPERMD:
16506 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16507 ? CODE_FOR_bpermd_di
16508 : CODE_FOR_bpermd_si), exp, target);
16510 case RS6000_BUILTIN_GET_TB:
16511 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16512 target);
16514 case RS6000_BUILTIN_MFTB:
16515 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16516 ? CODE_FOR_rs6000_mftb_di
16517 : CODE_FOR_rs6000_mftb_si),
16518 target);
16520 case RS6000_BUILTIN_MFFS:
16521 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16523 case RS6000_BUILTIN_MTFSF:
16524 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16526 case RS6000_BUILTIN_CPU_INIT:
16527 case RS6000_BUILTIN_CPU_IS:
16528 case RS6000_BUILTIN_CPU_SUPPORTS:
16529 return cpu_expand_builtin (fcode, exp, target);
16531 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16532 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
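16533 /* Generate the permute mask used to realign unaligned vector accesses: lvsr on big-endian, lvsl on little-endian, with the address negated for the load case below.  */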
16534 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16535 : (int) CODE_FOR_altivec_lvsl_direct);
16536 machine_mode tmode = insn_data[icode].operand[0].mode;
16537 machine_mode mode = insn_data[icode].operand[1].mode;
16538 tree arg;
16539 rtx op, addr, pat;
16541 gcc_assert (TARGET_ALTIVEC);
16543 arg = CALL_EXPR_ARG (exp, 0);
16544 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16545 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16546 addr = memory_address (mode, op);
16547 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16548 op = addr;
16549 else
16551 /* For the load case we need to negate the address. */
16552 op = gen_reg_rtx (GET_MODE (addr));
16553 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16555 op = gen_rtx_MEM (mode, op);
16557 if (target == 0
16558 || GET_MODE (target) != tmode
16559 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16560 target = gen_reg_rtx (tmode);
16562 pat = GEN_FCN (icode) (target, op);
16563 if (!pat)
16564 return 0;
16565 emit_insn (pat);
16567 return target;
16570 case ALTIVEC_BUILTIN_VCFUX:
16571 case ALTIVEC_BUILTIN_VCFSX:
16572 case ALTIVEC_BUILTIN_VCTUXS:
16573 case ALTIVEC_BUILTIN_VCTSXS:
16574 /* FIXME: There's got to be a nicer way to handle this case than
16575 constructing a new CALL_EXPR. */
16576 if (call_expr_nargs (exp) == 1)
16578 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16579 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16581 break;
16583 default:
16584 break;
16587 if (TARGET_ALTIVEC)
16589 ret = altivec_expand_builtin (exp, target, &success);
16591 if (success)
16592 return ret;
16594 if (TARGET_PAIRED_FLOAT)
16596 ret = paired_expand_builtin (exp, target, &success);
16598 if (success)
16599 return ret;
16601 if (TARGET_HTM)
16603 ret = htm_expand_builtin (exp, target, &success);
16605 if (success)
16606 return ret;
16609 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16610 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16611 gcc_assert (attr == RS6000_BTC_UNARY
16612 || attr == RS6000_BTC_BINARY
16613 || attr == RS6000_BTC_TERNARY
16614 || attr == RS6000_BTC_SPECIAL);
16616 /* Handle simple unary operations. */
16617 d = bdesc_1arg;
16618 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16619 if (d->code == fcode)
16620 return rs6000_expand_unop_builtin (d->icode, exp, target);
16622 /* Handle simple binary operations. */
16623 d = bdesc_2arg;
16624 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16625 if (d->code == fcode)
16626 return rs6000_expand_binop_builtin (d->icode, exp, target);
16628 /* Handle simple ternary operations. */
16629 d = bdesc_3arg;
16630 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16631 if (d->code == fcode)
16632 return rs6000_expand_ternop_builtin (d->icode, exp, target);
16634 /* Handle simple no-argument operations. */
16635 d = bdesc_0arg;
16636 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16637 if (d->code == fcode)
16638 return rs6000_expand_zeroop_builtin (d->icode, target);
16640 gcc_unreachable ();
16643 /* Create a built-in vector type with a name, taking care not to give
16644 the canonical type a name. */
16646 static tree
16647 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16649 tree result = build_vector_type (elt_type, num_elts);
16651 /* Copy so we don't give the canonical type a name. */
16652 result = build_variant_type_copy (result);
16654 add_builtin_type (name, result);
16656 return result;
16659 static void
16660 rs6000_init_builtins (void)
16662 tree tdecl;
16663 tree ftype;
16664 machine_mode mode;
16666 if (TARGET_DEBUG_BUILTIN)
16667 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
16668 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
16669 (TARGET_ALTIVEC) ? ", altivec" : "",
16670 (TARGET_VSX) ? ", vsx" : "");
16672 V2SI_type_node = build_vector_type (intSI_type_node, 2);
16673 V2SF_type_node = build_vector_type (float_type_node, 2);
16674 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16675 : "__vector long long",
16676 intDI_type_node, 2);
16677 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16678 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16679 intSI_type_node, 4);
16680 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16681 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16682 intHI_type_node, 8);
16683 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16684 intQI_type_node, 16);
16686 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16687 unsigned_intQI_type_node, 16);
16688 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16689 unsigned_intHI_type_node, 8);
16690 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16691 unsigned_intSI_type_node, 4);
16692 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16693 ? "__vector unsigned long"
16694 : "__vector unsigned long long",
16695 unsigned_intDI_type_node, 2);
16697 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
16698 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
16699 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
16700 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16702 const_str_type_node
16703 = build_pointer_type (build_qualified_type (char_type_node,
16704 TYPE_QUAL_CONST));
16706 /* We use V1TI mode as a special container to hold __int128_t items that
16707 must live in VSX registers. */
16708 if (intTI_type_node)
16710 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16711 intTI_type_node, 1);
16712 unsigned_V1TI_type_node
16713 = rs6000_vector_type ("__vector unsigned __int128",
16714 unsigned_intTI_type_node, 1);
16717 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16718 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16719 'vector unsigned short'. */
16721 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16722 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16723 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16724 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16725 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16727 long_integer_type_internal_node = long_integer_type_node;
16728 long_unsigned_type_internal_node = long_unsigned_type_node;
16729 long_long_integer_type_internal_node = long_long_integer_type_node;
16730 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16731 intQI_type_internal_node = intQI_type_node;
16732 uintQI_type_internal_node = unsigned_intQI_type_node;
16733 intHI_type_internal_node = intHI_type_node;
16734 uintHI_type_internal_node = unsigned_intHI_type_node;
16735 intSI_type_internal_node = intSI_type_node;
16736 uintSI_type_internal_node = unsigned_intSI_type_node;
16737 intDI_type_internal_node = intDI_type_node;
16738 uintDI_type_internal_node = unsigned_intDI_type_node;
16739 intTI_type_internal_node = intTI_type_node;
16740 uintTI_type_internal_node = unsigned_intTI_type_node;
16741 float_type_internal_node = float_type_node;
16742 double_type_internal_node = double_type_node;
16743 long_double_type_internal_node = long_double_type_node;
16744 dfloat64_type_internal_node = dfloat64_type_node;
16745 dfloat128_type_internal_node = dfloat128_type_node;
16746 void_type_internal_node = void_type_node;
16748 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16749 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16750 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16751 format that uses a pair of doubles, depending on the switches and
16752 defaults.
16754 We do not enable the actual __float128 keyword unless the user explicitly
16755 asks for it, because the library support is not yet complete.
16757 If we don't support either 128-bit IBM double-double or IEEE 128-bit
16758 floating point, we need to make sure the type is non-zero, or else the
16759 self-test fails during bootstrap.
16761 We don't register a built-in type for __ibm128 if the type is the same as
16762 long double. Instead, rs6000_cpu_cpp_builtins adds a #define mapping
16763 __ibm128 to long double. */
16764 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
16766 ibm128_float_type_node = make_node (REAL_TYPE);
16767 TYPE_PRECISION (ibm128_float_type_node) = 128;
16768 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16769 layout_type (ibm128_float_type_node);
16771 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16772 "__ibm128");
16774 else
16775 ibm128_float_type_node = long_double_type_node;
16777 if (TARGET_FLOAT128_KEYWORD)
16779 ieee128_float_type_node = float128_type_node;
16780 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16781 "__float128");
16784 else if (TARGET_FLOAT128_TYPE)
16786 ieee128_float_type_node = make_node (REAL_TYPE);
16787 TYPE_PRECISION (ieee128_float_type_node) = 128;
16788 SET_TYPE_MODE (ieee128_float_type_node, KFmode);
16789 layout_type (ieee128_float_type_node);
16791 /* If we are not exporting the __float128/_Float128 keywords, we need a
16792 keyword to get the types created. Use __ieee128 as the dummy
16793 keyword. */
16794 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16795 "__ieee128");
16798 else
16799 ieee128_float_type_node = long_double_type_node;
16801 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16802 tree type node. */
16803 builtin_mode_to_type[QImode][0] = integer_type_node;
16804 builtin_mode_to_type[HImode][0] = integer_type_node;
16805 builtin_mode_to_type[SImode][0] = intSI_type_node;
16806 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16807 builtin_mode_to_type[DImode][0] = intDI_type_node;
16808 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16809 builtin_mode_to_type[TImode][0] = intTI_type_node;
16810 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16811 builtin_mode_to_type[SFmode][0] = float_type_node;
16812 builtin_mode_to_type[DFmode][0] = double_type_node;
16813 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16814 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16815 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16816 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16817 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16818 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16819 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16820 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
16821 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
16822 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16823 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16824 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16825 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16826 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16827 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16828 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16829 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16830 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16831 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16833 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16834 TYPE_NAME (bool_char_type_node) = tdecl;
16836 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16837 TYPE_NAME (bool_short_type_node) = tdecl;
16839 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16840 TYPE_NAME (bool_int_type_node) = tdecl;
16842 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16843 TYPE_NAME (pixel_type_node) = tdecl;
16845 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16846 bool_char_type_node, 16);
16847 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16848 bool_short_type_node, 8);
16849 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16850 bool_int_type_node, 4);
16851 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16852 ? "__vector __bool long"
16853 : "__vector __bool long long",
16854 bool_long_type_node, 2);
16855 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16856 pixel_type_node, 8);
16858 /* Paired built-in functions are only available if the compiler was built
16859 with the appropriate options, so only create them when the corresponding
16860 compiler option is enabled. Create the AltiVec and VSX built-ins on
16861 machines with at least the general-purpose extensions (970 and newer) to
16862 allow the use of the target attribute. */
16863 if (TARGET_PAIRED_FLOAT)
16864 paired_init_builtins ();
16865 if (TARGET_EXTRA_BUILTINS)
16866 altivec_init_builtins ();
16867 if (TARGET_HTM)
16868 htm_init_builtins ();
16870 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
16871 rs6000_common_init_builtins ();
16873 ftype = build_function_type_list (ieee128_float_type_node,
16874 const_str_type_node, NULL_TREE);
16875 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
16876 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
16878 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
16879 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
16880 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
16882 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16883 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16884 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16886 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16887 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16888 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16890 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16891 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16892 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16894 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16895 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16896 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16898 mode = (TARGET_64BIT) ? DImode : SImode;
16899 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16900 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16901 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16903 ftype = build_function_type_list (unsigned_intDI_type_node,
16904 NULL_TREE);
16905 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16907 if (TARGET_64BIT)
16908 ftype = build_function_type_list (unsigned_intDI_type_node,
16909 NULL_TREE);
16910 else
16911 ftype = build_function_type_list (unsigned_intSI_type_node,
16912 NULL_TREE);
16913 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16915 ftype = build_function_type_list (double_type_node, NULL_TREE);
16916 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16918 ftype = build_function_type_list (void_type_node,
16919 intSI_type_node, double_type_node,
16920 NULL_TREE);
16921 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16923 ftype = build_function_type_list (void_type_node, NULL_TREE);
16924 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16926 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16927 NULL_TREE);
16928 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16929 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16931 /* AIX libm provides clog as __clog. */
16932 if (TARGET_XCOFF
16933 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16934 set_user_assembler_name (tdecl, "__clog");
16936 #ifdef SUBTARGET_INIT_BUILTINS
16937 SUBTARGET_INIT_BUILTINS;
16938 #endif
16941 /* Returns the rs6000 builtin decl for CODE. */
16943 static tree
16944 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16946 HOST_WIDE_INT fnmask;
16948 if (code >= RS6000_BUILTIN_COUNT)
16949 return error_mark_node;
16951 fnmask = rs6000_builtin_info[code].mask;
16952 if ((fnmask & rs6000_builtin_mask) != fnmask)
16954 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16955 return error_mark_node;
16958 return rs6000_builtin_decls[code];
16961 static void
16962 paired_init_builtins (void)
16964 const struct builtin_description *d;
16965 size_t i;
16966 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16968 tree int_ftype_int_v2sf_v2sf
16969 = build_function_type_list (integer_type_node,
16970 integer_type_node,
16971 V2SF_type_node,
16972 V2SF_type_node,
16973 NULL_TREE);
16974 tree pcfloat_type_node
16975 = build_pointer_type (build_qualified_type
16976 (float_type_node, TYPE_QUAL_CONST));
16978 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
16979 long_integer_type_node,
16980 pcfloat_type_node,
16981 NULL_TREE);
16982 tree void_ftype_v2sf_long_pcfloat
16983 = build_function_type_list (void_type_node,
16984 V2SF_type_node,
16985 long_integer_type_node,
16986 pcfloat_type_node,
16987 NULL_TREE);
16990 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
16991 PAIRED_BUILTIN_LX);
16994 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
16995 PAIRED_BUILTIN_STX);
16997 /* Predicates. */
16998 d = bdesc_paired_preds;
16999 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17001 tree type;
17002 HOST_WIDE_INT mask = d->mask;
17004 if ((mask & builtin_mask) != mask)
17006 if (TARGET_DEBUG_BUILTIN)
17007 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17008 d->name);
17009 continue;
17012 /* Cannot define builtin if the instruction is disabled. */
17013 gcc_assert (d->icode != CODE_FOR_nothing);
17015 if (TARGET_DEBUG_BUILTIN)
17016 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17017 (int)i, get_insn_name (d->icode), (int)d->icode,
17018 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17020 switch (insn_data[d->icode].operand[1].mode)
17022 case V2SFmode:
17023 type = int_ftype_int_v2sf_v2sf;
17024 break;
17025 default:
17026 gcc_unreachable ();
17029 def_builtin (d->name, type, d->code);
17033 static void
17034 altivec_init_builtins (void)
17036 const struct builtin_description *d;
17037 size_t i;
17038 tree ftype;
17039 tree decl;
17040 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17042 tree pvoid_type_node = build_pointer_type (void_type_node);
17044 tree pcvoid_type_node
17045 = build_pointer_type (build_qualified_type (void_type_node,
17046 TYPE_QUAL_CONST));
17048 tree int_ftype_opaque
17049 = build_function_type_list (integer_type_node,
17050 opaque_V4SI_type_node, NULL_TREE);
17051 tree opaque_ftype_opaque
17052 = build_function_type_list (integer_type_node, NULL_TREE);
17053 tree opaque_ftype_opaque_int
17054 = build_function_type_list (opaque_V4SI_type_node,
17055 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17056 tree opaque_ftype_opaque_opaque_int
17057 = build_function_type_list (opaque_V4SI_type_node,
17058 opaque_V4SI_type_node, opaque_V4SI_type_node,
17059 integer_type_node, NULL_TREE);
17060 tree opaque_ftype_opaque_opaque_opaque
17061 = build_function_type_list (opaque_V4SI_type_node,
17062 opaque_V4SI_type_node, opaque_V4SI_type_node,
17063 opaque_V4SI_type_node, NULL_TREE);
17064 tree opaque_ftype_opaque_opaque
17065 = build_function_type_list (opaque_V4SI_type_node,
17066 opaque_V4SI_type_node, opaque_V4SI_type_node,
17067 NULL_TREE);
17068 tree int_ftype_int_opaque_opaque
17069 = build_function_type_list (integer_type_node,
17070 integer_type_node, opaque_V4SI_type_node,
17071 opaque_V4SI_type_node, NULL_TREE);
17072 tree int_ftype_int_v4si_v4si
17073 = build_function_type_list (integer_type_node,
17074 integer_type_node, V4SI_type_node,
17075 V4SI_type_node, NULL_TREE);
17076 tree int_ftype_int_v2di_v2di
17077 = build_function_type_list (integer_type_node,
17078 integer_type_node, V2DI_type_node,
17079 V2DI_type_node, NULL_TREE);
17080 tree void_ftype_v4si
17081 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17082 tree v8hi_ftype_void
17083 = build_function_type_list (V8HI_type_node, NULL_TREE);
17084 tree void_ftype_void
17085 = build_function_type_list (void_type_node, NULL_TREE);
17086 tree void_ftype_int
17087 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17089 tree opaque_ftype_long_pcvoid
17090 = build_function_type_list (opaque_V4SI_type_node,
17091 long_integer_type_node, pcvoid_type_node,
17092 NULL_TREE);
17093 tree v16qi_ftype_long_pcvoid
17094 = build_function_type_list (V16QI_type_node,
17095 long_integer_type_node, pcvoid_type_node,
17096 NULL_TREE);
17097 tree v8hi_ftype_long_pcvoid
17098 = build_function_type_list (V8HI_type_node,
17099 long_integer_type_node, pcvoid_type_node,
17100 NULL_TREE);
17101 tree v4si_ftype_long_pcvoid
17102 = build_function_type_list (V4SI_type_node,
17103 long_integer_type_node, pcvoid_type_node,
17104 NULL_TREE);
17105 tree v4sf_ftype_long_pcvoid
17106 = build_function_type_list (V4SF_type_node,
17107 long_integer_type_node, pcvoid_type_node,
17108 NULL_TREE);
17109 tree v2df_ftype_long_pcvoid
17110 = build_function_type_list (V2DF_type_node,
17111 long_integer_type_node, pcvoid_type_node,
17112 NULL_TREE);
17113 tree v2di_ftype_long_pcvoid
17114 = build_function_type_list (V2DI_type_node,
17115 long_integer_type_node, pcvoid_type_node,
17116 NULL_TREE);
17118 tree void_ftype_opaque_long_pvoid
17119 = build_function_type_list (void_type_node,
17120 opaque_V4SI_type_node, long_integer_type_node,
17121 pvoid_type_node, NULL_TREE);
17122 tree void_ftype_v4si_long_pvoid
17123 = build_function_type_list (void_type_node,
17124 V4SI_type_node, long_integer_type_node,
17125 pvoid_type_node, NULL_TREE);
17126 tree void_ftype_v16qi_long_pvoid
17127 = build_function_type_list (void_type_node,
17128 V16QI_type_node, long_integer_type_node,
17129 pvoid_type_node, NULL_TREE);
17131 tree void_ftype_v16qi_pvoid_long
17132 = build_function_type_list (void_type_node,
17133 V16QI_type_node, pvoid_type_node,
17134 long_integer_type_node, NULL_TREE);
17136 tree void_ftype_v8hi_long_pvoid
17137 = build_function_type_list (void_type_node,
17138 V8HI_type_node, long_integer_type_node,
17139 pvoid_type_node, NULL_TREE);
17140 tree void_ftype_v4sf_long_pvoid
17141 = build_function_type_list (void_type_node,
17142 V4SF_type_node, long_integer_type_node,
17143 pvoid_type_node, NULL_TREE);
17144 tree void_ftype_v2df_long_pvoid
17145 = build_function_type_list (void_type_node,
17146 V2DF_type_node, long_integer_type_node,
17147 pvoid_type_node, NULL_TREE);
17148 tree void_ftype_v2di_long_pvoid
17149 = build_function_type_list (void_type_node,
17150 V2DI_type_node, long_integer_type_node,
17151 pvoid_type_node, NULL_TREE);
17152 tree int_ftype_int_v8hi_v8hi
17153 = build_function_type_list (integer_type_node,
17154 integer_type_node, V8HI_type_node,
17155 V8HI_type_node, NULL_TREE);
17156 tree int_ftype_int_v16qi_v16qi
17157 = build_function_type_list (integer_type_node,
17158 integer_type_node, V16QI_type_node,
17159 V16QI_type_node, NULL_TREE);
17160 tree int_ftype_int_v4sf_v4sf
17161 = build_function_type_list (integer_type_node,
17162 integer_type_node, V4SF_type_node,
17163 V4SF_type_node, NULL_TREE);
17164 tree int_ftype_int_v2df_v2df
17165 = build_function_type_list (integer_type_node,
17166 integer_type_node, V2DF_type_node,
17167 V2DF_type_node, NULL_TREE);
17168 tree v2di_ftype_v2di
17169 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17170 tree v4si_ftype_v4si
17171 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17172 tree v8hi_ftype_v8hi
17173 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17174 tree v16qi_ftype_v16qi
17175 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17176 tree v4sf_ftype_v4sf
17177 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17178 tree v2df_ftype_v2df
17179 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17180 tree void_ftype_pcvoid_int_int
17181 = build_function_type_list (void_type_node,
17182 pcvoid_type_node, integer_type_node,
17183 integer_type_node, NULL_TREE);
17185 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17186 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17187 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17188 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17189 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17190 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17191 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17192 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17193 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17194 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17195 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17196 ALTIVEC_BUILTIN_LVXL_V2DF);
17197 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17198 ALTIVEC_BUILTIN_LVXL_V2DI);
17199 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17200 ALTIVEC_BUILTIN_LVXL_V4SF);
17201 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17202 ALTIVEC_BUILTIN_LVXL_V4SI);
17203 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17204 ALTIVEC_BUILTIN_LVXL_V8HI);
17205 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17206 ALTIVEC_BUILTIN_LVXL_V16QI);
17207 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17208 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17209 ALTIVEC_BUILTIN_LVX_V2DF);
17210 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17211 ALTIVEC_BUILTIN_LVX_V2DI);
17212 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17213 ALTIVEC_BUILTIN_LVX_V4SF);
17214 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17215 ALTIVEC_BUILTIN_LVX_V4SI);
17216 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17217 ALTIVEC_BUILTIN_LVX_V8HI);
17218 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17219 ALTIVEC_BUILTIN_LVX_V16QI);
17220 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17221 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17222 ALTIVEC_BUILTIN_STVX_V2DF);
17223 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17224 ALTIVEC_BUILTIN_STVX_V2DI);
17225 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17226 ALTIVEC_BUILTIN_STVX_V4SF);
17227 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17228 ALTIVEC_BUILTIN_STVX_V4SI);
17229 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17230 ALTIVEC_BUILTIN_STVX_V8HI);
17231 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17232 ALTIVEC_BUILTIN_STVX_V16QI);
17233 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17234 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17235 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17236 ALTIVEC_BUILTIN_STVXL_V2DF);
17237 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17238 ALTIVEC_BUILTIN_STVXL_V2DI);
17239 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17240 ALTIVEC_BUILTIN_STVXL_V4SF);
17241 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17242 ALTIVEC_BUILTIN_STVXL_V4SI);
17243 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17244 ALTIVEC_BUILTIN_STVXL_V8HI);
17245 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17246 ALTIVEC_BUILTIN_STVXL_V16QI);
17247 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17248 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17249 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17250 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17251 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17252 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17253 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17254 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17255 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17256 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17257 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17258 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17259 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17260 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17261 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17262 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17264 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17265 VSX_BUILTIN_LXVD2X_V2DF);
17266 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17267 VSX_BUILTIN_LXVD2X_V2DI);
17268 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17269 VSX_BUILTIN_LXVW4X_V4SF);
17270 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17271 VSX_BUILTIN_LXVW4X_V4SI);
17272 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17273 VSX_BUILTIN_LXVW4X_V8HI);
17274 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17275 VSX_BUILTIN_LXVW4X_V16QI);
17276 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17277 VSX_BUILTIN_STXVD2X_V2DF);
17278 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17279 VSX_BUILTIN_STXVD2X_V2DI);
17280 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17281 VSX_BUILTIN_STXVW4X_V4SF);
17282 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17283 VSX_BUILTIN_STXVW4X_V4SI);
17284 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17285 VSX_BUILTIN_STXVW4X_V8HI);
17286 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17287 VSX_BUILTIN_STXVW4X_V16QI);
17289 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17290 VSX_BUILTIN_LD_ELEMREV_V2DF);
17291 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17292 VSX_BUILTIN_LD_ELEMREV_V2DI);
17293 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17294 VSX_BUILTIN_LD_ELEMREV_V4SF);
17295 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17296 VSX_BUILTIN_LD_ELEMREV_V4SI);
17297 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17298 VSX_BUILTIN_ST_ELEMREV_V2DF);
17299 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17300 VSX_BUILTIN_ST_ELEMREV_V2DI);
17301 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17302 VSX_BUILTIN_ST_ELEMREV_V4SF);
17303 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17304 VSX_BUILTIN_ST_ELEMREV_V4SI);
17306 if (TARGET_P9_VECTOR)
17308 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17309 VSX_BUILTIN_LD_ELEMREV_V8HI);
17310 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17311 VSX_BUILTIN_LD_ELEMREV_V16QI);
17312 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
17313 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
17314 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
17315 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
17317 else
17319 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
17320 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
17321 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
17322 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
17323 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
17324 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
17325 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
17326 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
17329 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17330 VSX_BUILTIN_VEC_LD);
17331 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17332 VSX_BUILTIN_VEC_ST);
17333 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17334 VSX_BUILTIN_VEC_XL);
17335 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17336 VSX_BUILTIN_VEC_XST);
17338 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17339 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17340 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17342 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17343 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17344 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17345 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17346 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17347 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17348 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17349 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17350 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17351 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17352 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17353 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17355 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17356 ALTIVEC_BUILTIN_VEC_ADDE);
17357 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17358 ALTIVEC_BUILTIN_VEC_ADDEC);
17359 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17360 ALTIVEC_BUILTIN_VEC_CMPNE);
17361 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17362 ALTIVEC_BUILTIN_VEC_MUL);
17363 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17364 ALTIVEC_BUILTIN_VEC_SUBE);
17365 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17366 ALTIVEC_BUILTIN_VEC_SUBEC);
17368 /* Cell builtins. */
17369 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17370 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17371 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17372 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17374 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17375 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17376 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17377 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17379 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17380 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17381 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17382 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17384 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17385 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17386 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17387 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17389 if (TARGET_P9_VECTOR)
17390 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17391 P9V_BUILTIN_STXVL);
17393 /* Add the DST variants. */
17394 d = bdesc_dst;
17395 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17397 HOST_WIDE_INT mask = d->mask;
17399 /* It is expected that these dst built-in functions may have
17400 d->icode equal to CODE_FOR_nothing. */
17401 if ((mask & builtin_mask) != mask)
17403 if (TARGET_DEBUG_BUILTIN)
17404 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17405 d->name);
17406 continue;
17408 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17411 /* Initialize the predicates. */
17412 d = bdesc_altivec_preds;
17413 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17415 machine_mode mode1;
17416 tree type;
17417 HOST_WIDE_INT mask = d->mask;
17419 if ((mask & builtin_mask) != mask)
17421 if (TARGET_DEBUG_BUILTIN)
17422 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17423 d->name);
17424 continue;
17427 if (rs6000_overloaded_builtin_p (d->code))
17428 mode1 = VOIDmode;
17429 else
17431 /* Cannot define builtin if the instruction is disabled. */
17432 gcc_assert (d->icode != CODE_FOR_nothing);
17433 mode1 = insn_data[d->icode].operand[1].mode;
17436 switch (mode1)
17438 case VOIDmode:
17439 type = int_ftype_int_opaque_opaque;
17440 break;
17441 case V2DImode:
17442 type = int_ftype_int_v2di_v2di;
17443 break;
17444 case V4SImode:
17445 type = int_ftype_int_v4si_v4si;
17446 break;
17447 case V8HImode:
17448 type = int_ftype_int_v8hi_v8hi;
17449 break;
17450 case V16QImode:
17451 type = int_ftype_int_v16qi_v16qi;
17452 break;
17453 case V4SFmode:
17454 type = int_ftype_int_v4sf_v4sf;
17455 break;
17456 case V2DFmode:
17457 type = int_ftype_int_v2df_v2df;
17458 break;
17459 default:
17460 gcc_unreachable ();
17463 def_builtin (d->name, type, d->code);
17466 /* Initialize the abs* operators. */
17467 d = bdesc_abs;
17468 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17470 machine_mode mode0;
17471 tree type;
17472 HOST_WIDE_INT mask = d->mask;
17474 if ((mask & builtin_mask) != mask)
17476 if (TARGET_DEBUG_BUILTIN)
17477 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17478 d->name);
17479 continue;
17482 /* Cannot define builtin if the instruction is disabled. */
17483 gcc_assert (d->icode != CODE_FOR_nothing);
17484 mode0 = insn_data[d->icode].operand[0].mode;
17486 switch (mode0)
17488 case V2DImode:
17489 type = v2di_ftype_v2di;
17490 break;
17491 case V4SImode:
17492 type = v4si_ftype_v4si;
17493 break;
17494 case V8HImode:
17495 type = v8hi_ftype_v8hi;
17496 break;
17497 case V16QImode:
17498 type = v16qi_ftype_v16qi;
17499 break;
17500 case V4SFmode:
17501 type = v4sf_ftype_v4sf;
17502 break;
17503 case V2DFmode:
17504 type = v2df_ftype_v2df;
17505 break;
17506 default:
17507 gcc_unreachable ();
17510 def_builtin (d->name, type, d->code);
17513 /* Initialize the target builtin that implements
17514 targetm.vectorize.builtin_mask_for_load. */
17516 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17517 v16qi_ftype_long_pcvoid,
17518 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17519 BUILT_IN_MD, NULL, NULL_TREE);
17520 TREE_READONLY (decl) = 1;
17521 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17522 altivec_builtin_mask_for_load = decl;
17524 /* Access to the vec_init patterns. */
17525 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17526 integer_type_node, integer_type_node,
17527 integer_type_node, NULL_TREE);
17528 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17530 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17531 short_integer_type_node,
17532 short_integer_type_node,
17533 short_integer_type_node,
17534 short_integer_type_node,
17535 short_integer_type_node,
17536 short_integer_type_node,
17537 short_integer_type_node, NULL_TREE);
17538 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17540 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17541 char_type_node, char_type_node,
17542 char_type_node, char_type_node,
17543 char_type_node, char_type_node,
17544 char_type_node, char_type_node,
17545 char_type_node, char_type_node,
17546 char_type_node, char_type_node,
17547 char_type_node, char_type_node,
17548 char_type_node, NULL_TREE);
17549 def_builtin ("__builtin_vec_init_v16qi", ftype,
17550 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17552 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17553 float_type_node, float_type_node,
17554 float_type_node, NULL_TREE);
17555 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17557 /* VSX builtins. */
17558 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17559 double_type_node, NULL_TREE);
17560 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17562 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17563 intDI_type_node, NULL_TREE);
17564 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17566 /* Access to the vec_set patterns. */
17567 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17568 intSI_type_node,
17569 integer_type_node, NULL_TREE);
17570 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17572 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17573 intHI_type_node,
17574 integer_type_node, NULL_TREE);
17575 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17577 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17578 intQI_type_node,
17579 integer_type_node, NULL_TREE);
17580 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17582 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17583 float_type_node,
17584 integer_type_node, NULL_TREE);
17585 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17587 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17588 double_type_node,
17589 integer_type_node, NULL_TREE);
17590 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17592 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17593 intDI_type_node,
17594 integer_type_node, NULL_TREE);
17595 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17597 /* Access to the vec_extract patterns. */
17598 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17599 integer_type_node, NULL_TREE);
17600 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17602 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17603 integer_type_node, NULL_TREE);
17604 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17606 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17607 integer_type_node, NULL_TREE);
17608 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17610 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17611 integer_type_node, NULL_TREE);
17612 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17614 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17615 integer_type_node, NULL_TREE);
17616 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17618 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17619 integer_type_node, NULL_TREE);
17620 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17623 if (V1TI_type_node)
17625 tree v1ti_ftype_long_pcvoid
17626 = build_function_type_list (V1TI_type_node,
17627 long_integer_type_node, pcvoid_type_node,
17628 NULL_TREE);
17629 tree void_ftype_v1ti_long_pvoid
17630 = build_function_type_list (void_type_node,
17631 V1TI_type_node, long_integer_type_node,
17632 pvoid_type_node, NULL_TREE);
17633 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17634 VSX_BUILTIN_LXVD2X_V1TI);
17635 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17636 VSX_BUILTIN_STXVD2X_V1TI);
17637 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17638 NULL_TREE, NULL_TREE);
17639 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17640 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17641 intTI_type_node,
17642 integer_type_node, NULL_TREE);
17643 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17644 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17645 integer_type_node, NULL_TREE);
17646 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17651 static void
17652 htm_init_builtins (void)
17654 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17655 const struct builtin_description *d;
17656 size_t i;
17658 d = bdesc_htm;
17659 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17661 tree op[MAX_HTM_OPERANDS], type;
17662 HOST_WIDE_INT mask = d->mask;
17663 unsigned attr = rs6000_builtin_info[d->code].attr;
17664 bool void_func = (attr & RS6000_BTC_VOID);
17665 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17666 int nopnds = 0;
17667 tree gpr_type_node;
17668 tree rettype;
17669 tree argtype;
17671 /* It is expected that these htm built-in functions may have
17672 d->icode equal to CODE_FOR_nothing. */
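17673 /* With -m32 -mpowerpc64 the GPRs are still 64 bits wide, so SPR values need the full 'long long' width there.  */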
17674 if (TARGET_32BIT && TARGET_POWERPC64)
17675 gpr_type_node = long_long_unsigned_type_node;
17676 else
17677 gpr_type_node = long_unsigned_type_node;
17679 if (attr & RS6000_BTC_SPR)
17681 rettype = gpr_type_node;
17682 argtype = gpr_type_node;
17684 else if (d->code == HTM_BUILTIN_TABORTDC
17685 || d->code == HTM_BUILTIN_TABORTDCI)
17687 rettype = unsigned_type_node;
17688 argtype = gpr_type_node;
17690 else
17692 rettype = unsigned_type_node;
17693 argtype = unsigned_type_node;
17696 if ((mask & builtin_mask) != mask)
17698 if (TARGET_DEBUG_BUILTIN)
17699 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17700 continue;
17703 if (d->name == 0)
17705 if (TARGET_DEBUG_BUILTIN)
17706 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17707 (long unsigned) i);
17708 continue;
17711 op[nopnds++] = (void_func) ? void_type_node : rettype;
17713 if (attr_args == RS6000_BTC_UNARY)
17714 op[nopnds++] = argtype;
17715 else if (attr_args == RS6000_BTC_BINARY)
17717 op[nopnds++] = argtype;
17718 op[nopnds++] = argtype;
17720 else if (attr_args == RS6000_BTC_TERNARY)
17722 op[nopnds++] = argtype;
17723 op[nopnds++] = argtype;
17724 op[nopnds++] = argtype;
17727 switch (nopnds)
17729 case 1:
17730 type = build_function_type_list (op[0], NULL_TREE);
17731 break;
17732 case 2:
17733 type = build_function_type_list (op[0], op[1], NULL_TREE);
17734 break;
17735 case 3:
17736 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17737 break;
17738 case 4:
17739 type = build_function_type_list (op[0], op[1], op[2], op[3],
17740 NULL_TREE);
17741 break;
17742 default:
17743 gcc_unreachable ();
17746 def_builtin (d->name, type, d->code);
17750 /* Hash function for builtin functions with up to 3 arguments and a return
17751 type. */
17752 hashval_t
17753 builtin_hasher::hash (builtin_hash_struct *bh)
17755 unsigned ret = 0;
17756 int i;
17758 for (i = 0; i < 4; i++)
17760 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17761 ret = (ret * 2) + bh->uns_p[i];
17764 return ret;
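/* Note: the fold above is a positional encoding; each step multiplies by
   MAX_MACHINE_MODE and then by 2, so two signatures hash identically only
   if every (mode, uns_p) pair matches, up to the point where the product
   overflows hashval_t.  builtin_hasher::equal below resolves any such
   collisions by comparing all eight fields.  */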
17767 /* Compare builtin hash entries H1 and H2 for equivalence. */
17768 bool
17769 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17771 return ((p1->mode[0] == p2->mode[0])
17772 && (p1->mode[1] == p2->mode[1])
17773 && (p1->mode[2] == p2->mode[2])
17774 && (p1->mode[3] == p2->mode[3])
17775 && (p1->uns_p[0] == p2->uns_p[0])
17776 && (p1->uns_p[1] == p2->uns_p[1])
17777 && (p1->uns_p[2] == p2->uns_p[2])
17778 && (p1->uns_p[3] == p2->uns_p[3]));
17781 /* Map types for builtin functions with an explicit return type and up to 3
17782 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
17783 of the argument. */
17784 static tree
17785 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17786 machine_mode mode_arg1, machine_mode mode_arg2,
17787 enum rs6000_builtins builtin, const char *name)
17789 struct builtin_hash_struct h;
17790 struct builtin_hash_struct *h2;
17791 int num_args = 3;
17792 int i;
17793 tree ret_type = NULL_TREE;
17794 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17796 /* Create builtin_hash_table. */
17797 if (builtin_hash_table == NULL)
17798 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17800 h.type = NULL_TREE;
17801 h.mode[0] = mode_ret;
17802 h.mode[1] = mode_arg0;
17803 h.mode[2] = mode_arg1;
17804 h.mode[3] = mode_arg2;
17805 h.uns_p[0] = 0;
17806 h.uns_p[1] = 0;
17807 h.uns_p[2] = 0;
17808 h.uns_p[3] = 0;
17810 /* If the builtin produces unsigned results or takes unsigned
17811 arguments, and it is returned as a decl for the vectorizer (such as
17812 widening multiplies, permute), make sure the arguments and return value
17813 are type correct. */
17814 switch (builtin)
17816 /* unsigned 1 argument functions. */
17817 case CRYPTO_BUILTIN_VSBOX:
17818 case P8V_BUILTIN_VGBBD:
17819 case MISC_BUILTIN_CDTBCD:
17820 case MISC_BUILTIN_CBCDTD:
17821 h.uns_p[0] = 1;
17822 h.uns_p[1] = 1;
17823 break;
17825 /* unsigned 2 argument functions. */
17826 case ALTIVEC_BUILTIN_VMULEUB:
17827 case ALTIVEC_BUILTIN_VMULEUH:
17828 case ALTIVEC_BUILTIN_VMULEUW:
17829 case ALTIVEC_BUILTIN_VMULOUB:
17830 case ALTIVEC_BUILTIN_VMULOUH:
17831 case ALTIVEC_BUILTIN_VMULOUW:
17832 case CRYPTO_BUILTIN_VCIPHER:
17833 case CRYPTO_BUILTIN_VCIPHERLAST:
17834 case CRYPTO_BUILTIN_VNCIPHER:
17835 case CRYPTO_BUILTIN_VNCIPHERLAST:
17836 case CRYPTO_BUILTIN_VPMSUMB:
17837 case CRYPTO_BUILTIN_VPMSUMH:
17838 case CRYPTO_BUILTIN_VPMSUMW:
17839 case CRYPTO_BUILTIN_VPMSUMD:
17840 case CRYPTO_BUILTIN_VPMSUM:
17841 case MISC_BUILTIN_ADDG6S:
17842 case MISC_BUILTIN_DIVWEU:
17843 case MISC_BUILTIN_DIVWEUO:
17844 case MISC_BUILTIN_DIVDEU:
17845 case MISC_BUILTIN_DIVDEUO:
17846 case VSX_BUILTIN_UDIV_V2DI:
17847 case ALTIVEC_BUILTIN_VMAXUB:
17848 case ALTIVEC_BUILTIN_VMINUB:
17849 case ALTIVEC_BUILTIN_VMAXUH:
17850 case ALTIVEC_BUILTIN_VMINUH:
17851 case ALTIVEC_BUILTIN_VMAXUW:
17852 case ALTIVEC_BUILTIN_VMINUW:
17853 case P8V_BUILTIN_VMAXUD:
17854 case P8V_BUILTIN_VMINUD:
17855 h.uns_p[0] = 1;
17856 h.uns_p[1] = 1;
17857 h.uns_p[2] = 1;
17858 break;
17860 /* unsigned 3 argument functions. */
17861 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17862 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17863 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17864 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17865 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17866 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17867 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17868 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17869 case VSX_BUILTIN_VPERM_16QI_UNS:
17870 case VSX_BUILTIN_VPERM_8HI_UNS:
17871 case VSX_BUILTIN_VPERM_4SI_UNS:
17872 case VSX_BUILTIN_VPERM_2DI_UNS:
17873 case VSX_BUILTIN_XXSEL_16QI_UNS:
17874 case VSX_BUILTIN_XXSEL_8HI_UNS:
17875 case VSX_BUILTIN_XXSEL_4SI_UNS:
17876 case VSX_BUILTIN_XXSEL_2DI_UNS:
17877 case CRYPTO_BUILTIN_VPERMXOR:
17878 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17879 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17880 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17881 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17882 case CRYPTO_BUILTIN_VSHASIGMAW:
17883 case CRYPTO_BUILTIN_VSHASIGMAD:
17884 case CRYPTO_BUILTIN_VSHASIGMA:
17885 h.uns_p[0] = 1;
17886 h.uns_p[1] = 1;
17887 h.uns_p[2] = 1;
17888 h.uns_p[3] = 1;
17889 break;
17891 /* signed permute functions with unsigned char mask. */
17892 case ALTIVEC_BUILTIN_VPERM_16QI:
17893 case ALTIVEC_BUILTIN_VPERM_8HI:
17894 case ALTIVEC_BUILTIN_VPERM_4SI:
17895 case ALTIVEC_BUILTIN_VPERM_4SF:
17896 case ALTIVEC_BUILTIN_VPERM_2DI:
17897 case ALTIVEC_BUILTIN_VPERM_2DF:
17898 case VSX_BUILTIN_VPERM_16QI:
17899 case VSX_BUILTIN_VPERM_8HI:
17900 case VSX_BUILTIN_VPERM_4SI:
17901 case VSX_BUILTIN_VPERM_4SF:
17902 case VSX_BUILTIN_VPERM_2DI:
17903 case VSX_BUILTIN_VPERM_2DF:
17904 h.uns_p[3] = 1;
17905 break;
17907 /* unsigned args, signed return. */
17908 case VSX_BUILTIN_XVCVUXDSP:
17909 case VSX_BUILTIN_XVCVUXDDP_UNS:
17910 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17911 h.uns_p[1] = 1;
17912 break;
17914 /* signed args, unsigned return. */
17915 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17916 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17917 case MISC_BUILTIN_UNPACK_TD:
17918 case MISC_BUILTIN_UNPACK_V1TI:
17919 h.uns_p[0] = 1;
17920 break;
17922 /* unsigned arguments for 128-bit pack instructions. */
17923 case MISC_BUILTIN_PACK_TD:
17924 case MISC_BUILTIN_PACK_V1TI:
17925 h.uns_p[1] = 1;
17926 h.uns_p[2] = 1;
17927 break;
17929 /* unsigned second arguments (vector shift right). */
17930 case ALTIVEC_BUILTIN_VSRB:
17931 case ALTIVEC_BUILTIN_VSRH:
17932 case ALTIVEC_BUILTIN_VSRW:
17933 case P8V_BUILTIN_VSRD:
17934 h.uns_p[2] = 1;
17935 break;
17937 default:
17938 break;
17941 /* Figure out how many args are present. */
17942 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17943 num_args--;
17945 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17946 if (!ret_type && h.uns_p[0])
17947 ret_type = builtin_mode_to_type[h.mode[0]][0];
17949 if (!ret_type)
17950 fatal_error (input_location,
17951 "internal error: builtin function %s had an unexpected "
17952 "return type %s", name, GET_MODE_NAME (h.mode[0]));
17954 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17955 arg_type[i] = NULL_TREE;
17957 for (i = 0; i < num_args; i++)
17959 int m = (int) h.mode[i+1];
17960 int uns_p = h.uns_p[i+1];
17962 arg_type[i] = builtin_mode_to_type[m][uns_p];
17963 if (!arg_type[i] && uns_p)
17964 arg_type[i] = builtin_mode_to_type[m][0];
17966 if (!arg_type[i])
17967 fatal_error (input_location,
17968 "internal error: builtin function %s, argument %d "
17969 "had unexpected argument type %s", name, i,
17970 GET_MODE_NAME (m));
17973 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17974 if (*found == NULL)
17976 h2 = ggc_alloc<builtin_hash_struct> ();
17977 *h2 = h;
17978 *found = h2;
17980 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17981 arg_type[2], NULL_TREE);
17984 return (*found)->type;
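/* Usage sketch: the loops in rs6000_common_init_builtins below call this
   as, e.g.,

     builtin_function_type (V4SImode, V4SImode, V4SImode, V16QImode,
			    d->code, d->name);

   for a vperm-style ternary builtin, and every builtin sharing that mode
   and signedness signature gets the one cached FUNCTION_TYPE node.  */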
17987 static void
17988 rs6000_common_init_builtins (void)
17990 const struct builtin_description *d;
17991 size_t i;
17993 tree opaque_ftype_opaque = NULL_TREE;
17994 tree opaque_ftype_opaque_opaque = NULL_TREE;
17995 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17996 tree v2si_ftype = NULL_TREE;
17997 tree v2si_ftype_qi = NULL_TREE;
17998 tree v2si_ftype_v2si_qi = NULL_TREE;
17999 tree v2si_ftype_int_qi = NULL_TREE;
18000 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18002 if (!TARGET_PAIRED_FLOAT)
18004 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18005 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18008 /* Paired builtins are only available if you build a compiler with the
18009 appropriate options, so only create those builtins when that option is
18010 given. Create Altivec and VSX builtins on machines with at
18011 least the general purpose extensions (970 and newer) to allow the use of
18012 the target attribute.  */
18014 if (TARGET_EXTRA_BUILTINS)
18015 builtin_mask |= RS6000_BTM_COMMON;
18017 /* Add the ternary operators. */
18018 d = bdesc_3arg;
18019 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18021 tree type;
18022 HOST_WIDE_INT mask = d->mask;
18024 if ((mask & builtin_mask) != mask)
18026 if (TARGET_DEBUG_BUILTIN)
18027 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18028 continue;
18031 if (rs6000_overloaded_builtin_p (d->code))
18033 if (! (type = opaque_ftype_opaque_opaque_opaque))
18034 type = opaque_ftype_opaque_opaque_opaque
18035 = build_function_type_list (opaque_V4SI_type_node,
18036 opaque_V4SI_type_node,
18037 opaque_V4SI_type_node,
18038 opaque_V4SI_type_node,
18039 NULL_TREE);
18041 else
18043 enum insn_code icode = d->icode;
18044 if (d->name == 0)
18046 if (TARGET_DEBUG_BUILTIN)
18047 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
18048 (long unsigned)i);
18050 continue;
18053 if (icode == CODE_FOR_nothing)
18055 if (TARGET_DEBUG_BUILTIN)
18056 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18057 d->name);
18059 continue;
18062 type = builtin_function_type (insn_data[icode].operand[0].mode,
18063 insn_data[icode].operand[1].mode,
18064 insn_data[icode].operand[2].mode,
18065 insn_data[icode].operand[3].mode,
18066 d->code, d->name);
18069 def_builtin (d->name, type, d->code);
18072 /* Add the binary operators. */
18073 d = bdesc_2arg;
18074 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18076 machine_mode mode0, mode1, mode2;
18077 tree type;
18078 HOST_WIDE_INT mask = d->mask;
18080 if ((mask & builtin_mask) != mask)
18082 if (TARGET_DEBUG_BUILTIN)
18083 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18084 continue;
18087 if (rs6000_overloaded_builtin_p (d->code))
18089 if (! (type = opaque_ftype_opaque_opaque))
18090 type = opaque_ftype_opaque_opaque
18091 = build_function_type_list (opaque_V4SI_type_node,
18092 opaque_V4SI_type_node,
18093 opaque_V4SI_type_node,
18094 NULL_TREE);
18096 else
18098 enum insn_code icode = d->icode;
18099 if (d->name == 0)
18101 if (TARGET_DEBUG_BUILTIN)
18102 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
18103 (long unsigned)i);
18105 continue;
18108 if (icode == CODE_FOR_nothing)
18110 if (TARGET_DEBUG_BUILTIN)
18111 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18112 d->name);
18114 continue;
18117 mode0 = insn_data[icode].operand[0].mode;
18118 mode1 = insn_data[icode].operand[1].mode;
18119 mode2 = insn_data[icode].operand[2].mode;
18121 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18123 if (! (type = v2si_ftype_v2si_qi))
18124 type = v2si_ftype_v2si_qi
18125 = build_function_type_list (opaque_V2SI_type_node,
18126 opaque_V2SI_type_node,
18127 char_type_node,
18128 NULL_TREE);
18131 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18132 && mode2 == QImode)
18134 if (! (type = v2si_ftype_int_qi))
18135 type = v2si_ftype_int_qi
18136 = build_function_type_list (opaque_V2SI_type_node,
18137 integer_type_node,
18138 char_type_node,
18139 NULL_TREE);
18142 else
18143 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18144 d->code, d->name);
18147 def_builtin (d->name, type, d->code);
18150 /* Add the simple unary operators. */
18151 d = bdesc_1arg;
18152 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18154 machine_mode mode0, mode1;
18155 tree type;
18156 HOST_WIDE_INT mask = d->mask;
18158 if ((mask & builtin_mask) != mask)
18160 if (TARGET_DEBUG_BUILTIN)
18161 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18162 continue;
18165 if (rs6000_overloaded_builtin_p (d->code))
18167 if (! (type = opaque_ftype_opaque))
18168 type = opaque_ftype_opaque
18169 = build_function_type_list (opaque_V4SI_type_node,
18170 opaque_V4SI_type_node,
18171 NULL_TREE);
18173 else
18175 enum insn_code icode = d->icode;
18176 if (d->name == 0)
18178 if (TARGET_DEBUG_BUILTIN)
18179 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
18180 (long unsigned)i);
18182 continue;
18185 if (icode == CODE_FOR_nothing)
18187 if (TARGET_DEBUG_BUILTIN)
18188 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18189 d->name);
18191 continue;
18194 mode0 = insn_data[icode].operand[0].mode;
18195 mode1 = insn_data[icode].operand[1].mode;
18197 if (mode0 == V2SImode && mode1 == QImode)
18199 if (! (type = v2si_ftype_qi))
18200 type = v2si_ftype_qi
18201 = build_function_type_list (opaque_V2SI_type_node,
18202 char_type_node,
18203 NULL_TREE);
18206 else
18207 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18208 d->code, d->name);
18211 def_builtin (d->name, type, d->code);
18214 /* Add the simple no-argument operators. */
18215 d = bdesc_0arg;
18216 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18218 machine_mode mode0;
18219 tree type;
18220 HOST_WIDE_INT mask = d->mask;
18222 if ((mask & builtin_mask) != mask)
18224 if (TARGET_DEBUG_BUILTIN)
18225 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18226 continue;
18228 if (rs6000_overloaded_builtin_p (d->code))
18230 if (!opaque_ftype_opaque)
18231 opaque_ftype_opaque
18232 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18233 type = opaque_ftype_opaque;
18235 else
18237 enum insn_code icode = d->icode;
18238 if (d->name == 0)
18240 if (TARGET_DEBUG_BUILTIN)
18241 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18242 (long unsigned) i);
18243 continue;
18245 if (icode == CODE_FOR_nothing)
18247 if (TARGET_DEBUG_BUILTIN)
18248 fprintf (stderr,
18249 "rs6000_builtin, skip no-argument %s (no code)\n",
18250 d->name);
18251 continue;
18253 mode0 = insn_data[icode].operand[0].mode;
18254 if (mode0 == V2SImode)
18256 /* Code for paired single.  */
18257 if (! (type = v2si_ftype))
18259 v2si_ftype
18260 = build_function_type_list (opaque_V2SI_type_node,
18261 NULL_TREE);
18262 type = v2si_ftype;
18265 else
18266 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18267 d->code, d->name);
18269 def_builtin (d->name, type, d->code);
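/* E.g. a builtin whose mask includes RS6000_BTM_ALTIVEC is skipped by the
   (mask & builtin_mask) test in the loops above unless AltiVec is enabled,
   either globally or via the target attribute; this is how -maltivec and
   friends gate the builtin set.  */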
18273 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18274 static void
18275 init_float128_ibm (machine_mode mode)
18277 if (!TARGET_XL_COMPAT)
18279 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18280 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18281 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18282 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18284 if (!TARGET_HARD_FLOAT)
18286 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18287 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18288 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18289 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18290 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18291 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18292 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18293 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18295 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18296 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18297 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18298 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18299 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18300 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18301 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18302 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18305 else
18307 set_optab_libfunc (add_optab, mode, "_xlqadd");
18308 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18309 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18310 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18313 /* Add various conversions for IFmode to use the traditional TFmode
18314 names. */
18315 if (mode == IFmode)
18317 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18318 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18319 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18320 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18321 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18322 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18324 if (TARGET_POWERPC64)
18326 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18327 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18328 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18329 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18334 /* Set up IEEE 128-bit floating point routines. Use different names if the
18335 arguments can be passed in a vector register. The historical PowerPC
18336 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18337 continue to use that if we aren't using vector registers to pass IEEE
18338 128-bit floating point. */
18340 static void
18341 init_float128_ieee (machine_mode mode)
18343 if (FLOAT128_VECTOR_P (mode))
18345 set_optab_libfunc (add_optab, mode, "__addkf3");
18346 set_optab_libfunc (sub_optab, mode, "__subkf3");
18347 set_optab_libfunc (neg_optab, mode, "__negkf2");
18348 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18349 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18350 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18351 set_optab_libfunc (abs_optab, mode, "__abstkf2");
18353 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18354 set_optab_libfunc (ne_optab, mode, "__nekf2");
18355 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18356 set_optab_libfunc (ge_optab, mode, "__gekf2");
18357 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18358 set_optab_libfunc (le_optab, mode, "__lekf2");
18359 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18361 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18362 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18363 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18364 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18366 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18367 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18368 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18370 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18371 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18372 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18374 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18375 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18376 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18377 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18378 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18379 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18381 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18382 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18383 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18384 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18386 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18387 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18388 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18389 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18391 if (TARGET_POWERPC64)
18393 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18394 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18395 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18396 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18400 else
18402 set_optab_libfunc (add_optab, mode, "_q_add");
18403 set_optab_libfunc (sub_optab, mode, "_q_sub");
18404 set_optab_libfunc (neg_optab, mode, "_q_neg");
18405 set_optab_libfunc (smul_optab, mode, "_q_mul");
18406 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18407 if (TARGET_PPC_GPOPT)
18408 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18410 set_optab_libfunc (eq_optab, mode, "_q_feq");
18411 set_optab_libfunc (ne_optab, mode, "_q_fne");
18412 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18413 set_optab_libfunc (ge_optab, mode, "_q_fge");
18414 set_optab_libfunc (lt_optab, mode, "_q_flt");
18415 set_optab_libfunc (le_optab, mode, "_q_fle");
18417 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18418 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18419 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18420 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18421 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18422 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18423 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18424 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18428 static void
18429 rs6000_init_libfuncs (void)
18431 /* __float128 support. */
18432 if (TARGET_FLOAT128_TYPE)
18434 init_float128_ibm (IFmode);
18435 init_float128_ieee (KFmode);
18438 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18439 if (TARGET_LONG_DOUBLE_128)
18441 if (!TARGET_IEEEQUAD)
18442 init_float128_ibm (TFmode);
18444 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18445 else
18446 init_float128_ieee (TFmode);
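/* Illustrative effect (sketch, assuming a 64-bit Linux target with VSX,
   -mfloat128, and the default IBM long double format):

     __float128 mulq (__float128 a, __float128 b) { return a * b; }
     long double mull (long double a, long double b) { return a * b; }

   mulq lowers to a call to __mulkf3 (KFmode, IEEE binary128) while mull
   calls __gcc_qmul (TFmode, IBM double-double), matching the tables
   registered above.  */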
18450 /* Emit a potentially record-form instruction, setting DST from SRC.
18451 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18452 signed comparison of DST with zero. If DOT is 1, the generated RTL
18453 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18454 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18455 a separate COMPARE. */
18457 void
18458 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18460 if (dot == 0)
18462 emit_move_insn (dst, src);
18463 return;
18466 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18468 emit_move_insn (dst, src);
18469 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18470 return;
18473 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18474 if (dot == 1)
18476 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18477 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18479 else
18481 rtx set = gen_rtx_SET (dst, src);
18482 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
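/* Usage sketch (hypothetical caller): to emit "dst = src" as a single
   record-form insn that also sets CR0 from the signed comparison of the
   result with zero, a caller can do

     rs6000_emit_dot_insn (dst, src, 2, gen_rtx_REG (CCmode, CR0_REGNO));

   while a condition register other than CR0 falls back to the move plus
   separate compare path above.  */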
18487 /* A validation routine: say whether CODE, a condition code, and MODE
18488 match. The other alternatives either don't make sense or should
18489 never be generated. */
18491 void
18492 validate_condition_mode (enum rtx_code code, machine_mode mode)
18494 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18495 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18496 && GET_MODE_CLASS (mode) == MODE_CC);
18498 /* These don't make sense. */
18499 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18500 || mode != CCUNSmode);
18502 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18503 || mode == CCUNSmode);
18505 gcc_assert (mode == CCFPmode
18506 || (code != ORDERED && code != UNORDERED
18507 && code != UNEQ && code != LTGT
18508 && code != UNGT && code != UNLT
18509 && code != UNGE && code != UNLE));
18511 /* These should never be generated except for
18512 flag_finite_math_only. */
18513 gcc_assert (mode != CCFPmode
18514 || flag_finite_math_only
18515 || (code != LE && code != GE
18516 && code != UNEQ && code != LTGT
18517 && code != UNGT && code != UNLT));
18519 /* These are invalid; the information is not there. */
18520 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18524 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18525 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18526 not zero, store there the bit offset (counted from the right) where
18527 the single stretch of 1 bits begins; and similarly for B, the bit
18528 offset where it ends. */
18530 bool
18531 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18533 unsigned HOST_WIDE_INT val = INTVAL (mask);
18534 unsigned HOST_WIDE_INT bit;
18535 int nb, ne;
18536 int n = GET_MODE_PRECISION (mode);
18538 if (mode != DImode && mode != SImode)
18539 return false;
18541 if (INTVAL (mask) >= 0)
18543 bit = val & -val;
18544 ne = exact_log2 (bit);
18545 nb = exact_log2 (val + bit);
18547 else if (val + 1 == 0)
18549 nb = n;
18550 ne = 0;
18552 else if (val & 1)
18554 val = ~val;
18555 bit = val & -val;
18556 nb = exact_log2 (bit);
18557 ne = exact_log2 (val + bit);
18559 else
18561 bit = val & -val;
18562 ne = exact_log2 (bit);
18563 if (val + bit == 0)
18564 nb = n;
18565 else
18566 nb = 0;
18569 nb--;
18571 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18572 return false;
18574 if (b)
18575 *b = nb;
18576 if (e)
18577 *e = ne;
18579 return true;
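/* Illustrative, self-contained sketch (not part of the build): the same
   single-stretch test on a plain uint64_t, assuming DImode semantics.
   A mask qualifies when its 1 bits form one contiguous run, possibly
   wrapping around the word; the val & -val trick above isolates the
   lowest 1 bit to detect this.  Unlike the function above, the sketch
   only reports validity, not the B/E offsets.  */

#include <stdint.h>
#include <stdbool.h>

static bool
is_contiguous_run (uint64_t val)
{
  if (val == 0)
    return false;			/* no 1 bits at all */
  if ((val >> 63) == 0)			/* run does not touch the top bit */
    {
      uint64_t low = val & -val;	/* lowest 1 bit */
      /* Adding the lowest 1 bit must carry past the whole run.  */
      return ((val + low) & val) == 0;
    }
  /* Top bit set: valid iff the 0 bits themselves form one run,
     i.e. ~val passes the same test (or val is all ones).  */
  uint64_t inv = ~val;
  if (inv == 0)
    return true;
  uint64_t low = inv & -inv;
  return ((inv + low) & inv) == 0;
}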
18582 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18583 or rldicr instruction, to implement an AND with it in mode MODE. */
18585 bool
18586 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18588 int nb, ne;
18590 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18591 return false;
18593 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18594 does not wrap. */
18595 if (mode == DImode)
18596 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18598 /* For SImode, rlwinm can do everything. */
18599 if (mode == SImode)
18600 return (nb < 32 && ne < 32);
18602 return false;
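/* E.g. in DImode, 0xffffffff00000000 (ne == 32, nb == 63) is a valid
   rldicr mask and 0xff (ne == 0, nb == 7) a valid rldicl mask, while
   0xff000000000000ff wraps around and therefore needs a rotate, so it
   is rejected here even though rs6000_is_valid_mask accepts it.  */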
18605 /* Return the instruction template for an AND with mask in mode MODE, with
18606 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18608 const char *
18609 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18611 int nb, ne;
18613 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18614 gcc_unreachable ();
18616 if (mode == DImode && ne == 0)
18618 operands[3] = GEN_INT (63 - nb);
18619 if (dot)
18620 return "rldicl. %0,%1,0,%3";
18621 return "rldicl %0,%1,0,%3";
18624 if (mode == DImode && nb == 63)
18626 operands[3] = GEN_INT (63 - ne);
18627 if (dot)
18628 return "rldicr. %0,%1,0,%3";
18629 return "rldicr %0,%1,0,%3";
18632 if (nb < 32 && ne < 32)
18634 operands[3] = GEN_INT (31 - nb);
18635 operands[4] = GEN_INT (31 - ne);
18636 if (dot)
18637 return "rlwinm. %0,%1,0,%3,%4";
18638 return "rlwinm %0,%1,0,%3,%4";
18641 gcc_unreachable ();
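/* E.g. an SImode AND with 0xffffff00 (nb == 31, ne == 8) prints as
   "rlwinm %0,%1,0,0,23", i.e. MB = 31 - nb = 0 and ME = 31 - ne = 23.  */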
18644 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18645 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18646 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18648 bool
18649 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18651 int nb, ne;
18653 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18654 return false;
18656 int n = GET_MODE_PRECISION (mode);
18657 int sh = -1;
18659 if (CONST_INT_P (XEXP (shift, 1)))
18661 sh = INTVAL (XEXP (shift, 1));
18662 if (sh < 0 || sh >= n)
18663 return false;
18666 rtx_code code = GET_CODE (shift);
18668 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18669 if (sh == 0)
18670 code = ROTATE;
18672 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18673 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18674 code = ASHIFT;
18675 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18677 code = LSHIFTRT;
18678 sh = n - sh;
18681 /* DImode rotates need rld*. */
18682 if (mode == DImode && code == ROTATE)
18683 return (nb == 63 || ne == 0 || ne == sh);
18685 /* SImode rotates need rlw*. */
18686 if (mode == SImode && code == ROTATE)
18687 return (nb < 32 && ne < 32 && sh < 32);
18689 /* Wrap-around masks are only okay for rotates. */
18690 if (ne > nb)
18691 return false;
18693 /* Variable shifts are only okay for rotates. */
18694 if (sh < 0)
18695 return false;
18697 /* Don't allow ASHIFT if the mask is wrong for that. */
18698 if (code == ASHIFT && ne < sh)
18699 return false;
18701 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18702 if the mask is wrong for that. */
18703 if (nb < 32 && ne < 32 && sh < 32
18704 && !(code == LSHIFTRT && nb >= 32 - sh))
18705 return true;
18707 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18708 if the mask is wrong for that. */
18709 if (code == LSHIFTRT)
18710 sh = 64 - sh;
18711 if (nb == 63 || ne == 0 || ne == sh)
18712 return !(code == LSHIFTRT && nb >= sh);
18714 return false;
18717 /* Return the instruction template for a shift with mask in mode MODE, with
18718 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18720 const char *
18721 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18723 int nb, ne;
18725 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18726 gcc_unreachable ();
18728 if (mode == DImode && ne == 0)
18730 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18731 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18732 operands[3] = GEN_INT (63 - nb);
18733 if (dot)
18734 return "rld%I2cl. %0,%1,%2,%3";
18735 return "rld%I2cl %0,%1,%2,%3";
18738 if (mode == DImode && nb == 63)
18740 operands[3] = GEN_INT (63 - ne);
18741 if (dot)
18742 return "rld%I2cr. %0,%1,%2,%3";
18743 return "rld%I2cr %0,%1,%2,%3";
18746 if (mode == DImode
18747 && GET_CODE (operands[4]) != LSHIFTRT
18748 && CONST_INT_P (operands[2])
18749 && ne == INTVAL (operands[2]))
18751 operands[3] = GEN_INT (63 - nb);
18752 if (dot)
18753 return "rld%I2c. %0,%1,%2,%3";
18754 return "rld%I2c %0,%1,%2,%3";
18757 if (nb < 32 && ne < 32)
18759 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18760 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18761 operands[3] = GEN_INT (31 - nb);
18762 operands[4] = GEN_INT (31 - ne);
18763 /* This insn can also be a 64-bit rotate with a mask that really makes
18764 it just a shift right (with mask); the %h below is to adjust for
18765 that situation (shift count is >= 32 in that case). */
18766 if (dot)
18767 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18768 return "rlw%I2nm %0,%1,%h2,%3,%4";
18771 gcc_unreachable ();
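/* E.g. (x << 16) & 0xffff0000 in SImode is a valid shift-mask pair
   (nb == 31, ne == 16, sh == 16, an ASHIFT with ne >= sh) and prints as
   "rlwinm %0,%1,16,0,15".  */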
18774 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18775 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18776 ASHIFT, or LSHIFTRT) in mode MODE. */
18778 bool
18779 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18781 int nb, ne;
18783 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18784 return false;
18786 int n = GET_MODE_PRECISION (mode);
18788 int sh = INTVAL (XEXP (shift, 1));
18789 if (sh < 0 || sh >= n)
18790 return false;
18792 rtx_code code = GET_CODE (shift);
18794 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18795 if (sh == 0)
18796 code = ROTATE;
18798 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18799 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18800 code = ASHIFT;
18801 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18803 code = LSHIFTRT;
18804 sh = n - sh;
18807 /* DImode rotates need rldimi. */
18808 if (mode == DImode && code == ROTATE)
18809 return (ne == sh);
18811 /* SImode rotates need rlwimi. */
18812 if (mode == SImode && code == ROTATE)
18813 return (nb < 32 && ne < 32 && sh < 32);
18815 /* Wrap-around masks are only okay for rotates. */
18816 if (ne > nb)
18817 return false;
18819 /* Don't allow ASHIFT if the mask is wrong for that. */
18820 if (code == ASHIFT && ne < sh)
18821 return false;
18823 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18824 if the mask is wrong for that. */
18825 if (nb < 32 && ne < 32 && sh < 32
18826 && !(code == LSHIFTRT && nb >= 32 - sh))
18827 return true;
18829 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18830 if the mask is wrong for that. */
18831 if (code == LSHIFTRT)
18832 sh = 64 - sh;
18833 if (ne == sh)
18834 return !(code == LSHIFTRT && nb >= sh);
18836 return false;
18839 /* Return the instruction template for an insert with mask in mode MODE, with
18840 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18842 const char *
18843 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18845 int nb, ne;
18847 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18848 gcc_unreachable ();
18850 /* Prefer rldimi because rlwimi is cracked. */
18851 if (TARGET_POWERPC64
18852 && (!dot || mode == DImode)
18853 && GET_CODE (operands[4]) != LSHIFTRT
18854 && ne == INTVAL (operands[2]))
18856 operands[3] = GEN_INT (63 - nb);
18857 if (dot)
18858 return "rldimi. %0,%1,%2,%3";
18859 return "rldimi %0,%1,%2,%3";
18862 if (nb < 32 && ne < 32)
18864 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18865 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18866 operands[3] = GEN_INT (31 - nb);
18867 operands[4] = GEN_INT (31 - ne);
18868 if (dot)
18869 return "rlwimi. %0,%1,%2,%3,%4";
18870 return "rlwimi %0,%1,%2,%3,%4";
18873 gcc_unreachable ();
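/* E.g. on 32-bit, inserting (x << 8) & 0x0000ff00 into a register prints
   as "rlwimi %0,%1,8,16,23"; on 64-bit the rldimi form above is preferred
   whenever it applies, since rlwimi is cracked on some processors.  */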
18876 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18877 using two machine instructions. */
18879 bool
18880 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18882 /* There are two kinds of AND we can handle with two insns:
18883 1) those we can do with two rl* insns;
18884 2) ori[s];xori[s].
18886 We do not handle that last case yet. */
18888 /* If there is just one stretch of ones, we can do it. */
18889 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18890 return true;
18892 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18893 one insn, we can do the whole thing with two. */
18894 unsigned HOST_WIDE_INT val = INTVAL (c);
18895 unsigned HOST_WIDE_INT bit1 = val & -val;
18896 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18897 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18898 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18899 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
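/* Self-contained sketch (not part of the build) of the hole-filling
   arithmetic above, on a plain uint64_t.  For val = 0b111001111:
     bit1 = 0b000000001   lowest 1 bit
     bit2 = 0b000010000   lowest bit of the lowest hole
     val1 = 0b111000000   the 1 bits above that hole
     bit3 = 0b001000000   lowest 1 bit above the hole
   and val + bit3 - bit2 == 0b111111111, i.e. the hole filled in.
   Assumes VAL has at least one hole, as it does at the call above.  */

#include <stdint.h>

static uint64_t
fill_lowest_hole (uint64_t val)
{
  uint64_t bit1 = val & -val;		/* lowest 1 bit */
  uint64_t bit2 = (val + bit1) & ~val;	/* lowest bit of the hole */
  uint64_t val1 = (val + bit1) & val;	/* 1 bits above the hole */
  uint64_t bit3 = val1 & -val1;		/* lowest 1 bit above the hole */
  return val + bit3 - bit2;
}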
18902 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18903 If EXPAND is true, split rotate-and-mask instructions we generate to
18904 their constituent parts as well (this is used during expand); if DOT
18905 is 1, make the last insn a record-form instruction clobbering the
18906 destination GPR and setting the CC reg (from operands[3]); if 2, set
18907 that GPR as well as the CC reg. */
18909 void
18910 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18912 gcc_assert (!(expand && dot));
18914 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18916 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18917 shift right. This generates better code than doing the masks without
18918 shifts, or shifting first right and then left. */
18919 int nb, ne;
18920 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18922 gcc_assert (mode == DImode);
18924 int shift = 63 - nb;
18925 if (expand)
18927 rtx tmp1 = gen_reg_rtx (DImode);
18928 rtx tmp2 = gen_reg_rtx (DImode);
18929 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18930 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18931 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18933 else
18935 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18936 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18937 emit_move_insn (operands[0], tmp);
18938 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18939 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18941 return;
18944 /* Otherwise, make a mask1 that clears just the lowest "hole", and a mask2
18945 that is VAL with that hole filled in; VAL == mask1 & mask2. */
18946 unsigned HOST_WIDE_INT bit1 = val & -val;
18947 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18948 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18949 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18951 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18952 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18954 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18956 /* Two "no-rotate"-and-mask instructions, for SImode. */
18957 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18959 gcc_assert (mode == SImode);
18961 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18962 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18963 emit_move_insn (reg, tmp);
18964 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18965 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18966 return;
18969 gcc_assert (mode == DImode);
18971 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18972 insns; we have to do the first in SImode, because it wraps. */
18973 if (mask2 <= 0xffffffff
18974 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18976 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18977 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18978 GEN_INT (mask1));
18979 rtx reg_low = gen_lowpart (SImode, reg);
18980 emit_move_insn (reg_low, tmp);
18981 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18982 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18983 return;
18986 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18987 at the top end), rotate back and clear the other hole. */
18988 int right = exact_log2 (bit3);
18989 int left = 64 - right;
18991 /* Rotate the mask too. */
18992 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18994 if (expand)
18996 rtx tmp1 = gen_reg_rtx (DImode);
18997 rtx tmp2 = gen_reg_rtx (DImode);
18998 rtx tmp3 = gen_reg_rtx (DImode);
18999 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19000 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19001 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19002 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19004 else
19006 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19007 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19008 emit_move_insn (operands[0], tmp);
19009 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19010 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19011 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
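/* Worked example (DImode): for val = 0x0f0000000000000f the lowest hole is
   bits 4..55, giving mask2 = 0x0fffffffffffffff and mask1 =
   0xff0000000000000f (0xfff once rotated), so the AND is emitted as
	rldicl tmp,src,8,52	(rotate left 8, keep the low 12 bits)
	rldicl dst,tmp,56,4	(rotate back, keep the low 60 bits)
   and mask1 & mask2 == val as required.  */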
19015 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
19016 for lfq and stfq insns iff the registers are hard registers. */
19018 int
19019 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19021 /* We might have been passed a SUBREG. */
19022 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19023 return 0;
19025 /* We might have been passed non floating point registers. */
19026 if (!FP_REGNO_P (REGNO (reg1))
19027 || !FP_REGNO_P (REGNO (reg2)))
19028 return 0;
19030 return (REGNO (reg1) == REGNO (reg2) - 1);
19033 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
19034 addr1 and addr2 must be in consecutive memory locations
19035 (addr2 == addr1 + 8). */
19037 int
19038 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19040 rtx addr1, addr2;
19041 unsigned int reg1, reg2;
19042 int offset1, offset2;
19044 /* The mems cannot be volatile. */
19045 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19046 return 0;
19048 addr1 = XEXP (mem1, 0);
19049 addr2 = XEXP (mem2, 0);
19051 /* Extract an offset (if used) from the first addr. */
19052 if (GET_CODE (addr1) == PLUS)
19054 /* If not a REG, return zero. */
19055 if (GET_CODE (XEXP (addr1, 0)) != REG)
19056 return 0;
19057 else
19059 reg1 = REGNO (XEXP (addr1, 0));
19060 /* The offset must be constant! */
19061 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19062 return 0;
19063 offset1 = INTVAL (XEXP (addr1, 1));
19066 else if (GET_CODE (addr1) != REG)
19067 return 0;
19068 else
19070 reg1 = REGNO (addr1);
19071 /* This was a simple (mem (reg)) expression. Offset is 0. */
19072 offset1 = 0;
19075 /* And now for the second addr. */
19076 if (GET_CODE (addr2) == PLUS)
19078 /* If not a REG, return zero. */
19079 if (GET_CODE (XEXP (addr2, 0)) != REG)
19080 return 0;
19081 else
19083 reg2 = REGNO (XEXP (addr2, 0));
19084 /* The offset must be constant. */
19085 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19086 return 0;
19087 offset2 = INTVAL (XEXP (addr2, 1));
19090 else if (GET_CODE (addr2) != REG)
19091 return 0;
19092 else
19094 reg2 = REGNO (addr2);
19095 /* This was a simple (mem (reg)) expression. Offset is 0. */
19096 offset2 = 0;
19099 /* Both of these must have the same base register. */
19100 if (reg1 != reg2)
19101 return 0;
19103 /* The offset for the second addr must be 8 more than the first addr. */
19104 if (offset2 != offset1 + 8)
19105 return 0;
19107 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19108 instructions. */
19109 return 1;
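/* E.g. the pair (mem (reg 9)) and (mem (plus (reg 9) (const_int 8)))
   qualifies (same base register, offsets 0 and 8), while volatile mems
   or a pair with offsets 0 and 16 do not.  */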
19112 /* Return the mode to be used for memory when a secondary memory
19113 location is needed. For SDmode values we need to use DDmode, in
19114 all other cases we can use the same mode. */
19115 machine_mode
19116 rs6000_secondary_memory_needed_mode (machine_mode mode)
19118 if (lra_in_progress && mode == SDmode)
19119 return DDmode;
19120 return mode;
19123 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19124 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19125 only work on the traditional Altivec registers, note if an Altivec register
19126 was chosen. */
19128 static enum rs6000_reg_type
19129 register_to_reg_type (rtx reg, bool *is_altivec)
19131 HOST_WIDE_INT regno;
19132 enum reg_class rclass;
19134 if (GET_CODE (reg) == SUBREG)
19135 reg = SUBREG_REG (reg);
19137 if (!REG_P (reg))
19138 return NO_REG_TYPE;
19140 regno = REGNO (reg);
19141 if (regno >= FIRST_PSEUDO_REGISTER)
19143 if (!lra_in_progress && !reload_completed)
19144 return PSEUDO_REG_TYPE;
19146 regno = true_regnum (reg);
19147 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19148 return PSEUDO_REG_TYPE;
19151 gcc_assert (regno >= 0);
19153 if (is_altivec && ALTIVEC_REGNO_P (regno))
19154 *is_altivec = true;
19156 rclass = rs6000_regno_regclass[regno];
19157 return reg_class_to_reg_type[(int)rclass];
19160 /* Helper function to return the cost of adding a TOC entry address. */
19162 static inline int
19163 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19165 int ret;
19167 if (TARGET_CMODEL != CMODEL_SMALL)
19168 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19170 else
19171 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19173 return ret;
19176 /* Helper function for rs6000_secondary_reload to determine whether the memory
19177 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19178 needs reloading. Return negative if the memory is not handled by the memory
19179 helper functions and a different reload method should be tried, 0 if no
19180 additional instructions are needed, and positive to give the extra cost for
19181 the memory. */
19183 static int
19184 rs6000_secondary_reload_memory (rtx addr,
19185 enum reg_class rclass,
19186 machine_mode mode)
19188 int extra_cost = 0;
19189 rtx reg, and_arg, plus_arg0, plus_arg1;
19190 addr_mask_type addr_mask;
19191 const char *type = NULL;
19192 const char *fail_msg = NULL;
19194 if (GPR_REG_CLASS_P (rclass))
19195 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19197 else if (rclass == FLOAT_REGS)
19198 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19200 else if (rclass == ALTIVEC_REGS)
19201 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19203 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19204 else if (rclass == VSX_REGS)
19205 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19206 & ~RELOAD_REG_AND_M16);
19208 /* If the register allocator hasn't made up its mind yet on the register
19209 class to use, settle on defaults. */
19210 else if (rclass == NO_REGS)
19212 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19213 & ~RELOAD_REG_AND_M16);
19215 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19216 addr_mask &= ~(RELOAD_REG_INDEXED
19217 | RELOAD_REG_PRE_INCDEC
19218 | RELOAD_REG_PRE_MODIFY);
19221 else
19222 addr_mask = 0;
19224 /* If the register isn't valid in this register class, just return now. */
19225 if ((addr_mask & RELOAD_REG_VALID) == 0)
19227 if (TARGET_DEBUG_ADDR)
19229 fprintf (stderr,
19230 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19231 "not valid in class\n",
19232 GET_MODE_NAME (mode), reg_class_names[rclass]);
19233 debug_rtx (addr);
19236 return -1;
19239 switch (GET_CODE (addr))
19241 /* Does the register class support auto update forms for this mode? We
19242 don't need a scratch register, since PowerPC only supports
19243 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19244 case PRE_INC:
19245 case PRE_DEC:
19246 reg = XEXP (addr, 0);
19247 if (!base_reg_operand (reg, GET_MODE (reg)))
19249 fail_msg = "no base register #1";
19250 extra_cost = -1;
19253 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19255 extra_cost = 1;
19256 type = "update";
19258 break;
19260 case PRE_MODIFY:
19261 reg = XEXP (addr, 0);
19262 plus_arg1 = XEXP (addr, 1);
19263 if (!base_reg_operand (reg, GET_MODE (reg))
19264 || GET_CODE (plus_arg1) != PLUS
19265 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19267 fail_msg = "bad PRE_MODIFY";
19268 extra_cost = -1;
19271 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19273 extra_cost = 1;
19274 type = "update";
19276 break;
19278 /* Do we need to simulate AND -16 to clear the bottom address bits used
19279 in VMX load/stores? Only allow the AND for vector sizes. */
19280 case AND:
19281 and_arg = XEXP (addr, 0);
19282 if (GET_MODE_SIZE (mode) != 16
19283 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19284 || INTVAL (XEXP (addr, 1)) != -16)
19286 fail_msg = "bad Altivec AND #1";
19287 extra_cost = -1;
19290 if (rclass != ALTIVEC_REGS)
19292 if (legitimate_indirect_address_p (and_arg, false))
19293 extra_cost = 1;
19295 else if (legitimate_indexed_address_p (and_arg, false))
19296 extra_cost = 2;
19298 else
19300 fail_msg = "bad Altivec AND #2";
19301 extra_cost = -1;
19304 type = "and";
19306 break;
19308 /* If this is an indirect address, make sure it is a base register. */
19309 case REG:
19310 case SUBREG:
19311 if (!legitimate_indirect_address_p (addr, false))
19313 extra_cost = 1;
19314 type = "move";
19316 break;
19318 /* If this is an indexed address, make sure the register class can handle
19319 indexed addresses for this mode. */
19320 case PLUS:
19321 plus_arg0 = XEXP (addr, 0);
19322 plus_arg1 = XEXP (addr, 1);
19324 /* (plus (plus (reg) (constant)) (constant)) is generated during
19325 push_reload processing, so handle it now. */
19326 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19328 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19330 extra_cost = 1;
19331 type = "offset";
19335 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19336 push_reload processing, so handle it now. */
19337 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19339 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19341 extra_cost = 1;
19342 type = "indexed #2";
19346 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19348 fail_msg = "no base register #2";
19349 extra_cost = -1;
19352 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19354 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19355 || !legitimate_indexed_address_p (addr, false))
19357 extra_cost = 1;
19358 type = "indexed";
19362 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19363 && CONST_INT_P (plus_arg1))
19365 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19367 extra_cost = 1;
19368 type = "vector d-form offset";
19372 /* Make sure the register class can handle offset addresses. */
19373 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19375 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19377 extra_cost = 1;
19378 type = "offset #2";
19382 else
19384 fail_msg = "bad PLUS";
19385 extra_cost = -1;
19388 break;
19390 case LO_SUM:
19391 /* Quad offsets are restricted and can't handle normal addresses. */
19392 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19394 extra_cost = -1;
19395 type = "vector d-form lo_sum";
19398 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19400 fail_msg = "bad LO_SUM";
19401 extra_cost = -1;
19404 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19406 extra_cost = 1;
19407 type = "lo_sum";
19409 break;
19411 /* Static addresses need to create a TOC entry. */
19412 case CONST:
19413 case SYMBOL_REF:
19414 case LABEL_REF:
19415 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19417 extra_cost = -1;
19418 type = "vector d-form lo_sum #2";
19421 else
19423 type = "address";
19424 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19426 break;
19428 /* TOC references look like offsettable memory. */
19429 case UNSPEC:
19430 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19432 fail_msg = "bad UNSPEC";
19433 extra_cost = -1;
19436 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19438 extra_cost = -1;
19439 type = "vector d-form lo_sum #3";
19442 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19444 extra_cost = 1;
19445 type = "toc reference";
19447 break;
19449 default:
19451 fail_msg = "bad address";
19452 extra_cost = -1;
19456 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19458 if (extra_cost < 0)
19459 fprintf (stderr,
19460 "rs6000_secondary_reload_memory error: mode = %s, "
19461 "class = %s, addr_mask = '%s', %s\n",
19462 GET_MODE_NAME (mode),
19463 reg_class_names[rclass],
19464 rs6000_debug_addr_mask (addr_mask, false),
19465 (fail_msg != NULL) ? fail_msg : "<bad address>");
19467 else
19468 fprintf (stderr,
19469 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19470 "addr_mask = '%s', extra cost = %d, %s\n",
19471 GET_MODE_NAME (mode),
19472 reg_class_names[rclass],
19473 rs6000_debug_addr_mask (addr_mask, false),
19474 extra_cost,
19475 (type) ? type : "<none>");
19477 debug_rtx (addr);
19480 return extra_cost;
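/* E.g. a 16-byte vector load from (and (plus (reg) (reg)) (const_int -16))
   reloaded for a GPR or FPR class costs 2 extra insns to simulate the AND,
   whereas for ALTIVEC_REGS it costs nothing extra, because lvx/stvx ignore
   the low four address bits anyway.  */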
19483 /* Helper function for rs6000_secondary_reload to return true if a move to a
19484 different register class is really a simple move. */
19486 static bool
19487 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19488 enum rs6000_reg_type from_type,
19489 machine_mode mode)
19491 int size = GET_MODE_SIZE (mode);
19493 /* Add support for the various direct moves available. In this function, we
19494 only look at cases where we don't need any extra registers, and one or more
19495 simple move insns are issued. Historically, small integers were not allowed
19496 in FPR/VSX registers. Single precision binary floating point is not a simple
19497 move because we need to convert to the single precision memory layout.
19498 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19499 need special direct move handling, which we do not support yet. */
19500 if (TARGET_DIRECT_MOVE
19501 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19502 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19504 if (TARGET_POWERPC64)
19506 /* ISA 2.07: MTVSRD or MFVSRD. */
19507 if (size == 8)
19508 return true;
19510 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19511 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19512 return true;
19515 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19516 if (TARGET_P8_VECTOR)
19518 if (mode == SImode)
19519 return true;
19521 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19522 return true;
19525 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19526 if (mode == SDmode)
19527 return true;
19530 /* Power6+: MFTGPR or MFFGPR. */
19531 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19532 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19533 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19534 return true;
19536 /* Move to/from SPR. */
19537 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19538 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19539 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19540 return true;
19542 return false;
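/* E.g. on a 64-bit ISA 2.07 target, a DImode move between a GPR and a VSX
   register is a single mtvsrd or mfvsrd, so this returns true and no
   scratch register or helper pattern is needed.  */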
19545 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19546 special direct moves that involve allocating an extra register. If there is
19547 a helper pattern, record its insn code and cost in SRI and return true;
19548 otherwise return false. */
19550 static bool
19551 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19552 enum rs6000_reg_type from_type,
19553 machine_mode mode,
19554 secondary_reload_info *sri,
19555 bool altivec_p)
19557 bool ret = false;
19558 enum insn_code icode = CODE_FOR_nothing;
19559 int cost = 0;
19560 int size = GET_MODE_SIZE (mode);
19562 if (TARGET_POWERPC64 && size == 16)
19564 /* Handle moving 128-bit values from GPRs to VSX registers on
19565 ISA 2.07 (power8, power9) when running in 64-bit mode using
19566 XXPERMDI to glue the two 64-bit values back together. */
19567 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19569 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19570 icode = reg_addr[mode].reload_vsx_gpr;
19573 /* Handle moving 128-bit values from VSX registers to GPRs on
19574 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19575 bottom 64-bit value. */
19576 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19578 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19579 icode = reg_addr[mode].reload_gpr_vsx;
19583 else if (TARGET_POWERPC64 && mode == SFmode)
19585 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19587 cost = 3; /* xscvdpspn, mfvsrd, and. */
19588 icode = reg_addr[mode].reload_gpr_vsx;
19591 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19593 cost = 2; /* mtvsrz, xscvspdpn. */
19594 icode = reg_addr[mode].reload_vsx_gpr;
19598 else if (!TARGET_POWERPC64 && size == 8)
19600 /* Handle moving 64-bit values from GPRs to floating point registers on
19601 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19602 32-bit values back together. Altivec register classes must be handled
19603 specially since a different instruction is used, and the secondary
19604 reload support requires a single instruction class in the scratch
19605 register constraint. However, right now TFmode is not allowed in
19606 Altivec registers, so the pattern will never match. */
19607 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19609 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19610 icode = reg_addr[mode].reload_fpr_gpr;
19614 if (icode != CODE_FOR_nothing)
19616 ret = true;
19617 if (sri)
19619 sri->icode = icode;
19620 sri->extra_cost = cost;
19624 return ret;
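/* E.g. a TImode move from GPRs to a VSX register on a 64-bit ISA 2.07
   target is not a simple move: it uses the reload_vsx_gpr pattern, two
   mtvsrd's plus one xxpermdi, hence extra_cost == 3 above.  */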
19627 /* Return whether a move between two register classes can be done either
19628 directly (simple move) or via a pattern that uses a single extra temporary
19629 (using ISA 2.07's direct move in this case). */
19631 static bool
19632 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19633 enum rs6000_reg_type from_type,
19634 machine_mode mode,
19635 secondary_reload_info *sri,
19636 bool altivec_p)
19638 /* Fall back to load/store reloads if either type is not a register. */
19639 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19640 return false;
19642 /* If we haven't allocated registers yet, assume the move can be done for the
19643 standard register types. */
19644 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19645 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19646 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19647 return true;
19649 /* A move within the same set of registers is a simple move for non-specialized
19650 registers. */
19651 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19652 return true;
19654 /* Check whether a simple move can be done directly. */
19655 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19657 if (sri)
19659 sri->icode = CODE_FOR_nothing;
19660 sri->extra_cost = 0;
19662 return true;
19665 /* Now check if we can do it in a few steps. */
19666 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19667 altivec_p);
19670 /* Inform reload about cases where moving X with a mode MODE to a register in
19671 RCLASS requires an extra scratch or immediate register. Return the class
19672 needed for the immediate register.
19674 For VSX and Altivec, we may need a register to convert sp+offset into
19675 reg+sp.
19677 For misaligned 64-bit gpr loads and stores we need a register to
19678 convert an offset address to indirect. */
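/* An illustrative case (a sketch, not literal RTL from a build): asked
   to reload
       (mem:V4SI (plus (reg:DI sp) (const_int 48)))
   into an Altivec register, this hook can hand back a helper insn that
   loads the offset into the scratch register, so the access becomes
   the indexed form (mem:V4SI (plus (reg:DI scratch) (reg:DI sp))). */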
19680 static reg_class_t
19681 rs6000_secondary_reload (bool in_p,
19682 rtx x,
19683 reg_class_t rclass_i,
19684 machine_mode mode,
19685 secondary_reload_info *sri)
19687 enum reg_class rclass = (enum reg_class) rclass_i;
19688 reg_class_t ret = ALL_REGS;
19689 enum insn_code icode;
19690 bool default_p = false;
19691 bool done_p = false;
19693 /* Allow subreg of memory before/during reload. */
19694 bool memory_p = (MEM_P (x)
19695 || (!reload_completed && GET_CODE (x) == SUBREG
19696 && MEM_P (SUBREG_REG (x))));
19698 sri->icode = CODE_FOR_nothing;
19699 sri->t_icode = CODE_FOR_nothing;
19700 sri->extra_cost = 0;
19701 icode = ((in_p)
19702 ? reg_addr[mode].reload_load
19703 : reg_addr[mode].reload_store);
19705 if (REG_P (x) || register_operand (x, mode))
19707 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19708 bool altivec_p = (rclass == ALTIVEC_REGS);
19709 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19711 if (!in_p)
19712 std::swap (to_type, from_type);
19714 /* Can we do a direct move of some sort? */
19715 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19716 altivec_p))
19718 icode = (enum insn_code)sri->icode;
19719 default_p = false;
19720 done_p = true;
19721 ret = NO_REGS;
19725 /* Make sure 0.0 is not reloaded or forced into memory. */
19726 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19728 ret = NO_REGS;
19729 default_p = false;
19730 done_p = true;
19733 /* If this is a scalar floating point value and we want to load it into the
19734 traditional Altivec registers, move it via a traditional floating
19735 point register, unless we have D-form addressing. Also make sure that
19736 non-zero constants use a FPR. */
19737 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19738 && !mode_supports_vmx_dform (mode)
19739 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19740 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19742 ret = FLOAT_REGS;
19743 default_p = false;
19744 done_p = true;
19747 /* Handle reload of load/stores if we have reload helper functions. */
19748 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19750 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19751 mode);
19753 if (extra_cost >= 0)
19755 done_p = true;
19756 ret = NO_REGS;
19757 if (extra_cost > 0)
19759 sri->extra_cost = extra_cost;
19760 sri->icode = icode;
19765 /* Handle unaligned loads and stores of integer registers. */
19766 if (!done_p && TARGET_POWERPC64
19767 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19768 && memory_p
19769 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19771 rtx addr = XEXP (x, 0);
19772 rtx off = address_offset (addr);
19774 if (off != NULL_RTX)
19776 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19777 unsigned HOST_WIDE_INT offset = INTVAL (off);
19779 /* We need a secondary reload when our legitimate_address_p
19780 says the address is good (as otherwise the entire address
19781 will be reloaded), and the offset is not a multiple of
19782 four or we have an address wrap. Address wrap will only
19783 occur for LO_SUMs since legitimate_offset_address_p
19784 rejects addresses for 16-byte mems that will wrap. */
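/* Worked example with illustrative numbers: for a TImode access,
   extra = 16 - UNITS_PER_WORD = 8. A LO_SUM offset whose low 16 bits
   are 0x7ffc gives ((0x7ffc & 0xffff) ^ 0x8000) = 0xfffc, which is
   >= 0x10000 - 8 = 0xfff8, so the second doubleword at offset+8 would
   wrap past the 16-bit displacement and needs the secondary reload.
   Likewise an offset such as 0x12 fails the DS-form requirement that
   (offset & 3) == 0. */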
19785 if (GET_CODE (addr) == LO_SUM
19786 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19787 && ((offset & 3) != 0
19788 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19789 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19790 && (offset & 3) != 0))
19792 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19793 if (in_p)
19794 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19795 : CODE_FOR_reload_di_load);
19796 else
19797 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19798 : CODE_FOR_reload_di_store);
19799 sri->extra_cost = 2;
19800 ret = NO_REGS;
19801 done_p = true;
19803 else
19804 default_p = true;
19806 else
19807 default_p = true;
19810 if (!done_p && !TARGET_POWERPC64
19811 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19812 && memory_p
19813 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19815 rtx addr = XEXP (x, 0);
19816 rtx off = address_offset (addr);
19818 if (off != NULL_RTX)
19820 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19821 unsigned HOST_WIDE_INT offset = INTVAL (off);
19823 /* We need a secondary reload when our legitimate_address_p
19824 says the address is good (as otherwise the entire address
19825 will be reloaded), and we have a wrap.
19827 legitimate_lo_sum_address_p allows LO_SUM addresses to
19828 have any offset so test for wrap in the low 16 bits.
19830 legitimate_offset_address_p checks for the range
19831 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19832 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19833 [0x7ff4,0x7fff] respectively, so test for the
19834 intersection of these ranges, [0x7ffc,0x7fff] and
19835 [0x7ff4,0x7ff7] respectively.
19837 Note that the address we see here may have been
19838 manipulated by legitimize_reload_address. */
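/* Worked example with illustrative numbers: a DImode access done as
   two word loads has extra = 8 - 4 = 4. An offset of 0x7ffd gives
   0x7ffd - (0x8000 - 4) = 1 < UNITS_PER_WORD, so the second word at
   offset + 4 = 0x8001 no longer fits the signed 16-bit displacement
   and the address must be reloaded as indirect. */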
19839 if (GET_CODE (addr) == LO_SUM
19840 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19841 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19843 if (in_p)
19844 sri->icode = CODE_FOR_reload_si_load;
19845 else
19846 sri->icode = CODE_FOR_reload_si_store;
19847 sri->extra_cost = 2;
19848 ret = NO_REGS;
19849 done_p = true;
19851 else
19852 default_p = true;
19854 else
19855 default_p = true;
19858 if (!done_p)
19859 default_p = true;
19861 if (default_p)
19862 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19864 gcc_assert (ret != ALL_REGS);
19866 if (TARGET_DEBUG_ADDR)
19868 fprintf (stderr,
19869 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19870 "mode = %s",
19871 reg_class_names[ret],
19872 in_p ? "true" : "false",
19873 reg_class_names[rclass],
19874 GET_MODE_NAME (mode));
19876 if (reload_completed)
19877 fputs (", after reload", stderr);
19879 if (!done_p)
19880 fputs (", done_p not set", stderr);
19882 if (default_p)
19883 fputs (", default secondary reload", stderr);
19885 if (sri->icode != CODE_FOR_nothing)
19886 fprintf (stderr, ", reload func = %s, extra cost = %d",
19887 insn_data[sri->icode].name, sri->extra_cost);
19889 else if (sri->extra_cost > 0)
19890 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19892 fputs ("\n", stderr);
19893 debug_rtx (x);
19896 return ret;
19899 /* Better tracing for rs6000_secondary_reload_inner. */
19901 static void
19902 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19903 bool store_p)
19905 rtx set, clobber;
19907 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19909 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19910 store_p ? "store" : "load");
19912 if (store_p)
19913 set = gen_rtx_SET (mem, reg);
19914 else
19915 set = gen_rtx_SET (reg, mem);
19917 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19918 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19921 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19922 ATTRIBUTE_NORETURN;
19924 static void
19925 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19926 bool store_p)
19928 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19929 gcc_unreachable ();
19932 /* Fix up reload addresses for values in GPR, FPR, and VMX registers that have
19933 reload helper functions. These were identified in
19934 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19935 reload, it calls the insns:
19936 reload_<RELOAD:mode>_<P:mptrsize>_store
19937 reload_<RELOAD:mode>_<P:mptrsize>_load
19939 which in turn calls this function, to do whatever is necessary to create
19940 valid addresses. */
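/* A sketch of the effect (hypothetical RTL): given an address such as
       (mem:V2DF (plus (reg:DI sp) (const_int 37)))
   whose offset is not valid for the register class being used, the
   code below loads the full address into the scratch register and
   rewrites the access as (mem:V2DF (reg:DI scratch)). */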
19942 void
19943 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19945 int regno = true_regnum (reg);
19946 machine_mode mode = GET_MODE (reg);
19947 addr_mask_type addr_mask;
19948 rtx addr;
19949 rtx new_addr;
19950 rtx op_reg, op0, op1;
19951 rtx and_op;
19952 rtx cc_clobber;
19953 rtvec rv;
19955 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19956 || !base_reg_operand (scratch, GET_MODE (scratch)))
19957 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19959 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19960 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19962 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19963 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19965 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19966 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19968 else
19969 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19971 /* Make sure the mode is valid in this register class. */
19972 if ((addr_mask & RELOAD_REG_VALID) == 0)
19973 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19975 if (TARGET_DEBUG_ADDR)
19976 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19978 new_addr = addr = XEXP (mem, 0);
19979 switch (GET_CODE (addr))
19981 /* Does the register class support auto update forms for this mode? If
19982 not, do the update now. We don't need a scratch register, since the
19983 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19984 case PRE_INC:
19985 case PRE_DEC:
19986 op_reg = XEXP (addr, 0);
19987 if (!base_reg_operand (op_reg, Pmode))
19988 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19990 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19992 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
19993 new_addr = op_reg;
19995 break;
19997 case PRE_MODIFY:
19998 op0 = XEXP (addr, 0);
19999 op1 = XEXP (addr, 1);
20000 if (!base_reg_operand (op0, Pmode)
20001 || GET_CODE (op1) != PLUS
20002 || !rtx_equal_p (op0, XEXP (op1, 0)))
20003 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20005 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20007 emit_insn (gen_rtx_SET (op0, op1));
20008 new_addr = op0; /* Use the updated base register as the address. */
20010 break;
20012 /* Do we need to simulate AND -16 to clear the bottom address bits used
20013 in VMX load/stores? */
20014 case AND:
20015 op0 = XEXP (addr, 0);
20016 op1 = XEXP (addr, 1);
20017 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20019 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20020 op_reg = op0;
20022 else if (GET_CODE (op1) == PLUS)
20024 emit_insn (gen_rtx_SET (scratch, op1));
20025 op_reg = scratch;
20028 else
20029 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20031 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20032 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20033 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20034 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20035 new_addr = scratch;
20037 break;
20039 /* If this is an indirect address, make sure it is a base register. */
20040 case REG:
20041 case SUBREG:
20042 if (!base_reg_operand (addr, GET_MODE (addr)))
20044 emit_insn (gen_rtx_SET (scratch, addr));
20045 new_addr = scratch;
20047 break;
20049 /* If this is an indexed address, make sure the register class can handle
20050 indexed addresses for this mode. */
20051 case PLUS:
20052 op0 = XEXP (addr, 0);
20053 op1 = XEXP (addr, 1);
20054 if (!base_reg_operand (op0, Pmode))
20055 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20057 else if (int_reg_operand (op1, Pmode))
20059 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20061 emit_insn (gen_rtx_SET (scratch, addr));
20062 new_addr = scratch;
20066 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20068 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20069 || !quad_address_p (addr, mode, false))
20071 emit_insn (gen_rtx_SET (scratch, addr));
20072 new_addr = scratch;
20076 /* Make sure the register class can handle offset addresses. */
20077 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20079 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20081 emit_insn (gen_rtx_SET (scratch, addr));
20082 new_addr = scratch;
20086 else
20087 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20089 break;
20091 case LO_SUM:
20092 op0 = XEXP (addr, 0);
20093 op1 = XEXP (addr, 1);
20094 if (!base_reg_operand (op0, Pmode))
20095 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20097 else if (int_reg_operand (op1, Pmode))
20099 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20101 emit_insn (gen_rtx_SET (scratch, addr));
20102 new_addr = scratch;
20106 /* Quad offsets are restricted and can't handle normal addresses. */
20107 else if (mode_supports_vsx_dform_quad (mode))
20109 emit_insn (gen_rtx_SET (scratch, addr));
20110 new_addr = scratch;
20113 /* Make sure the register class can handle offset addresses. */
20114 else if (legitimate_lo_sum_address_p (mode, addr, false))
20116 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20118 emit_insn (gen_rtx_SET (scratch, addr));
20119 new_addr = scratch;
20123 else
20124 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20126 break;
20128 case SYMBOL_REF:
20129 case CONST:
20130 case LABEL_REF:
20131 rs6000_emit_move (scratch, addr, Pmode);
20132 new_addr = scratch;
20133 break;
20135 default:
20136 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20139 /* Adjust the address if it changed. */
20140 if (addr != new_addr)
20142 mem = replace_equiv_address_nv (mem, new_addr);
20143 if (TARGET_DEBUG_ADDR)
20144 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20147 /* Now create the move. */
20148 if (store_p)
20149 emit_insn (gen_rtx_SET (mem, reg));
20150 else
20151 emit_insn (gen_rtx_SET (reg, mem));
20153 return;
20156 /* Convert reloads involving 64-bit gprs and misaligned offset
20157 addressing, or multiple 32-bit gprs and offsets that are too large,
20158 to use indirect addressing. */
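/* For illustration (hypothetical RTL): a misaligned 64-bit access such
   as (mem:DI (plus (reg r3) (const_int 6))) is handled by moving the
   whole address into the scratch register and using
   (mem:DI (reg scratch)) instead. */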
20160 void
20161 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20163 int regno = true_regnum (reg);
20164 enum reg_class rclass;
20165 rtx addr;
20166 rtx scratch_or_premodify = scratch;
20168 if (TARGET_DEBUG_ADDR)
20170 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20171 store_p ? "store" : "load");
20172 fprintf (stderr, "reg:\n");
20173 debug_rtx (reg);
20174 fprintf (stderr, "mem:\n");
20175 debug_rtx (mem);
20176 fprintf (stderr, "scratch:\n");
20177 debug_rtx (scratch);
20180 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20181 gcc_assert (GET_CODE (mem) == MEM);
20182 rclass = REGNO_REG_CLASS (regno);
20183 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20184 addr = XEXP (mem, 0);
20186 if (GET_CODE (addr) == PRE_MODIFY)
20188 gcc_assert (REG_P (XEXP (addr, 0))
20189 && GET_CODE (XEXP (addr, 1)) == PLUS
20190 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20191 scratch_or_premodify = XEXP (addr, 0);
20192 if (!HARD_REGISTER_P (scratch_or_premodify))
20193 /* If we have a pseudo here then reload will have arranged
20194 to have it replaced, but only in the original insn.
20195 Use the replacement here too. */
20196 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20198 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20199 expressions from the original insn, without unsharing them.
20200 Any RTL that points into the original insn will of course
20201 have register replacements applied. That is why we don't
20202 need to look for replacements under the PLUS. */
20203 addr = XEXP (addr, 1);
20205 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20207 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20209 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20211 /* Now create the move. */
20212 if (store_p)
20213 emit_insn (gen_rtx_SET (mem, reg));
20214 else
20215 emit_insn (gen_rtx_SET (reg, mem));
20217 return;
20220 /* Given an rtx X being reloaded into a reg required to be
20221 in class CLASS, return the class of reg to actually use.
20222 In general this is just CLASS; but on some machines
20223 in some cases it is preferable to use a more restrictive class.
20225 On the RS/6000, we have to return NO_REGS when we want to reload a
20226 floating-point CONST_DOUBLE to force it to be copied to memory.
20228 We also don't want to reload integer values into floating-point
20229 registers if we can at all help it. In fact, this can
20230 cause reload to die, if it tries to generate a reload of CTR
20231 into a FP register and discovers it doesn't have the memory location
20232 required.
20234 ??? Would it be a good idea to have reload do the converse, that is
20235 try to reload floating modes into FP registers if possible?
20236 */
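/* For example, asking for the preferred class of (const_int -1) with
   rclass == VSX_REGS returns VSX_REGS itself when TARGET_P8_VECTOR
   (XXLORC can form -1 in any VSX register), but only ALTIVEC_REGS on
   ISA 2.06, where VSPLTI<x> is the instruction that can generate it;
   see the constant handling below. */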
20238 static enum reg_class
20239 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20241 machine_mode mode = GET_MODE (x);
20242 bool is_constant = CONSTANT_P (x);
20244 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20245 reload class for it. */
20246 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20247 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20248 return NO_REGS;
20250 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20251 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20252 return NO_REGS;
20254 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20255 the reloading of address expressions using PLUS into floating point
20256 registers. */
20257 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20259 if (is_constant)
20261 /* Zero is always allowed in all VSX registers. */
20262 if (x == CONST0_RTX (mode))
20263 return rclass;
20265 /* If this is a vector constant that can be formed with a few Altivec
20266 instructions, we want altivec registers. */
20267 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20268 return ALTIVEC_REGS;
20270 /* If this is an integer constant that can easily be loaded into
20271 vector registers, allow it. */
20272 if (CONST_INT_P (x))
20274 HOST_WIDE_INT value = INTVAL (x);
20276 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20277 2.06 can generate it in the Altivec registers with
20278 VSPLTI<x>. */
20279 if (value == -1)
20281 if (TARGET_P8_VECTOR)
20282 return rclass;
20283 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20284 return ALTIVEC_REGS;
20285 else
20286 return NO_REGS;
20289 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20290 a sign extend in the Altivec registers. */
20291 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20292 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20293 return ALTIVEC_REGS;
20296 /* Force constant to memory. */
20297 return NO_REGS;
20300 /* D-form addressing can easily reload the value. */
20301 if (mode_supports_vmx_dform (mode)
20302 || mode_supports_vsx_dform_quad (mode))
20303 return rclass;
20305 /* If this is a scalar floating point value and we don't have D-form
20306 addressing, prefer the traditional floating point registers so that we
20307 can use D-form (register+offset) addressing. */
20308 if (rclass == VSX_REGS
20309 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20310 return FLOAT_REGS;
20312 /* Prefer the Altivec registers if Altivec is handling the vector
20313 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20314 loads. */
20315 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20316 || mode == V1TImode)
20317 return ALTIVEC_REGS;
20319 return rclass;
20322 if (is_constant || GET_CODE (x) == PLUS)
20324 if (reg_class_subset_p (GENERAL_REGS, rclass))
20325 return GENERAL_REGS;
20326 if (reg_class_subset_p (BASE_REGS, rclass))
20327 return BASE_REGS;
20328 return NO_REGS;
20331 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20332 return GENERAL_REGS;
20334 return rclass;
20337 /* Debug version of rs6000_preferred_reload_class. */
20338 static enum reg_class
20339 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20341 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20343 fprintf (stderr,
20344 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20345 "mode = %s, x:\n",
20346 reg_class_names[ret], reg_class_names[rclass],
20347 GET_MODE_NAME (GET_MODE (x)));
20348 debug_rtx (x);
20350 return ret;
20353 /* If we are copying between FP or AltiVec registers and anything else, we need
20354 a memory location. The exception is when we are targeting ppc64 and the
20355 direct moves between FPRs and GPRs are available. Also, under VSX, you
20356 can copy vector registers from the FP register set to the Altivec register
20357 set and vice versa. */
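/* For instance, a DImode copy between a GPR and an FPR must go through
   memory unless a direct move (e.g. mtvsrd/mfvsrd on ISA 2.07) or the
   power6x mffgpr/mftgpr instructions are available, in which case
   rs6000_secondary_reload_move succeeds below and no memory is
   needed. */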
20359 static bool
20360 rs6000_secondary_memory_needed (enum reg_class from_class,
20361 enum reg_class to_class,
20362 machine_mode mode)
20364 enum rs6000_reg_type from_type, to_type;
20365 bool altivec_p = ((from_class == ALTIVEC_REGS)
20366 || (to_class == ALTIVEC_REGS));
20368 /* If a simple/direct move is available, we don't need secondary memory. */
20369 from_type = reg_class_to_reg_type[(int)from_class];
20370 to_type = reg_class_to_reg_type[(int)to_class];
20372 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20373 (secondary_reload_info *)0, altivec_p))
20374 return false;
20376 /* If we have a floating point or vector register class, we need to use
20377 memory to transfer the data. */
20378 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20379 return true;
20381 return false;
20384 /* Debug version of rs6000_secondary_memory_needed. */
20385 static bool
20386 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
20387 enum reg_class to_class,
20388 machine_mode mode)
20390 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
20392 fprintf (stderr,
20393 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20394 "to_class = %s, mode = %s\n",
20395 ret ? "true" : "false",
20396 reg_class_names[from_class],
20397 reg_class_names[to_class],
20398 GET_MODE_NAME (mode));
20400 return ret;
20403 /* Return the register class of a scratch register needed to copy IN into
20404 or out of a register in RCLASS in MODE. If it can be done directly,
20405 NO_REGS is returned. */
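/* For example, copying a SYMBOL_REF into FLOAT_REGS on TARGET_ELF
   yields BASE_REGS here, since the symbolic address must first be
   formed in a base register; copying between two GPRs yields
   NO_REGS. */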
20407 static enum reg_class
20408 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20409 rtx in)
20411 int regno;
20413 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20414 #if TARGET_MACHO
20415 && MACHOPIC_INDIRECT
20416 #endif
20419 /* We cannot copy a symbolic operand directly into anything
20420 other than BASE_REGS for TARGET_ELF. So indicate that a
20421 register from BASE_REGS is needed as an intermediate
20422 register.
20424 On Darwin, pic addresses require a load from memory, which
20425 needs a base register. */
20426 if (rclass != BASE_REGS
20427 && (GET_CODE (in) == SYMBOL_REF
20428 || GET_CODE (in) == HIGH
20429 || GET_CODE (in) == LABEL_REF
20430 || GET_CODE (in) == CONST))
20431 return BASE_REGS;
20434 if (GET_CODE (in) == REG)
20436 regno = REGNO (in);
20437 if (regno >= FIRST_PSEUDO_REGISTER)
20439 regno = true_regnum (in);
20440 if (regno >= FIRST_PSEUDO_REGISTER)
20441 regno = -1;
20444 else if (GET_CODE (in) == SUBREG)
20446 regno = true_regnum (in);
20447 if (regno >= FIRST_PSEUDO_REGISTER)
20448 regno = -1;
20450 else
20451 regno = -1;
20453 /* If we have VSX register moves, prefer moving scalar values between
20454 Altivec registers and GPRs by going via an FPR (and then via memory)
20455 instead of reloading the secondary memory address for Altivec moves. */
20456 if (TARGET_VSX
20457 && GET_MODE_SIZE (mode) < 16
20458 && !mode_supports_vmx_dform (mode)
20459 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20460 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20461 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20462 && (regno >= 0 && INT_REGNO_P (regno)))))
20463 return FLOAT_REGS;
20465 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20466 into anything. */
20467 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20468 || (regno >= 0 && INT_REGNO_P (regno)))
20469 return NO_REGS;
20471 /* Constants, memory, and VSX registers can go into VSX registers (both the
20472 traditional floating point and the altivec registers). */
20473 if (rclass == VSX_REGS
20474 && (regno == -1 || VSX_REGNO_P (regno)))
20475 return NO_REGS;
20477 /* Constants, memory, and FP registers can go into FP registers. */
20478 if ((regno == -1 || FP_REGNO_P (regno))
20479 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20480 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20482 /* Memory and AltiVec registers can go into AltiVec registers. */
20483 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20484 && rclass == ALTIVEC_REGS)
20485 return NO_REGS;
20487 /* We can copy among the CR registers. */
20488 if ((rclass == CR_REGS || rclass == CR0_REGS)
20489 && regno >= 0 && CR_REGNO_P (regno))
20490 return NO_REGS;
20492 /* Otherwise, we need GENERAL_REGS. */
20493 return GENERAL_REGS;
20496 /* Debug version of rs6000_secondary_reload_class. */
20497 static enum reg_class
20498 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20499 machine_mode mode, rtx in)
20501 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20502 fprintf (stderr,
20503 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20504 "mode = %s, input rtx:\n",
20505 reg_class_names[ret], reg_class_names[rclass],
20506 GET_MODE_NAME (mode));
20507 debug_rtx (in);
20509 return ret;
20512 /* Return nonzero if a mode change from FROM to TO is invalid for CLASS. */
20514 static bool
20515 rs6000_cannot_change_mode_class (machine_mode from,
20516 machine_mode to,
20517 enum reg_class rclass)
20519 unsigned from_size = GET_MODE_SIZE (from);
20520 unsigned to_size = GET_MODE_SIZE (to);
20522 if (from_size != to_size)
20524 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20526 if (reg_classes_intersect_p (xclass, rclass))
20528 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
20529 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
20530 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20531 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20533 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20534 single register under VSX because the scalar part of the register
20535 is in the upper 64 bits, and not the lower 64 bits. Types like
20536 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20537 IEEE floating point can't overlap, and neither can small
20538 values. */
20540 if (to_float128_vector_p && from_float128_vector_p)
20541 return false;
20543 else if (to_float128_vector_p || from_float128_vector_p)
20544 return true;
20546 /* TDmode in floating-mode registers must always go into a register
20547 pair with the most significant word in the even-numbered register
20548 to match ISA requirements. In little-endian mode, this does not
20549 match subreg numbering, so we cannot allow subregs. */
20550 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20551 return true;
20553 if (from_size < 8 || to_size < 8)
20554 return true;
20556 if (from_size == 8 && (8 * to_nregs) != to_size)
20557 return true;
20559 if (to_size == 8 && (8 * from_nregs) != from_size)
20560 return true;
20562 return false;
20564 else
20565 return false;
20568 /* Since the VSX register set includes traditional floating point registers
20569 and altivec registers, just check for the size being different instead of
20570 trying to check whether the modes are vector modes. Otherwise it won't
20571 allow say DF and DI to change classes. For types like TFmode and TDmode
20572 that take 2 64-bit registers, rather than a single 128-bit register, don't
20573 allow subregs of those types to other 128 bit types. */
20574 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20576 unsigned num_regs = (from_size + 15) / 16;
20577 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
20578 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
20579 return true;
20581 return (from_size != 8 && from_size != 16);
20584 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20585 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20586 return true;
20588 return false;
20591 /* Debug version of rs6000_cannot_change_mode_class. */
20592 static bool
20593 rs6000_debug_cannot_change_mode_class (machine_mode from,
20594 machine_mode to,
20595 enum reg_class rclass)
20597 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
20599 fprintf (stderr,
20600 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20601 "to = %s, rclass = %s\n",
20602 ret ? "true" : "false",
20603 GET_MODE_NAME (from), GET_MODE_NAME (to),
20604 reg_class_names[rclass]);
20606 return ret;
20609 /* Return a string to do a move operation of 128 bits of data. */
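/* For example (taken from the cases below): a VSX-to-VSX register move
   yields "xxlor %x0,%x1,%x1", an Altivec-to-Altivec move yields
   "vor %0,%1,%1", and a GPR-to-GPR move returns "#" so that the insn
   is split into word-sized moves after reload. */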
20611 const char *
20612 rs6000_output_move_128bit (rtx operands[])
20614 rtx dest = operands[0];
20615 rtx src = operands[1];
20616 machine_mode mode = GET_MODE (dest);
20617 int dest_regno;
20618 int src_regno;
20619 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20620 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20622 if (REG_P (dest))
20624 dest_regno = REGNO (dest);
20625 dest_gpr_p = INT_REGNO_P (dest_regno);
20626 dest_fp_p = FP_REGNO_P (dest_regno);
20627 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20628 dest_vsx_p = dest_fp_p | dest_vmx_p;
20630 else
20632 dest_regno = -1;
20633 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20636 if (REG_P (src))
20638 src_regno = REGNO (src);
20639 src_gpr_p = INT_REGNO_P (src_regno);
20640 src_fp_p = FP_REGNO_P (src_regno);
20641 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20642 src_vsx_p = src_fp_p | src_vmx_p;
20644 else
20646 src_regno = -1;
20647 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20650 /* Register moves. */
20651 if (dest_regno >= 0 && src_regno >= 0)
20653 if (dest_gpr_p)
20655 if (src_gpr_p)
20656 return "#";
20658 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20659 return (WORDS_BIG_ENDIAN
20660 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20661 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20663 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20664 return "#";
20667 else if (TARGET_VSX && dest_vsx_p)
20669 if (src_vsx_p)
20670 return "xxlor %x0,%x1,%x1";
20672 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20673 return (WORDS_BIG_ENDIAN
20674 ? "mtvsrdd %x0,%1,%L1"
20675 : "mtvsrdd %x0,%L1,%1");
20677 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20678 return "#";
20681 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20682 return "vor %0,%1,%1";
20684 else if (dest_fp_p && src_fp_p)
20685 return "#";
20688 /* Loads. */
20689 else if (dest_regno >= 0 && MEM_P (src))
20691 if (dest_gpr_p)
20693 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20694 return "lq %0,%1";
20695 else
20696 return "#";
20699 else if (TARGET_ALTIVEC && dest_vmx_p
20700 && altivec_indexed_or_indirect_operand (src, mode))
20701 return "lvx %0,%y1";
20703 else if (TARGET_VSX && dest_vsx_p)
20705 if (mode_supports_vsx_dform_quad (mode)
20706 && quad_address_p (XEXP (src, 0), mode, true))
20707 return "lxv %x0,%1";
20709 else if (TARGET_P9_VECTOR)
20710 return "lxvx %x0,%y1";
20712 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20713 return "lxvw4x %x0,%y1";
20715 else
20716 return "lxvd2x %x0,%y1";
20719 else if (TARGET_ALTIVEC && dest_vmx_p)
20720 return "lvx %0,%y1";
20722 else if (dest_fp_p)
20723 return "#";
20726 /* Stores. */
20727 else if (src_regno >= 0 && MEM_P (dest))
20729 if (src_gpr_p)
20731 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20732 return "stq %1,%0";
20733 else
20734 return "#";
20737 else if (TARGET_ALTIVEC && src_vmx_p
20738 && altivec_indexed_or_indirect_operand (dest, mode))
20739 return "stvx %1,%y0";
20741 else if (TARGET_VSX && src_vsx_p)
20743 if (mode_supports_vsx_dform_quad (mode)
20744 && quad_address_p (XEXP (dest, 0), mode, true))
20745 return "stxv %x1,%0";
20747 else if (TARGET_P9_VECTOR)
20748 return "stxvx %x1,%y0";
20750 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20751 return "stxvw4x %x1,%y0";
20753 else
20754 return "stxvd2x %x1,%y0";
20757 else if (TARGET_ALTIVEC && src_vmx_p)
20758 return "stvx %1,%y0";
20760 else if (src_fp_p)
20761 return "#";
20764 /* Constants. */
20765 else if (dest_regno >= 0
20766 && (GET_CODE (src) == CONST_INT
20767 || GET_CODE (src) == CONST_WIDE_INT
20768 || GET_CODE (src) == CONST_DOUBLE
20769 || GET_CODE (src) == CONST_VECTOR))
20771 if (dest_gpr_p)
20772 return "#";
20774 else if ((dest_vmx_p && TARGET_ALTIVEC)
20775 || (dest_vsx_p && TARGET_VSX))
20776 return output_vec_const_move (operands);
20779 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20782 /* Validate a 128-bit move. */
20783 bool
20784 rs6000_move_128bit_ok_p (rtx operands[])
20786 machine_mode mode = GET_MODE (operands[0]);
20787 return (gpc_reg_operand (operands[0], mode)
20788 || gpc_reg_operand (operands[1], mode));
20791 /* Return true if a 128-bit move needs to be split. */
20792 bool
20793 rs6000_split_128bit_ok_p (rtx operands[])
20795 if (!reload_completed)
20796 return false;
20798 if (!gpr_or_gpr_p (operands[0], operands[1]))
20799 return false;
20801 if (quad_load_store_p (operands[0], operands[1]))
20802 return false;
20804 return true;
20808 /* Given a comparison operation, return the bit number in CCR to test. We
20809 know this is a valid comparison.
20811 SCC_P is 1 if this is for an scc. That means that %D will have been
20812 used instead of %C, so the bits will be in different places.
20814 Return -1 if OP isn't a valid comparison for some reason. */
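/* Example (illustrative): for (gt (reg:CC cr2) (const_int 0)),
   base_bit is 4 * (cr2 - cr0) = 8, so the GT bit to test is
   base_bit + 1 = 9. With SCC_P set, GE/LE/NE instead test
   base_bit + 3, the bit where a preceding cror has placed the
   result. */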
20816 int
20817 ccr_bit (rtx op, int scc_p)
20819 enum rtx_code code = GET_CODE (op);
20820 machine_mode cc_mode;
20821 int cc_regnum;
20822 int base_bit;
20823 rtx reg;
20825 if (!COMPARISON_P (op))
20826 return -1;
20828 reg = XEXP (op, 0);
20830 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20832 cc_mode = GET_MODE (reg);
20833 cc_regnum = REGNO (reg);
20834 base_bit = 4 * (cc_regnum - CR0_REGNO);
20836 validate_condition_mode (code, cc_mode);
20838 /* When generating a sCOND operation, only positive conditions are
20839 allowed. */
20840 gcc_assert (!scc_p
20841 || code == EQ || code == GT || code == LT || code == UNORDERED
20842 || code == GTU || code == LTU);
20844 switch (code)
20846 case NE:
20847 return scc_p ? base_bit + 3 : base_bit + 2;
20848 case EQ:
20849 return base_bit + 2;
20850 case GT: case GTU: case UNLE:
20851 return base_bit + 1;
20852 case LT: case LTU: case UNGE:
20853 return base_bit;
20854 case ORDERED: case UNORDERED:
20855 return base_bit + 3;
20857 case GE: case GEU:
20858 /* If scc, we will have done a cror to put the bit in the
20859 unordered position. So test that bit. For integer, this is ! LT
20860 unless this is an scc insn. */
20861 return scc_p ? base_bit + 3 : base_bit;
20863 case LE: case LEU:
20864 return scc_p ? base_bit + 3 : base_bit + 1;
20866 default:
20867 gcc_unreachable ();
20871 /* Return the GOT register. */
20873 rtx
20874 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20876 /* The second flow pass currently (June 1999) can't update
20877 regs_ever_live without disturbing other parts of the compiler, so
20878 update it here to make the prolog/epilogue code happy. */
20879 if (!can_create_pseudo_p ()
20880 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20881 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20883 crtl->uses_pic_offset_table = 1;
20885 return pic_offset_table_rtx;
20888 static rs6000_stack_t stack_info;
20890 /* Function to init struct machine_function.
20891 This will be called, via a pointer variable,
20892 from push_function_context. */
20894 static struct machine_function *
20895 rs6000_init_machine_status (void)
20897 stack_info.reload_completed = 0;
20898 return ggc_cleared_alloc<machine_function> ();
20901 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20903 /* Write out a function code label. */
20905 void
20906 rs6000_output_function_entry (FILE *file, const char *fname)
20908 if (fname[0] != '.')
20910 switch (DEFAULT_ABI)
20912 default:
20913 gcc_unreachable ();
20915 case ABI_AIX:
20916 if (DOT_SYMBOLS)
20917 putc ('.', file);
20918 else
20919 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20920 break;
20922 case ABI_ELFv2:
20923 case ABI_V4:
20924 case ABI_DARWIN:
20925 break;
20929 RS6000_OUTPUT_BASENAME (file, fname);
20932 /* Print an operand. Recognize special options, documented below. */
20934 #if TARGET_ELF
20935 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20936 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20937 #else
20938 #define SMALL_DATA_RELOC "sda21"
20939 #define SMALL_DATA_REG 0
20940 #endif
20942 void
20943 print_operand (FILE *file, rtx x, int code)
20945 int i;
20946 unsigned HOST_WIDE_INT uval;
20948 switch (code)
20950 /* %a is output_address. */
20952 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20953 output_operand. */
20955 case 'D':
20956 /* Like 'J' but get to the GT bit only. */
20957 gcc_assert (REG_P (x));
20959 /* Bit 1 is GT bit. */
20960 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20962 /* Add one for shift count in rlinm for scc. */
20963 fprintf (file, "%d", i + 1);
20964 return;
20966 case 'e':
20967 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20968 if (! INT_P (x))
20970 output_operand_lossage ("invalid %%e value");
20971 return;
20974 uval = INTVAL (x);
20975 if ((uval & 0xffff) == 0 && uval != 0)
20976 putc ('s', file);
20977 return;
20979 case 'E':
20980 /* X is a CR register. Print the number of the EQ bit of the CR. */
20981 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20982 output_operand_lossage ("invalid %%E value");
20983 else
20984 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20985 return;
20987 case 'f':
20988 /* X is a CR register. Print the shift count needed to move it
20989 to the high-order four bits. */
20990 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20991 output_operand_lossage ("invalid %%f value");
20992 else
20993 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20994 return;
20996 case 'F':
20997 /* Similar, but print the count for the rotate in the opposite
20998 direction. */
20999 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21000 output_operand_lossage ("invalid %%F value");
21001 else
21002 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21003 return;
21005 case 'G':
21006 /* X is a constant integer. If it is negative, print "m",
21007 otherwise print "z". This is to make an aze or ame insn. */
21008 if (GET_CODE (x) != CONST_INT)
21009 output_operand_lossage ("invalid %%G value");
21010 else if (INTVAL (x) >= 0)
21011 putc ('z', file);
21012 else
21013 putc ('m', file);
21014 return;
21016 case 'h':
21017 /* If constant, output low-order five bits. Otherwise, write
21018 normally. */
21019 if (INT_P (x))
21020 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21021 else
21022 print_operand (file, x, 0);
21023 return;
21025 case 'H':
21026 /* If constant, output low-order six bits. Otherwise, write
21027 normally. */
21028 if (INT_P (x))
21029 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21030 else
21031 print_operand (file, x, 0);
21032 return;
21034 case 'I':
21035 /* Print `i' if this is a constant, else nothing. */
21036 if (INT_P (x))
21037 putc ('i', file);
21038 return;
21040 case 'j':
21041 /* Write the bit number in CCR for jump. */
21042 i = ccr_bit (x, 0);
21043 if (i == -1)
21044 output_operand_lossage ("invalid %%j code");
21045 else
21046 fprintf (file, "%d", i);
21047 return;
21049 case 'J':
21050 /* Similar, but add one for shift count in rlinm for scc and pass
21051 scc flag to `ccr_bit'. */
21052 i = ccr_bit (x, 1);
21053 if (i == -1)
21054 output_operand_lossage ("invalid %%J code");
21055 else
21056 /* If we want bit 31, write a shift count of zero, not 32. */
21057 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21058 return;
21060 case 'k':
21061 /* X must be a constant. Write the 1's complement of the
21062 constant. */
21063 if (! INT_P (x))
21064 output_operand_lossage ("invalid %%k value");
21065 else
21066 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21067 return;
21069 case 'K':
21070 /* X must be a symbolic constant on ELF. Write an
21071 expression suitable for an 'addi' that adds in the low 16
21072 bits of the MEM. */
21073 if (GET_CODE (x) == CONST)
21075 if (GET_CODE (XEXP (x, 0)) != PLUS
21076 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21077 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21078 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21079 output_operand_lossage ("invalid %%K value");
21081 print_operand_address (file, x);
21082 fputs ("@l", file);
21083 return;
21085 /* %l is output_asm_label. */
21087 case 'L':
21088 /* Write second word of DImode or DFmode reference. Works on register
21089 or non-indexed memory only. */
21090 if (REG_P (x))
21091 fputs (reg_names[REGNO (x) + 1], file);
21092 else if (MEM_P (x))
21094 machine_mode mode = GET_MODE (x);
21095 /* Handle possible auto-increment. Since it is pre-increment and
21096 we have already done it, we can just use an offset of word. */
21097 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21098 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21099 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21100 UNITS_PER_WORD));
21101 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21102 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21103 UNITS_PER_WORD));
21104 else
21105 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21106 UNITS_PER_WORD),
21107 0));
21109 if (small_data_operand (x, GET_MODE (x)))
21110 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21111 reg_names[SMALL_DATA_REG]);
21113 return;
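/* For instance, %L applied to a DImode value living in the register
   pair r3:r4 on a 32-bit target prints the name of r4, the second
   word; for a non-indexed mem it prints the address displaced by
   UNITS_PER_WORD. */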
21115 case 'N':
21116 /* Write the number of elements in the vector times 4. */
21117 if (GET_CODE (x) != PARALLEL)
21118 output_operand_lossage ("invalid %%N value");
21119 else
21120 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21121 return;
21123 case 'O':
21124 /* Similar, but subtract 1 first. */
21125 if (GET_CODE (x) != PARALLEL)
21126 output_operand_lossage ("invalid %%O value");
21127 else
21128 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21129 return;
21131 case 'p':
21132 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21133 if (! INT_P (x)
21134 || INTVAL (x) < 0
21135 || (i = exact_log2 (INTVAL (x))) < 0)
21136 output_operand_lossage ("invalid %%p value");
21137 else
21138 fprintf (file, "%d", i);
21139 return;
21141 case 'P':
21142 /* The operand must be an indirect memory reference. The result
21143 is the register name. */
21144 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21145 || REGNO (XEXP (x, 0)) >= 32)
21146 output_operand_lossage ("invalid %%P value");
21147 else
21148 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21149 return;
21151 case 'q':
21152 /* This outputs the logical code corresponding to a boolean
21153 expression. The expression may have one or both operands
21154 negated (if one, only the first one). For condition register
21155 logical operations, it will also treat the negated
21156 CR codes as NOTs, but not handle NOTs of them. */
21158 const char *const *t = 0;
21159 const char *s;
21160 enum rtx_code code = GET_CODE (x);
21161 static const char * const tbl[3][3] = {
21162 { "and", "andc", "nor" },
21163 { "or", "orc", "nand" },
21164 { "xor", "eqv", "xor" } };
21166 if (code == AND)
21167 t = tbl[0];
21168 else if (code == IOR)
21169 t = tbl[1];
21170 else if (code == XOR)
21171 t = tbl[2];
21172 else
21173 output_operand_lossage ("invalid %%q value");
21175 if (GET_CODE (XEXP (x, 0)) != NOT)
21176 s = t[0];
21177 else
21179 if (GET_CODE (XEXP (x, 1)) == NOT)
21180 s = t[2];
21181 else
21182 s = t[1];
21185 fputs (s, file);
21187 return;
21189 case 'Q':
21190 if (! TARGET_MFCRF)
21191 return;
21192 fputc (',', file);
21193 /* FALLTHRU */
21195 case 'R':
21196 /* X is a CR register. Print the mask for `mtcrf'. */
21197 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21198 output_operand_lossage ("invalid %%R value");
21199 else
21200 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21201 return;
21203 case 's':
21204 /* Low 5 bits of 32 - value */
21205 if (! INT_P (x))
21206 output_operand_lossage ("invalid %%s value");
21207 else
21208 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21209 return;
21211 case 't':
21212 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21213 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21215 /* Bit 3 is OV bit. */
21216 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21218 /* If we want bit 31, write a shift count of zero, not 32. */
21219 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21220 return;
21222 case 'T':
21223 /* Print the symbolic name of a branch target register. */
21224 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21225 && REGNO (x) != CTR_REGNO))
21226 output_operand_lossage ("invalid %%T value");
21227 else if (REGNO (x) == LR_REGNO)
21228 fputs ("lr", file);
21229 else
21230 fputs ("ctr", file);
21231 return;
21233 case 'u':
21234 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21235 for use in unsigned operand. */
21236 if (! INT_P (x))
21238 output_operand_lossage ("invalid %%u value");
21239 return;
21242 uval = INTVAL (x);
21243 if ((uval & 0xffff) == 0)
21244 uval >>= 16;
21246 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21247 return;
21249 case 'v':
21250 /* High-order 16 bits of constant for use in signed operand. */
21251 if (! INT_P (x))
21252 output_operand_lossage ("invalid %%v value");
21253 else
21254 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21255 (INTVAL (x) >> 16) & 0xffff);
21256 return;
21258 case 'U':
21259 /* Print `u' if this has an auto-increment or auto-decrement. */
21260 if (MEM_P (x)
21261 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21262 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21263 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21264 putc ('u', file);
21265 return;
21267 case 'V':
21268 /* Print the trap code for this operand. */
21269 switch (GET_CODE (x))
21271 case EQ:
21272 fputs ("eq", file); /* 4 */
21273 break;
21274 case NE:
21275 fputs ("ne", file); /* 24 */
21276 break;
21277 case LT:
21278 fputs ("lt", file); /* 16 */
21279 break;
21280 case LE:
21281 fputs ("le", file); /* 20 */
21282 break;
21283 case GT:
21284 fputs ("gt", file); /* 8 */
21285 break;
21286 case GE:
21287 fputs ("ge", file); /* 12 */
21288 break;
21289 case LTU:
21290 fputs ("llt", file); /* 2 */
21291 break;
21292 case LEU:
21293 fputs ("lle", file); /* 6 */
21294 break;
21295 case GTU:
21296 fputs ("lgt", file); /* 1 */
21297 break;
21298 case GEU:
21299 fputs ("lge", file); /* 5 */
21300 break;
21301 default:
21302 gcc_unreachable ();
21304 break;
21306 case 'w':
21307 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21308 normally. */
21309 if (INT_P (x))
21310 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21311 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21312 else
21313 print_operand (file, x, 0);
21314 return;
21316 case 'x':
21317 /* X is a FPR or Altivec register used in a VSX context. */
21318 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21319 output_operand_lossage ("invalid %%x value");
21320 else
21322 int reg = REGNO (x);
21323 int vsx_reg = (FP_REGNO_P (reg)
21324 ? reg - 32
21325 : reg - FIRST_ALTIVEC_REGNO + 32);
21327 #ifdef TARGET_REGNAMES
21328 if (TARGET_REGNAMES)
21329 fprintf (file, "%%vs%d", vsx_reg);
21330 else
21331 #endif
21332 fprintf (file, "%d", vsx_reg);
21334 return;
21336 case 'X':
21337 if (MEM_P (x)
21338 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21339 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21340 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21341 putc ('x', file);
21342 return;
21344 case 'Y':
21345 /* Like 'L', for third word of TImode/PTImode */
21346 if (REG_P (x))
21347 fputs (reg_names[REGNO (x) + 2], file);
21348 else if (MEM_P (x))
21350 machine_mode mode = GET_MODE (x);
21351 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21352 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21353 output_address (mode, plus_constant (Pmode,
21354 XEXP (XEXP (x, 0), 0), 8));
21355 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21356 output_address (mode, plus_constant (Pmode,
21357 XEXP (XEXP (x, 0), 0), 8));
21358 else
21359 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21360 if (small_data_operand (x, GET_MODE (x)))
21361 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21362 reg_names[SMALL_DATA_REG]);
21364 return;
21366 case 'z':
21367 /* X is a SYMBOL_REF. Write out the name preceded by a
21368 period and without any trailing data in brackets. Used for function
21369 names. If we are configured for System V (or the embedded ABI) on
21370 the PowerPC, do not emit the period, since those systems do not use
21371 TOCs and the like. */
21372 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21374 /* For macho, check to see if we need a stub. */
21375 if (TARGET_MACHO)
21377 const char *name = XSTR (x, 0);
21378 #if TARGET_MACHO
21379 if (darwin_emit_branch_islands
21380 && MACHOPIC_INDIRECT
21381 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21382 name = machopic_indirection_name (x, /*stub_p=*/true);
21383 #endif
21384 assemble_name (file, name);
21386 else if (!DOT_SYMBOLS)
21387 assemble_name (file, XSTR (x, 0));
21388 else
21389 rs6000_output_function_entry (file, XSTR (x, 0));
21390 return;
21392 case 'Z':
21393 /* Like 'L', for last word of TImode/PTImode. */
21394 if (REG_P (x))
21395 fputs (reg_names[REGNO (x) + 3], file);
21396 else if (MEM_P (x))
21398 machine_mode mode = GET_MODE (x);
21399 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21400 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21401 output_address (mode, plus_constant (Pmode,
21402 XEXP (XEXP (x, 0), 0), 12));
21403 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21404 output_address (mode, plus_constant (Pmode,
21405 XEXP (XEXP (x, 0), 0), 12));
21406 else
21407 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21408 if (small_data_operand (x, GET_MODE (x)))
21409 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21410 reg_names[SMALL_DATA_REG]);
21412 return;
21414 /* Print AltiVec memory operand. */
21415 case 'y':
21417 rtx tmp;
21419 gcc_assert (MEM_P (x));
21421 tmp = XEXP (x, 0);
21423 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21424 && GET_CODE (tmp) == AND
21425 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21426 && INTVAL (XEXP (tmp, 1)) == -16)
21427 tmp = XEXP (tmp, 0);
21428 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21429 && GET_CODE (tmp) == PRE_MODIFY)
21430 tmp = XEXP (tmp, 1);
21431 if (REG_P (tmp))
21432 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21433 else
21435 if (GET_CODE (tmp) != PLUS
21436 || !REG_P (XEXP (tmp, 0))
21437 || !REG_P (XEXP (tmp, 1)))
21439 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21440 break;
21443 if (REGNO (XEXP (tmp, 0)) == 0)
21444 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21445 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21446 else
21447 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21448 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21450 break;
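/* Illustrative outputs: (mem:V4SI (reg r9)) prints as "0,r9";
   (mem:V4SI (plus (reg r9) (reg r10))) prints as "r9,r10"; and when
   the first register is r0 the operands are swapped, since r0 in the
   RA position of a load reads as literal zero. */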
21453 case 0:
21454 if (REG_P (x))
21455 fprintf (file, "%s", reg_names[REGNO (x)]);
21456 else if (MEM_P (x))
21458 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21459 know the width from the mode. */
21460 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21461 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21462 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21463 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21464 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21465 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21466 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21467 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21468 else
21469 output_address (GET_MODE (x), XEXP (x, 0));
21471 else
21473 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21474 /* This hack along with a corresponding hack in
21475 rs6000_output_addr_const_extra arranges to output addends
21476 where the assembler expects to find them. E.g.
21477 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21478 without this hack would be output as "x@toc+4". We
21479 want "x+4@toc". */
21480 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21481 else
21482 output_addr_const (file, x);
21484 return;
21486 case '&':
21487 if (const char *name = get_some_local_dynamic_name ())
21488 assemble_name (file, name);
21489 else
21490 output_operand_lossage ("'%%&' used without any "
21491 "local dynamic TLS references");
21492 return;
21494 default:
21495 output_operand_lossage ("invalid %%xn code");
21499 /* Print the address of an operand. */
21501 void
21502 print_operand_address (FILE *file, rtx x)
21504 if (REG_P (x))
21505 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21506 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21507 || GET_CODE (x) == LABEL_REF)
21509 output_addr_const (file, x);
21510 if (small_data_operand (x, GET_MODE (x)))
21511 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21512 reg_names[SMALL_DATA_REG]);
21513 else
21514 gcc_assert (!TARGET_TOC);
21516 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21517 && REG_P (XEXP (x, 1)))
21519 if (REGNO (XEXP (x, 0)) == 0)
21520 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21521 reg_names[ REGNO (XEXP (x, 0)) ]);
21522 else
21523 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21524 reg_names[ REGNO (XEXP (x, 1)) ]);
21526 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21527 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21528 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21529 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21530 #if TARGET_MACHO
21531 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21532 && CONSTANT_P (XEXP (x, 1)))
21534 fprintf (file, "lo16(");
21535 output_addr_const (file, XEXP (x, 1));
21536 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21538 #endif
21539 #if TARGET_ELF
21540 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21541 && CONSTANT_P (XEXP (x, 1)))
21543 output_addr_const (file, XEXP (x, 1));
21544 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21546 #endif
21547 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21549 /* This hack along with a corresponding hack in
21550 rs6000_output_addr_const_extra arranges to output addends
21551 where the assembler expects to find them. E.g.
21552 (lo_sum (reg 9)
21553 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21554 without this hack would be output as "x@toc+8@l(9)". We
21555 want "x+8@toc@l(9)". */
21556 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21557 if (GET_CODE (x) == LO_SUM)
21558 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21559 else
21560 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21562 else
21563 gcc_unreachable ();
21566 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21568 static bool
21569 rs6000_output_addr_const_extra (FILE *file, rtx x)
21571 if (GET_CODE (x) == UNSPEC)
21572 switch (XINT (x, 1))
21574 case UNSPEC_TOCREL:
21575 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21576 && REG_P (XVECEXP (x, 0, 1))
21577 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21578 output_addr_const (file, XVECEXP (x, 0, 0));
21579 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21581 if (INTVAL (tocrel_offset_oac) >= 0)
21582 fprintf (file, "+");
21583 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21585 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21587 putc ('-', file);
21588 assemble_name (file, toc_label_name);
21589 need_toc_init = 1;
21591 else if (TARGET_ELF)
21592 fputs ("@toc", file);
21593 return true;
21595 #if TARGET_MACHO
21596 case UNSPEC_MACHOPIC_OFFSET:
21597 output_addr_const (file, XVECEXP (x, 0, 0));
21598 putc ('-', file);
21599 machopic_output_function_base_name (file);
21600 return true;
21601 #endif
21603 return false;
21606 /* Target hook for assembling integer objects. The PowerPC version has
21607 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21608 is defined. It also needs to handle DI-mode objects on 64-bit
21609 targets. */
21611 static bool
21612 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21614 #ifdef RELOCATABLE_NEEDS_FIXUP
21615 /* Special handling for SI values. */
21616 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21618 static int recurse = 0;
21620 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21621 the .fixup section. Since the TOC section is already relocated, we
21622 don't need to mark it here. We used to skip the text section, but it
21623 should never be valid for relocated addresses to be placed in the text
21624 section. */
21625 if (DEFAULT_ABI == ABI_V4
21626 && (TARGET_RELOCATABLE || flag_pic > 1)
21627 && in_section != toc_section
21628 && !recurse
21629 && !CONST_SCALAR_INT_P (x)
21630 && CONSTANT_P (x))
21632 char buf[256];
21634 recurse = 1;
21635 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21636 fixuplabelno++;
21637 ASM_OUTPUT_LABEL (asm_out_file, buf);
21638 fprintf (asm_out_file, "\t.long\t(");
21639 output_addr_const (asm_out_file, x);
21640 fprintf (asm_out_file, ")@fixup\n");
21641 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21642 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21643 fprintf (asm_out_file, "\t.long\t");
21644 assemble_name (asm_out_file, buf);
21645 fprintf (asm_out_file, "\n\t.previous\n");
21646 recurse = 0;
21647 return true;
21649 /* Remove initial .'s to turn a -mcall-aixdesc function
21650 address into the address of the descriptor, not the function
21651 itself. */
21652 else if (GET_CODE (x) == SYMBOL_REF
21653 && XSTR (x, 0)[0] == '.'
21654 && DEFAULT_ABI == ABI_AIX)
21656 const char *name = XSTR (x, 0);
21657 while (*name == '.')
21658 name++;
21660 fprintf (asm_out_file, "\t.long\t%s\n", name);
21661 return true;
21664 #endif /* RELOCATABLE_NEEDS_FIXUP */
21665 return default_assemble_integer (x, size, aligned_p);
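/* Illustrative only: a sketch of the fixup record the code above emits
   for -mrelocatable (the label number and symbol are made up):

	.LCP12:
		.long (some_sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP12
		.previous  */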
21668 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21669 /* Emit an assembler directive to set symbol visibility for DECL to
21670 VISIBILITY_TYPE. */
21672 static void
21673 rs6000_assemble_visibility (tree decl, int vis)
21675 if (TARGET_XCOFF)
21676 return;
21678 /* Functions need to have their entry point symbol visibility set as
21679 well as their descriptor symbol visibility. */
21680 if (DEFAULT_ABI == ABI_AIX
21681 && DOT_SYMBOLS
21682 && TREE_CODE (decl) == FUNCTION_DECL)
21684 static const char * const visibility_types[] = {
21685 NULL, "protected", "hidden", "internal"
21688 const char *name, *type;
21690 name = ((* targetm.strip_name_encoding)
21691 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21692 type = visibility_types[vis];
21694 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21695 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21697 else
21698 default_assemble_visibility (decl, vis);
21700 #endif
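/* Illustrative only: for an AIX-ABI function `foo' compiled with
   dot-symbols, the branch above emits a directive for both symbols,
   e.g. with hidden visibility:

	.hidden foo	# function descriptor
	.hidden .foo	# code entry point  */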
21702 enum rtx_code
21703 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21705 /* Reversal of FP compares takes care -- an ordered compare
21706 becomes an unordered compare and vice versa. */
21707 if (mode == CCFPmode
21708 && (!flag_finite_math_only
21709 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21710 || code == UNEQ || code == LTGT))
21711 return reverse_condition_maybe_unordered (code);
21712 else
21713 return reverse_condition (code);
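/* Illustrative only: a minimal, self-contained C sketch (not compiler
   code) of why reversing an ordered FP comparison must yield an
   unordered one.  With a NaN operand, !(a >= b) is true while (a < b)
   is false, so the logical reverse of GE is UNLT, not LT.  */
#if 0
#include <math.h>
#include <stdio.h>

int
main (void)
{
  double a = NAN, b = 1.0;
  printf ("!(a >= b)       = %d\n", !(a >= b));	/* 1 */
  printf ("a < b           = %d\n", a < b);	/* 0 */
  printf ("isless || unord = %d\n",
	  isless (a, b) || isunordered (a, b));	/* 1: matches !(a >= b) */
  return 0;
}
#endif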
21716 /* Generate a compare for CODE. Return a brand-new rtx that
21717 represents the result of the compare. */
21719 static rtx
21720 rs6000_generate_compare (rtx cmp, machine_mode mode)
21722 machine_mode comp_mode;
21723 rtx compare_result;
21724 enum rtx_code code = GET_CODE (cmp);
21725 rtx op0 = XEXP (cmp, 0);
21726 rtx op1 = XEXP (cmp, 1);
21728 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21729 comp_mode = CCmode;
21730 else if (FLOAT_MODE_P (mode))
21731 comp_mode = CCFPmode;
21732 else if (code == GTU || code == LTU
21733 || code == GEU || code == LEU)
21734 comp_mode = CCUNSmode;
21735 else if ((code == EQ || code == NE)
21736 && unsigned_reg_p (op0)
21737 && (unsigned_reg_p (op1)
21738 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21739 /* These are unsigned values; perhaps there will be a later
21740 ordering compare that can be shared with this one. */
21741 comp_mode = CCUNSmode;
21742 else
21743 comp_mode = CCmode;
21745 /* If we have an unsigned compare, make sure we don't have a signed value as
21746 an immediate. */
21747 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21748 && INTVAL (op1) < 0)
21750 op0 = copy_rtx_if_shared (op0);
21751 op1 = force_reg (GET_MODE (op0), op1);
21752 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21755 /* First, the compare. */
21756 compare_result = gen_reg_rtx (comp_mode);
21758 /* IEEE 128-bit support in VSX registers when we do not have hardware
21759 support. */
21760 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21762 rtx libfunc = NULL_RTX;
21763 bool check_nan = false;
21764 rtx dest;
21766 switch (code)
21768 case EQ:
21769 case NE:
21770 libfunc = optab_libfunc (eq_optab, mode);
21771 break;
21773 case GT:
21774 case GE:
21775 libfunc = optab_libfunc (ge_optab, mode);
21776 break;
21778 case LT:
21779 case LE:
21780 libfunc = optab_libfunc (le_optab, mode);
21781 break;
21783 case UNORDERED:
21784 case ORDERED:
21785 libfunc = optab_libfunc (unord_optab, mode);
21786 code = (code == UNORDERED) ? NE : EQ;
21787 break;
21789 case UNGE:
21790 case UNGT:
21791 check_nan = true;
21792 libfunc = optab_libfunc (ge_optab, mode);
21793 code = (code == UNGE) ? GE : GT;
21794 break;
21796 case UNLE:
21797 case UNLT:
21798 check_nan = true;
21799 libfunc = optab_libfunc (le_optab, mode);
21800 code = (code == UNLE) ? LE : LT;
21801 break;
21803 case UNEQ:
21804 case LTGT:
21805 check_nan = true;
21806 libfunc = optab_libfunc (eq_optab, mode);
21807 code = (code == UNEQ) ? EQ : NE;
21808 break;
21810 default:
21811 gcc_unreachable ();
21814 gcc_assert (libfunc);
21816 if (!check_nan)
21817 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21818 SImode, 2, op0, mode, op1, mode);
21820 /* The library signals an exception for signalling NaNs, so we need to
21821 handle isgreater, etc. by first checking isordered. */
21822 else
21824 rtx ne_rtx, normal_dest, unord_dest;
21825 rtx unord_func = optab_libfunc (unord_optab, mode);
21826 rtx join_label = gen_label_rtx ();
21827 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21828 rtx unord_cmp = gen_reg_rtx (comp_mode);
21831 /* Test for either value being a NaN. */
21832 gcc_assert (unord_func);
21833 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21834 SImode, 2, op0, mode, op1,
21835 mode);
21837 /* Set value (1) if either value is a NaN, and jump to the join
21838 label. */
21839 dest = gen_reg_rtx (SImode);
21840 emit_move_insn (dest, const1_rtx);
21841 emit_insn (gen_rtx_SET (unord_cmp,
21842 gen_rtx_COMPARE (comp_mode, unord_dest,
21843 const0_rtx)));
21845 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21846 emit_jump_insn (gen_rtx_SET (pc_rtx,
21847 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21848 join_ref,
21849 pc_rtx)));
21851 /* Do the normal comparison, knowing that the values are not
21852 NaNs. */
21853 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21854 SImode, 2, op0, mode, op1,
21855 mode);
21857 emit_insn (gen_cstoresi4 (dest,
21858 gen_rtx_fmt_ee (code, SImode, normal_dest,
21859 const0_rtx),
21860 normal_dest, const0_rtx));
21862 /* Join NaN and non-NaN paths. Compare dest against 0. */
21863 emit_label (join_label);
21864 code = NE;
21867 emit_insn (gen_rtx_SET (compare_result,
21868 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21871 else
21873 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21874 CLOBBERs to match cmptf_internal2 pattern. */
21875 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21876 && FLOAT128_IBM_P (GET_MODE (op0))
21877 && TARGET_HARD_FLOAT)
21878 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21879 gen_rtvec (10,
21880 gen_rtx_SET (compare_result,
21881 gen_rtx_COMPARE (comp_mode, op0, op1)),
21882 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21883 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21884 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21885 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21886 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21887 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21888 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21889 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21890 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21891 else if (GET_CODE (op1) == UNSPEC
21892 && XINT (op1, 1) == UNSPEC_SP_TEST)
21894 rtx op1b = XVECEXP (op1, 0, 0);
21895 comp_mode = CCEQmode;
21896 compare_result = gen_reg_rtx (CCEQmode);
21897 if (TARGET_64BIT)
21898 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21899 else
21900 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21902 else
21903 emit_insn (gen_rtx_SET (compare_result,
21904 gen_rtx_COMPARE (comp_mode, op0, op1)));
21907 /* Some kinds of FP comparisons need an OR operation;
21908 under flag_finite_math_only we don't bother. */
21909 if (FLOAT_MODE_P (mode)
21910 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21911 && !flag_finite_math_only
21912 && (code == LE || code == GE
21913 || code == UNEQ || code == LTGT
21914 || code == UNGT || code == UNLT))
21916 enum rtx_code or1, or2;
21917 rtx or1_rtx, or2_rtx, compare2_rtx;
21918 rtx or_result = gen_reg_rtx (CCEQmode);
21920 switch (code)
21922 case LE: or1 = LT; or2 = EQ; break;
21923 case GE: or1 = GT; or2 = EQ; break;
21924 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21925 case LTGT: or1 = LT; or2 = GT; break;
21926 case UNGT: or1 = UNORDERED; or2 = GT; break;
21927 case UNLT: or1 = UNORDERED; or2 = LT; break;
21928 default: gcc_unreachable ();
21930 validate_condition_mode (or1, comp_mode);
21931 validate_condition_mode (or2, comp_mode);
21932 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21933 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21934 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21935 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21936 const_true_rtx);
21937 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21939 compare_result = or_result;
21940 code = EQ;
21943 validate_condition_mode (code, GET_MODE (compare_result));
21945 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
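/* Illustrative only: a sketch (with hypothetical helper names standing in
   for the real libcalls) of the check_nan control flow emitted above for
   UNGE on a soft-float IEEE 128-bit type: probe for NaNs first so the
   ordering libcall never sees a signalling NaN.  */
#if 0
extern int my_unord (long double, long double); /* hypothetical, like __unordkf2 */
extern int my_ge (long double, long double);	/* hypothetical, like __gekf2 */

static int
soft_unge (long double a, long double b)
{
  int dest = 1;			/* NaN path: UNGE is true.  */
  if (my_unord (a, b) == 0)	/* Both ordered; comparing is safe.  */
    dest = my_ge (a, b) >= 0;	/* Ordering libcalls return <0, 0, >0.  */
  return dest;			/* Join: final test is dest != 0.  */
}
#endif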
21949 /* Return the diagnostic message string if the binary operation OP is
21950 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21952 static const char*
21953 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21954 const_tree type1,
21955 const_tree type2)
21957 machine_mode mode1 = TYPE_MODE (type1);
21958 machine_mode mode2 = TYPE_MODE (type2);
21960 /* For complex modes, use the inner type. */
21961 if (COMPLEX_MODE_P (mode1))
21962 mode1 = GET_MODE_INNER (mode1);
21964 if (COMPLEX_MODE_P (mode2))
21965 mode2 = GET_MODE_INNER (mode2);
21967 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21968 double to intermix unless -mfloat128-convert. */
21969 if (mode1 == mode2)
21970 return NULL;
21972 if (!TARGET_FLOAT128_CVT)
21974 if ((mode1 == KFmode && mode2 == IFmode)
21975 || (mode1 == IFmode && mode2 == KFmode))
21976 return N_("__float128 and __ibm128 cannot be used in the same "
21977 "expression");
21979 if (TARGET_IEEEQUAD
21980 && ((mode1 == IFmode && mode2 == TFmode)
21981 || (mode1 == TFmode && mode2 == IFmode)))
21982 return N_("__ibm128 and long double cannot be used in the same "
21983 "expression");
21985 if (!TARGET_IEEEQUAD
21986 && ((mode1 == KFmode && mode2 == TFmode)
21987 || (mode1 == TFmode && mode2 == KFmode)))
21988 return N_("__float128 and long double cannot be used in the same "
21989 "expression");
21992 return NULL;
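/* Illustrative only: user-level code that this hook rejects.  Without
   -mfloat128-convert, mixing the two 128-bit formats in one expression
   draws the first error returned above (a sketch; the exact wording
   depends on the options in effect).  */
#if 0
__float128 f128;
__ibm128 i128;

__float128
mixed_add (void)
{
  return f128 + i128;	/* error: __float128 and __ibm128 cannot be
			   used in the same expression */
}
#endif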
21996 /* Expand floating point conversion to/from __float128 and __ibm128. */
21998 void
21999 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22001 machine_mode dest_mode = GET_MODE (dest);
22002 machine_mode src_mode = GET_MODE (src);
22003 convert_optab cvt = unknown_optab;
22004 bool do_move = false;
22005 rtx libfunc = NULL_RTX;
22006 rtx dest2;
22007 typedef rtx (*rtx_2func_t) (rtx, rtx);
22008 rtx_2func_t hw_convert = (rtx_2func_t)0;
22009 size_t kf_or_tf;
22011 struct hw_conv_t {
22012 rtx_2func_t from_df;
22013 rtx_2func_t from_sf;
22014 rtx_2func_t from_si_sign;
22015 rtx_2func_t from_si_uns;
22016 rtx_2func_t from_di_sign;
22017 rtx_2func_t from_di_uns;
22018 rtx_2func_t to_df;
22019 rtx_2func_t to_sf;
22020 rtx_2func_t to_si_sign;
22021 rtx_2func_t to_si_uns;
22022 rtx_2func_t to_di_sign;
22023 rtx_2func_t to_di_uns;
22024 } hw_conversions[2] = {
22025 /* conversions to/from KFmode */
22027 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22028 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22029 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22030 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22031 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22032 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22033 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22034 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22035 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22036 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22037 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22038 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22041 /* conversions to/from TFmode */
22043 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22044 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22045 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22046 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22047 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22048 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22049 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22050 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22051 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22052 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22053 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22054 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22058 if (dest_mode == src_mode)
22059 gcc_unreachable ();
22061 /* Eliminate memory operations. */
22062 if (MEM_P (src))
22063 src = force_reg (src_mode, src);
22065 if (MEM_P (dest))
22067 rtx tmp = gen_reg_rtx (dest_mode);
22068 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22069 rs6000_emit_move (dest, tmp, dest_mode);
22070 return;
22073 /* Convert to IEEE 128-bit floating point. */
22074 if (FLOAT128_IEEE_P (dest_mode))
22076 if (dest_mode == KFmode)
22077 kf_or_tf = 0;
22078 else if (dest_mode == TFmode)
22079 kf_or_tf = 1;
22080 else
22081 gcc_unreachable ();
22083 switch (src_mode)
22085 case DFmode:
22086 cvt = sext_optab;
22087 hw_convert = hw_conversions[kf_or_tf].from_df;
22088 break;
22090 case SFmode:
22091 cvt = sext_optab;
22092 hw_convert = hw_conversions[kf_or_tf].from_sf;
22093 break;
22095 case KFmode:
22096 case IFmode:
22097 case TFmode:
22098 if (FLOAT128_IBM_P (src_mode))
22099 cvt = sext_optab;
22100 else
22101 do_move = true;
22102 break;
22104 case SImode:
22105 if (unsigned_p)
22107 cvt = ufloat_optab;
22108 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22110 else
22112 cvt = sfloat_optab;
22113 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22115 break;
22117 case DImode:
22118 if (unsigned_p)
22120 cvt = ufloat_optab;
22121 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22123 else
22125 cvt = sfloat_optab;
22126 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22128 break;
22130 default:
22131 gcc_unreachable ();
22135 /* Convert from IEEE 128-bit floating point. */
22136 else if (FLOAT128_IEEE_P (src_mode))
22138 if (src_mode == KFmode)
22139 kf_or_tf = 0;
22140 else if (src_mode == TFmode)
22141 kf_or_tf = 1;
22142 else
22143 gcc_unreachable ();
22145 switch (dest_mode)
22147 case DFmode:
22148 cvt = trunc_optab;
22149 hw_convert = hw_conversions[kf_or_tf].to_df;
22150 break;
22152 case SFmode:
22153 cvt = trunc_optab;
22154 hw_convert = hw_conversions[kf_or_tf].to_sf;
22155 break;
22157 case KFmode:
22158 case IFmode:
22159 case TFmode:
22160 if (FLOAT128_IBM_P (dest_mode))
22161 cvt = trunc_optab;
22162 else
22163 do_move = true;
22164 break;
22166 case SImode:
22167 if (unsigned_p)
22169 cvt = ufix_optab;
22170 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22172 else
22174 cvt = sfix_optab;
22175 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22177 break;
22179 case DImode:
22180 if (unsigned_p)
22182 cvt = ufix_optab;
22183 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22185 else
22187 cvt = sfix_optab;
22188 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22190 break;
22192 default:
22193 gcc_unreachable ();
22197 /* Both IBM format. */
22198 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22199 do_move = true;
22201 else
22202 gcc_unreachable ();
22204 /* Handle conversion between TFmode/KFmode. */
22205 if (do_move)
22206 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22208 /* Handle conversion if we have hardware support. */
22209 else if (TARGET_FLOAT128_HW && hw_convert)
22210 emit_insn ((hw_convert) (dest, src));
22212 /* Call an external function to do the conversion. */
22213 else if (cvt != unknown_optab)
22215 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22216 gcc_assert (libfunc != NULL_RTX);
22218 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode, 1, src,
22219 src_mode);
22221 gcc_assert (dest2 != NULL_RTX);
22222 if (!rtx_equal_p (dest, dest2))
22223 emit_move_insn (dest, dest2);
22226 else
22227 gcc_unreachable ();
22229 return;
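/* Illustrative only: user-level conversions that funnel through this
   expander.  With ISA 3.0 hardware support they map to single
   instructions via the hw_conversions table; otherwise they become
   libcalls found through the optabs named above.  */
#if 0
__float128
widen (double d)
{
  return d;		/* extenddfkf2: hardware insn or libcall.  */
}

long long
narrow (__float128 q)
{
  return (long long) q;	/* fix_kfdi2: hardware insn or libcall.  */
}
#endif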
22233 /* Emit the RTL for an sISEL pattern. */
22235 void
22236 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
22238 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
22241 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22242 can be used as that dest register. Return the dest register. */
22245 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22247 if (op2 == const0_rtx)
22248 return op1;
22250 if (GET_CODE (scratch) == SCRATCH)
22251 scratch = gen_reg_rtx (mode);
22253 if (logical_operand (op2, mode))
22254 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22255 else
22256 emit_insn (gen_rtx_SET (scratch,
22257 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22259 return scratch;
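/* Illustrative only: the scalar identities used above, in plain C.
   Either form reduces an equality test against OP2 to a test against
   zero, which is the shape the comparison patterns want.  */
#if 0
int
eq_via_xor (unsigned long a, unsigned long b)
{
  return (a ^ b) == 0;	/* used when B is a logical_operand */
}

int
eq_via_sub (unsigned long a, unsigned long b)
{
  return (a - b) == 0;	/* fallback: add the negation of B */
}
#endif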
22262 void
22263 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22265 rtx condition_rtx;
22266 machine_mode op_mode;
22267 enum rtx_code cond_code;
22268 rtx result = operands[0];
22270 condition_rtx = rs6000_generate_compare (operands[1], mode);
22271 cond_code = GET_CODE (condition_rtx);
22273 if (cond_code == NE
22274 || cond_code == GE || cond_code == LE
22275 || cond_code == GEU || cond_code == LEU
22276 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22278 rtx not_result = gen_reg_rtx (CCEQmode);
22279 rtx not_op, rev_cond_rtx;
22280 machine_mode cc_mode;
22282 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22284 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22285 SImode, XEXP (condition_rtx, 0), const0_rtx);
22286 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22287 emit_insn (gen_rtx_SET (not_result, not_op));
22288 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22291 op_mode = GET_MODE (XEXP (operands[1], 0));
22292 if (op_mode == VOIDmode)
22293 op_mode = GET_MODE (XEXP (operands[1], 1));
22295 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22297 PUT_MODE (condition_rtx, DImode);
22298 convert_move (result, condition_rtx, 0);
22300 else
22302 PUT_MODE (condition_rtx, SImode);
22303 emit_insn (gen_rtx_SET (result, condition_rtx));
22307 /* Emit a conditional branch to the label in OPERANDS[3], testing the comparison in OPERANDS[0]. */
22309 void
22310 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22312 rtx condition_rtx, loc_ref;
22314 condition_rtx = rs6000_generate_compare (operands[0], mode);
22315 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22316 emit_jump_insn (gen_rtx_SET (pc_rtx,
22317 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22318 loc_ref, pc_rtx)));
22321 /* Return the string to output a conditional branch to LABEL, which is
22322 the operand template of the label, or NULL if the branch is really a
22323 conditional return.
22325 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22326 condition code register and its mode specifies what kind of
22327 comparison we made.
22329 REVERSED is nonzero if we should reverse the sense of the comparison.
22331 INSN is the insn. */
22333 char *
22334 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22336 static char string[64];
22337 enum rtx_code code = GET_CODE (op);
22338 rtx cc_reg = XEXP (op, 0);
22339 machine_mode mode = GET_MODE (cc_reg);
22340 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22341 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22342 int really_reversed = reversed ^ need_longbranch;
22343 char *s = string;
22344 const char *ccode;
22345 const char *pred;
22346 rtx note;
22348 validate_condition_mode (code, mode);
22350 /* Work out which way this really branches. We could use
22351 reverse_condition_maybe_unordered here always but this
22352 makes the resulting assembler clearer. */
22353 if (really_reversed)
22355 /* Reversal of FP compares takes care -- an ordered compare
22356 becomes an unordered compare and vice versa. */
22357 if (mode == CCFPmode)
22358 code = reverse_condition_maybe_unordered (code);
22359 else
22360 code = reverse_condition (code);
22363 switch (code)
22365 /* Not all of these are actually distinct opcodes, but
22366 we distinguish them for clarity of the resulting assembler. */
22367 case NE: case LTGT:
22368 ccode = "ne"; break;
22369 case EQ: case UNEQ:
22370 ccode = "eq"; break;
22371 case GE: case GEU:
22372 ccode = "ge"; break;
22373 case GT: case GTU: case UNGT:
22374 ccode = "gt"; break;
22375 case LE: case LEU:
22376 ccode = "le"; break;
22377 case LT: case LTU: case UNLT:
22378 ccode = "lt"; break;
22379 case UNORDERED: ccode = "un"; break;
22380 case ORDERED: ccode = "nu"; break;
22381 case UNGE: ccode = "nl"; break;
22382 case UNLE: ccode = "ng"; break;
22383 default:
22384 gcc_unreachable ();
22387 /* Maybe we have a guess as to how likely the branch is. */
22388 pred = "";
22389 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22390 if (note != NULL_RTX)
22392 /* PROB is the difference from 50%. */
22393 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22394 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22396 /* Only hint for highly probable/improbable branches on newer cpus when
22397 we have real profile data, as static prediction overrides processor
22398 dynamic prediction. For older cpus we may as well always hint, but
22399 assume not taken for branches that are very close to 50% as a
22400 mispredicted taken branch is more expensive than a
22401 mispredicted not-taken branch. */
22402 if (rs6000_always_hint
22403 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22404 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22405 && br_prob_note_reliable_p (note)))
22407 if (abs (prob) > REG_BR_PROB_BASE / 20
22408 && ((prob > 0) ^ need_longbranch))
22409 pred = "+";
22410 else
22411 pred = "-";
22415 if (label == NULL)
22416 s += sprintf (s, "b%slr%s ", ccode, pred);
22417 else
22418 s += sprintf (s, "b%s%s ", ccode, pred);
22420 /* We need to escape any '%' characters in the reg_names string.
22421 Assume they'd only be the first character.... */
22422 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22423 *s++ = '%';
22424 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22426 if (label != NULL)
22428 /* If the branch distance was too far, we may have to use an
22429 unconditional branch to go the distance. */
22430 if (need_longbranch)
22431 s += sprintf (s, ",$+8\n\tb %s", label);
22432 else
22433 s += sprintf (s, ",%s", label);
22436 return string;
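/* Illustrative only: sample strings this routine produces, assuming cr0
   and a made-up label .L5:

	beq cr0,.L5	plain conditional branch
	bne+ cr0,.L5	'+' static hint: predict taken
	blt- cr0,.L5	'-' static hint: predict not taken
	bgelr cr0	conditional return (LABEL == NULL)
	bne cr0,$+8
	b .L5		long-branch form: sense reversed around an
			unconditional branch when .L5 is out of range  */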
22439 /* Return insn for VSX or Altivec comparisons. */
22441 static rtx
22442 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22444 rtx mask;
22445 machine_mode mode = GET_MODE (op0);
22447 switch (code)
22449 default:
22450 break;
22452 case GE:
22453 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22454 return NULL_RTX;
22455 /* FALLTHRU */
22457 case EQ:
22458 case GT:
22459 case GTU:
22460 case ORDERED:
22461 case UNORDERED:
22462 case UNEQ:
22463 case LTGT:
22464 mask = gen_reg_rtx (mode);
22465 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22466 return mask;
22469 return NULL_RTX;
22472 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22473 DMODE is expected destination mode. This is a recursive function. */
22475 static rtx
22476 rs6000_emit_vector_compare (enum rtx_code rcode,
22477 rtx op0, rtx op1,
22478 machine_mode dmode)
22480 rtx mask;
22481 bool swap_operands = false;
22482 bool try_again = false;
22484 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22485 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22487 /* See if the comparison works as is. */
22488 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22489 if (mask)
22490 return mask;
22492 switch (rcode)
22494 case LT:
22495 rcode = GT;
22496 swap_operands = true;
22497 try_again = true;
22498 break;
22499 case LTU:
22500 rcode = GTU;
22501 swap_operands = true;
22502 try_again = true;
22503 break;
22504 case NE:
22505 case UNLE:
22506 case UNLT:
22507 case UNGE:
22508 case UNGT:
22509 /* Invert condition and try again.
22510 e.g., A != B becomes ~(A==B). */
22512 enum rtx_code rev_code;
22513 enum insn_code nor_code;
22514 rtx mask2;
22516 rev_code = reverse_condition_maybe_unordered (rcode);
22517 if (rev_code == UNKNOWN)
22518 return NULL_RTX;
22520 nor_code = optab_handler (one_cmpl_optab, dmode);
22521 if (nor_code == CODE_FOR_nothing)
22522 return NULL_RTX;
22524 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22525 if (!mask2)
22526 return NULL_RTX;
22528 mask = gen_reg_rtx (dmode);
22529 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22530 return mask;
22532 break;
22533 case GE:
22534 case GEU:
22535 case LE:
22536 case LEU:
22537 /* Try GT/GTU/LT/LTU OR EQ */
22539 rtx c_rtx, eq_rtx;
22540 enum insn_code ior_code;
22541 enum rtx_code new_code;
22543 switch (rcode)
22545 case GE:
22546 new_code = GT;
22547 break;
22549 case GEU:
22550 new_code = GTU;
22551 break;
22553 case LE:
22554 new_code = LT;
22555 break;
22557 case LEU:
22558 new_code = LTU;
22559 break;
22561 default:
22562 gcc_unreachable ();
22565 ior_code = optab_handler (ior_optab, dmode);
22566 if (ior_code == CODE_FOR_nothing)
22567 return NULL_RTX;
22569 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22570 if (!c_rtx)
22571 return NULL_RTX;
22573 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22574 if (!eq_rtx)
22575 return NULL_RTX;
22577 mask = gen_reg_rtx (dmode);
22578 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22579 return mask;
22581 break;
22582 default:
22583 return NULL_RTX;
22586 if (try_again)
22588 if (swap_operands)
22589 std::swap (op0, op1);
22591 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22592 if (mask)
22593 return mask;
22596 /* You only get two chances. */
22597 return NULL_RTX;
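/* Illustrative only: the boolean identities the recursion above relies
   on, written for a single lane in plain C (lane masks are -1 for true,
   0 for false).  */
#if 0
int lane_eq (int a, int b) { return a == b ? -1 : 0; }
int lane_gt (int a, int b) { return a > b ? -1 : 0; }
int lane_ne (int a, int b) { return ~lane_eq (a, b); }	/* NOT (EQ) */
int lane_lt (int a, int b) { return lane_gt (b, a); }	/* swap operands */
int lane_le (int a, int b) { return lane_lt (a, b) | lane_eq (a, b); } /* OR EQ */
#endif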
22600 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22601 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22602 operands for the relation operation COND. */
22605 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22606 rtx cond, rtx cc_op0, rtx cc_op1)
22608 machine_mode dest_mode = GET_MODE (dest);
22609 machine_mode mask_mode = GET_MODE (cc_op0);
22610 enum rtx_code rcode = GET_CODE (cond);
22611 machine_mode cc_mode = CCmode;
22612 rtx mask;
22613 rtx cond2;
22614 bool invert_move = false;
22616 if (VECTOR_UNIT_NONE_P (dest_mode))
22617 return 0;
22619 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22620 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22622 switch (rcode)
22624 /* Swap operands if we can, and fall back to doing the operation as
22625 specified, and doing a NOR to invert the test. */
22626 case NE:
22627 case UNLE:
22628 case UNLT:
22629 case UNGE:
22630 case UNGT:
22631 /* Invert condition and try again.
22632 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22633 invert_move = true;
22634 rcode = reverse_condition_maybe_unordered (rcode);
22635 if (rcode == UNKNOWN)
22636 return 0;
22637 break;
22639 case GE:
22640 case LE:
22641 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22643 /* Invert condition to avoid compound test. */
22644 invert_move = true;
22645 rcode = reverse_condition (rcode);
22647 break;
22649 case GTU:
22650 case GEU:
22651 case LTU:
22652 case LEU:
22653 /* Mark unsigned tests with CCUNSmode. */
22654 cc_mode = CCUNSmode;
22656 /* Invert condition to avoid compound test if necessary. */
22657 if (rcode == GEU || rcode == LEU)
22659 invert_move = true;
22660 rcode = reverse_condition (rcode);
22662 break;
22664 default:
22665 break;
22668 /* Get the vector mask for the given relational operations. */
22669 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22671 if (!mask)
22672 return 0;
22674 if (invert_move)
22675 std::swap (op_true, op_false);
22677 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22678 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22679 && (GET_CODE (op_true) == CONST_VECTOR
22680 || GET_CODE (op_false) == CONST_VECTOR))
22682 rtx constant_0 = CONST0_RTX (dest_mode);
22683 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22685 if (op_true == constant_m1 && op_false == constant_0)
22687 emit_move_insn (dest, mask);
22688 return 1;
22691 else if (op_true == constant_0 && op_false == constant_m1)
22693 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22694 return 1;
22697 /* If we can't use the vector comparison directly, perhaps we can use
22698 the mask for the true or false fields, instead of loading up a
22699 constant. */
22700 if (op_true == constant_m1)
22701 op_true = mask;
22703 if (op_false == constant_0)
22704 op_false = mask;
22707 if (!REG_P (op_true) && !SUBREG_P (op_true))
22708 op_true = force_reg (dest_mode, op_true);
22710 if (!REG_P (op_false) && !SUBREG_P (op_false))
22711 op_false = force_reg (dest_mode, op_false);
22713 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22714 CONST0_RTX (dest_mode));
22715 emit_insn (gen_rtx_SET (dest,
22716 gen_rtx_IF_THEN_ELSE (dest_mode,
22717 cond2,
22718 op_true,
22719 op_false)));
22720 return 1;
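/* Illustrative only: the element-wise select performed by the final
   if_then_else, assuming each MASK lane is all-ones or all-zeros (as
   the vector compare guarantees).  vsel/xxsel compute exactly this.  */
#if 0
unsigned int
lane_select (unsigned int mask, unsigned int t, unsigned int f)
{
  return (mask & t) | (~mask & f);
}
#endif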
22723 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22724 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
22725 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22726 hardware has no such operation. */
22728 static int
22729 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22731 enum rtx_code code = GET_CODE (op);
22732 rtx op0 = XEXP (op, 0);
22733 rtx op1 = XEXP (op, 1);
22734 machine_mode compare_mode = GET_MODE (op0);
22735 machine_mode result_mode = GET_MODE (dest);
22736 bool max_p = false;
22738 if (result_mode != compare_mode)
22739 return 0;
22741 if (code == GE || code == GT)
22742 max_p = true;
22743 else if (code == LE || code == LT)
22744 max_p = false;
22745 else
22746 return 0;
22748 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22751 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22752 max_p = !max_p;
22754 else
22755 return 0;
22757 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22758 return 1;
22761 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22762 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied to
22763 the operands of the last comparison is nonzero/true, FALSE_COND if it is
22764 zero/false. Return 0 if the hardware has no such operation. */
22766 static int
22767 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22769 enum rtx_code code = GET_CODE (op);
22770 rtx op0 = XEXP (op, 0);
22771 rtx op1 = XEXP (op, 1);
22772 machine_mode result_mode = GET_MODE (dest);
22773 rtx compare_rtx;
22774 rtx cmove_rtx;
22775 rtx clobber_rtx;
22777 if (!can_create_pseudo_p ())
22778 return 0;
22780 switch (code)
22782 case EQ:
22783 case GE:
22784 case GT:
22785 break;
22787 case NE:
22788 case LT:
22789 case LE:
22790 code = swap_condition (code);
22791 std::swap (op0, op1);
22792 break;
22794 default:
22795 return 0;
22798 /* Generate: [(parallel [(set (dest)
22799 (if_then_else (op (cmp1) (cmp2))
22800 (true)
22801 (false)))
22802 (clobber (scratch))])]. */
22804 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22805 cmove_rtx = gen_rtx_SET (dest,
22806 gen_rtx_IF_THEN_ELSE (result_mode,
22807 compare_rtx,
22808 true_cond,
22809 false_cond));
22811 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22812 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22813 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22815 return 1;
22818 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22819 operands of the last comparison is nonzero/true, FALSE_COND if it
22820 is zero/false. Return 0 if the hardware has no such operation. */
22823 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22825 enum rtx_code code = GET_CODE (op);
22826 rtx op0 = XEXP (op, 0);
22827 rtx op1 = XEXP (op, 1);
22828 machine_mode compare_mode = GET_MODE (op0);
22829 machine_mode result_mode = GET_MODE (dest);
22830 rtx temp;
22831 bool is_against_zero;
22833 /* These modes should always match. */
22834 if (GET_MODE (op1) != compare_mode
22835 /* In the isel case however, we can use a compare immediate, so
22836 op1 may be a small constant. */
22837 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22838 return 0;
22839 if (GET_MODE (true_cond) != result_mode)
22840 return 0;
22841 if (GET_MODE (false_cond) != result_mode)
22842 return 0;
22844 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22845 if (TARGET_P9_MINMAX
22846 && (compare_mode == SFmode || compare_mode == DFmode)
22847 && (result_mode == SFmode || result_mode == DFmode))
22849 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22850 return 1;
22852 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22853 return 1;
22856 /* Don't allow using floating point comparisons for integer results for
22857 now. */
22858 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22859 return 0;
22861 /* First, work out if the hardware can do this at all, or
22862 if it's too slow.... */
22863 if (!FLOAT_MODE_P (compare_mode))
22865 if (TARGET_ISEL)
22866 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22867 return 0;
22870 is_against_zero = op1 == CONST0_RTX (compare_mode);
22872 /* A floating-point subtract might overflow, underflow, or produce
22873 an inexact result, thus changing the floating-point flags, so it
22874 can't be generated if we care about that. It's safe if one side
22875 of the construct is zero, since then no subtract will be
22876 generated. */
22877 if (SCALAR_FLOAT_MODE_P (compare_mode)
22878 && flag_trapping_math && ! is_against_zero)
22879 return 0;
22881 /* Eliminate half of the comparisons by switching operands, this
22882 makes the remaining code simpler. */
22883 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22884 || code == LTGT || code == LT || code == UNLE)
22886 code = reverse_condition_maybe_unordered (code);
22887 temp = true_cond;
22888 true_cond = false_cond;
22889 false_cond = temp;
22892 /* UNEQ and LTGT take four instructions for a comparison with zero,
22893 so it'll probably be faster to use a branch here too. */
22894 if (code == UNEQ && HONOR_NANS (compare_mode))
22895 return 0;
22897 /* We're going to try to implement comparisons by performing
22898 a subtract, then comparing against zero. Unfortunately,
22899 Inf - Inf is NaN which is not zero, and so if we don't
22900 know that the operand is finite and the comparison
22901 would treat EQ differently from UNORDERED, we can't do it. */
22902 if (HONOR_INFINITIES (compare_mode)
22903 && code != GT && code != UNGE
22904 && (GET_CODE (op1) != CONST_DOUBLE
22905 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22906 /* Constructs of the form (a OP b ? a : b) are safe. */
22907 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22908 || (! rtx_equal_p (op0, true_cond)
22909 && ! rtx_equal_p (op1, true_cond))))
22910 return 0;
22912 /* At this point we know we can use fsel. */
22914 /* Reduce the comparison to a comparison against zero. */
22915 if (! is_against_zero)
22917 temp = gen_reg_rtx (compare_mode);
22918 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22919 op0 = temp;
22920 op1 = CONST0_RTX (compare_mode);
22923 /* If we don't care about NaNs we can reduce some of the comparisons
22924 down to faster ones. */
22925 if (! HONOR_NANS (compare_mode))
22926 switch (code)
22928 case GT:
22929 code = LE;
22930 temp = true_cond;
22931 true_cond = false_cond;
22932 false_cond = temp;
22933 break;
22934 case UNGE:
22935 code = GE;
22936 break;
22937 case UNEQ:
22938 code = EQ;
22939 break;
22940 default:
22941 break;
22944 /* Now, reduce everything down to a GE. */
22945 switch (code)
22947 case GE:
22948 break;
22950 case LE:
22951 temp = gen_reg_rtx (compare_mode);
22952 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22953 op0 = temp;
22954 break;
22956 case ORDERED:
22957 temp = gen_reg_rtx (compare_mode);
22958 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22959 op0 = temp;
22960 break;
22962 case EQ:
22963 temp = gen_reg_rtx (compare_mode);
22964 emit_insn (gen_rtx_SET (temp,
22965 gen_rtx_NEG (compare_mode,
22966 gen_rtx_ABS (compare_mode, op0))));
22967 op0 = temp;
22968 break;
22970 case UNGE:
22971 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22972 temp = gen_reg_rtx (result_mode);
22973 emit_insn (gen_rtx_SET (temp,
22974 gen_rtx_IF_THEN_ELSE (result_mode,
22975 gen_rtx_GE (VOIDmode,
22976 op0, op1),
22977 true_cond, false_cond)));
22978 false_cond = true_cond;
22979 true_cond = temp;
22981 temp = gen_reg_rtx (compare_mode);
22982 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22983 op0 = temp;
22984 break;
22986 case GT:
22987 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22988 temp = gen_reg_rtx (result_mode);
22989 emit_insn (gen_rtx_SET (temp,
22990 gen_rtx_IF_THEN_ELSE (result_mode,
22991 gen_rtx_GE (VOIDmode,
22992 op0, op1),
22993 true_cond, false_cond)));
22994 true_cond = false_cond;
22995 false_cond = temp;
22997 temp = gen_reg_rtx (compare_mode);
22998 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22999 op0 = temp;
23000 break;
23002 default:
23003 gcc_unreachable ();
23006 emit_insn (gen_rtx_SET (dest,
23007 gen_rtx_IF_THEN_ELSE (result_mode,
23008 gen_rtx_GE (VOIDmode,
23009 op0, op1),
23010 true_cond, false_cond)));
23011 return 1;
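/* Illustrative only: the fsel primitive the reductions above target,
   as a scalar C sketch.  fsel selects on op0 >= 0, so LE is handled by
   negating op0 and EQ by negating fabs (op0).  */
#if 0
#include <math.h>

static double
fsel (double c, double t, double f)
{
  return c >= 0.0 ? t : f;	 /* the hardware primitive */
}

static double
cmove_le0 (double a, double t, double f)
{
  return fsel (-a, t, f);	 /* a <= 0 ? t : f */
}

static double
cmove_eq0 (double a, double t, double f)
{
  return fsel (-fabs (a), t, f); /* a == 0 ? t : f */
}
#endif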
23014 /* Same as above, but for ints (isel). */
23016 static int
23017 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23019 rtx condition_rtx, cr;
23020 machine_mode mode = GET_MODE (dest);
23021 enum rtx_code cond_code;
23022 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23023 bool signedp;
23025 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23026 return 0;
23028 /* We still have to do the compare, because isel doesn't do a
23029 compare, it just looks at the CRx bits set by a previous compare
23030 instruction. */
23031 condition_rtx = rs6000_generate_compare (op, mode);
23032 cond_code = GET_CODE (condition_rtx);
23033 cr = XEXP (condition_rtx, 0);
23034 signedp = GET_MODE (cr) == CCmode;
23036 isel_func = (mode == SImode
23037 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23038 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23040 switch (cond_code)
23042 case LT: case GT: case LTU: case GTU: case EQ:
23043 /* isel handles these directly. */
23044 break;
23046 default:
23047 /* We need to swap the sense of the comparison. */
23049 std::swap (false_cond, true_cond);
23050 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23052 break;
23055 false_cond = force_reg (mode, false_cond);
23056 if (true_cond != const0_rtx)
23057 true_cond = force_reg (mode, true_cond);
23059 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23061 return 1;
23064 const char *
23065 output_isel (rtx *operands)
23067 enum rtx_code code;
23069 code = GET_CODE (operands[1]);
23071 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
23073 gcc_assert (GET_CODE (operands[2]) == REG
23074 && GET_CODE (operands[3]) == REG);
23075 PUT_CODE (operands[1], reverse_condition (code));
23076 return "isel %0,%3,%2,%j1";
23079 return "isel %0,%2,%3,%j1";
23082 void
23083 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23085 machine_mode mode = GET_MODE (op0);
23086 enum rtx_code c;
23087 rtx target;
23089 /* VSX/altivec have direct min/max insns. */
23090 if ((code == SMAX || code == SMIN)
23091 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23092 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23094 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23095 return;
23098 if (code == SMAX || code == SMIN)
23099 c = GE;
23100 else
23101 c = GEU;
23103 if (code == SMAX || code == UMAX)
23104 target = emit_conditional_move (dest, c, op0, op1, mode,
23105 op0, op1, mode, 0);
23106 else
23107 target = emit_conditional_move (dest, c, op0, op1, mode,
23108 op1, op0, mode, 0);
23109 gcc_assert (target);
23110 if (target != dest)
23111 emit_move_insn (dest, target);
23114 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23115 the value to come from memory or to be already loaded into a GPR. */
23117 void
23118 rs6000_split_signbit (rtx dest, rtx src)
23120 machine_mode d_mode = GET_MODE (dest);
23121 machine_mode s_mode = GET_MODE (src);
23122 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23123 rtx shift_reg = dest_di;
23125 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
23127 if (MEM_P (src))
23129 rtx mem = (WORDS_BIG_ENDIAN
23130 ? adjust_address (src, DImode, 0)
23131 : adjust_address (src, DImode, 8));
23132 emit_insn (gen_rtx_SET (dest_di, mem));
23135 else
23137 unsigned int r = reg_or_subregno (src);
23139 if (INT_REGNO_P (r))
23140 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
23142 else
23144 /* Generate the special mfvsrd instruction to get it in a GPR. */
23145 gcc_assert (VSX_REGNO_P (r));
23146 if (s_mode == KFmode)
23147 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23148 else
23149 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23153 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23154 return;
23157 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23158 COND is true. Mark the jump as unlikely to be taken. */
23160 static void
23161 emit_unlikely_jump (rtx cond, rtx label)
23163 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23164 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23165 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23168 /* A subroutine of the atomic operation splitters. Emit a load-locked
23169 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23170 the zero_extend operation. */
23172 static void
23173 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23175 rtx (*fn) (rtx, rtx) = NULL;
23177 switch (mode)
23179 case QImode:
23180 fn = gen_load_lockedqi;
23181 break;
23182 case HImode:
23183 fn = gen_load_lockedhi;
23184 break;
23185 case SImode:
23186 if (GET_MODE (mem) == QImode)
23187 fn = gen_load_lockedqi_si;
23188 else if (GET_MODE (mem) == HImode)
23189 fn = gen_load_lockedhi_si;
23190 else
23191 fn = gen_load_lockedsi;
23192 break;
23193 case DImode:
23194 fn = gen_load_lockeddi;
23195 break;
23196 case TImode:
23197 fn = gen_load_lockedti;
23198 break;
23199 default:
23200 gcc_unreachable ();
23202 emit_insn (fn (reg, mem));
23205 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23206 instruction in MODE. */
23208 static void
23209 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23211 rtx (*fn) (rtx, rtx, rtx) = NULL;
23213 switch (mode)
23215 case QImode:
23216 fn = gen_store_conditionalqi;
23217 break;
23218 case HImode:
23219 fn = gen_store_conditionalhi;
23220 break;
23221 case SImode:
23222 fn = gen_store_conditionalsi;
23223 break;
23224 case DImode:
23225 fn = gen_store_conditionaldi;
23226 break;
23227 case TImode:
23228 fn = gen_store_conditionalti;
23229 break;
23230 default:
23231 gcc_unreachable ();
23234 /* Emit sync before stwcx. to address PPC405 Erratum. */
23235 if (PPC405_ERRATUM77)
23236 emit_insn (gen_hwsync ());
23238 emit_insn (fn (res, mem, val));
23241 /* Expand barriers before and after a load_locked/store_cond sequence. */
23243 static rtx
23244 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23246 rtx addr = XEXP (mem, 0);
23248 if (!legitimate_indirect_address_p (addr, reload_completed)
23249 && !legitimate_indexed_address_p (addr, reload_completed))
23251 addr = force_reg (Pmode, addr);
23252 mem = replace_equiv_address_nv (mem, addr);
23255 switch (model)
23257 case MEMMODEL_RELAXED:
23258 case MEMMODEL_CONSUME:
23259 case MEMMODEL_ACQUIRE:
23260 break;
23261 case MEMMODEL_RELEASE:
23262 case MEMMODEL_ACQ_REL:
23263 emit_insn (gen_lwsync ());
23264 break;
23265 case MEMMODEL_SEQ_CST:
23266 emit_insn (gen_hwsync ());
23267 break;
23268 default:
23269 gcc_unreachable ();
23271 return mem;
23274 static void
23275 rs6000_post_atomic_barrier (enum memmodel model)
23277 switch (model)
23279 case MEMMODEL_RELAXED:
23280 case MEMMODEL_CONSUME:
23281 case MEMMODEL_RELEASE:
23282 break;
23283 case MEMMODEL_ACQUIRE:
23284 case MEMMODEL_ACQ_REL:
23285 case MEMMODEL_SEQ_CST:
23286 emit_insn (gen_isync ());
23287 break;
23288 default:
23289 gcc_unreachable ();
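/* Illustrative only: the barrier placement the two helpers above give
   each memory model (lwsync orders everything except store-load; sync
   orders everything; isync completes an acquire after the branch):

	relaxed/consume:  none before, none after
	acquire:	  none before, isync after
	release:	  lwsync before, none after
	acq_rel:	  lwsync before, isync after
	seq_cst:	  sync before, isync after  */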
23293 /* A subroutine of the various atomic expanders. For sub-word operations,
23294 we must adjust things to operate on SImode. Given the original MEM,
23295 return a new aligned memory. Also build and return the quantities by
23296 which to shift and mask. */
23298 static rtx
23299 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23301 rtx addr, align, shift, mask, mem;
23302 HOST_WIDE_INT shift_mask;
23303 machine_mode mode = GET_MODE (orig_mem);
23305 /* For smaller modes, we have to implement this via SImode. */
23306 shift_mask = (mode == QImode ? 0x18 : 0x10);
23308 addr = XEXP (orig_mem, 0);
23309 addr = force_reg (GET_MODE (addr), addr);
23311 /* Aligned memory containing subword. Generate a new memory. We
23312 do not want any of the existing MEM_ATTR data, as we're now
23313 accessing memory outside the original object. */
23314 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23315 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23316 mem = gen_rtx_MEM (SImode, align);
23317 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23318 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23319 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23321 /* Shift amount for subword relative to aligned word. */
23322 shift = gen_reg_rtx (SImode);
23323 addr = gen_lowpart (SImode, addr);
23324 rtx tmp = gen_reg_rtx (SImode);
23325 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23326 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23327 if (BYTES_BIG_ENDIAN)
23328 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23329 shift, 1, OPTAB_LIB_WIDEN);
23330 *pshift = shift;
23332 /* Mask for insertion. */
23333 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23334 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23335 *pmask = mask;
23337 return mem;
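/* Illustrative only: the shift/mask arithmetic above as runnable C, for
   a QImode access on a big-endian target (the address is made up).  */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uintptr_t addr = 0x1003;		/* byte 3 of an aligned word */
  uintptr_t align = addr & -4;		/* aligned SImode address */
  unsigned shift = (addr << 3) & 0x18;	/* bit offset: 24 */
  shift ^= 0x18;			/* big-endian flip: 0 */
  unsigned mask = 0xffu << shift;	/* insertion mask: 0xff */
  printf ("align=%#lx shift=%u mask=%#x\n",
	  (unsigned long) align, shift, mask);
  return 0;
}
#endif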
23340 /* A subroutine of the various atomic expanders. For sub-word operands,
23341 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23343 static rtx
23344 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23346 rtx x;
23348 x = gen_reg_rtx (SImode);
23349 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23350 gen_rtx_NOT (SImode, mask),
23351 oldval)));
23353 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23355 return x;
23358 /* A subroutine of the various atomic expanders. For sub-word operands,
23359 extract WIDE to NARROW via SHIFT. */
23361 static void
23362 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23364 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23365 wide, 1, OPTAB_LIB_WIDEN);
23366 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23369 /* Expand an atomic compare and swap operation. */
23371 void
23372 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23374 rtx boolval, retval, mem, oldval, newval, cond;
23375 rtx label1, label2, x, mask, shift;
23376 machine_mode mode, orig_mode;
23377 enum memmodel mod_s, mod_f;
23378 bool is_weak;
23380 boolval = operands[0];
23381 retval = operands[1];
23382 mem = operands[2];
23383 oldval = operands[3];
23384 newval = operands[4];
23385 is_weak = (INTVAL (operands[5]) != 0);
23386 mod_s = memmodel_base (INTVAL (operands[6]));
23387 mod_f = memmodel_base (INTVAL (operands[7]));
23388 orig_mode = mode = GET_MODE (mem);
23390 mask = shift = NULL_RTX;
23391 if (mode == QImode || mode == HImode)
23393 /* Before power8, we didn't have access to lbarx/lharx, so we generate a
23394 lwarx and use shift/mask operations. With power8, we need to do the
23395 comparison in SImode, but the store is still done in QI/HImode. */
23396 oldval = convert_modes (SImode, mode, oldval, 1);
23398 if (!TARGET_SYNC_HI_QI)
23400 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23402 /* Shift and mask OLDVAL into position within the word. */
23403 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23404 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23406 /* Shift and mask NEWVAL into position within the word. */
23407 newval = convert_modes (SImode, mode, newval, 1);
23408 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23409 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23412 /* Prepare to adjust the return value. */
23413 retval = gen_reg_rtx (SImode);
23414 mode = SImode;
23416 else if (reg_overlap_mentioned_p (retval, oldval))
23417 oldval = copy_to_reg (oldval);
23419 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23420 oldval = copy_to_mode_reg (mode, oldval);
23422 if (reg_overlap_mentioned_p (retval, newval))
23423 newval = copy_to_reg (newval);
23425 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23427 label1 = NULL_RTX;
23428 if (!is_weak)
23430 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23431 emit_label (XEXP (label1, 0));
23433 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23435 emit_load_locked (mode, retval, mem);
23437 x = retval;
23438 if (mask)
23439 x = expand_simple_binop (SImode, AND, retval, mask,
23440 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23442 cond = gen_reg_rtx (CCmode);
23443 /* If we have TImode, synthesize a comparison. */
23444 if (mode != TImode)
23445 x = gen_rtx_COMPARE (CCmode, x, oldval);
23446 else
23448 rtx xor1_result = gen_reg_rtx (DImode);
23449 rtx xor2_result = gen_reg_rtx (DImode);
23450 rtx or_result = gen_reg_rtx (DImode);
23451 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23452 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23453 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23454 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23456 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23457 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23458 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23459 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23462 emit_insn (gen_rtx_SET (cond, x));
23464 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23465 emit_unlikely_jump (x, label2);
23467 x = newval;
23468 if (mask)
23469 x = rs6000_mask_atomic_subword (retval, newval, mask);
23471 emit_store_conditional (orig_mode, cond, mem, x);
23473 if (!is_weak)
23475 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23476 emit_unlikely_jump (x, label1);
23479 if (!is_mm_relaxed (mod_f))
23480 emit_label (XEXP (label2, 0));
23482 rs6000_post_atomic_barrier (mod_s);
23484 if (is_mm_relaxed (mod_f))
23485 emit_label (XEXP (label2, 0));
23487 if (shift)
23488 rs6000_finish_atomic_subword (operands[1], retval, shift);
23489 else if (mode != GET_MODE (operands[1]))
23490 convert_move (operands[1], retval, 1);
23492 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23493 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23494 emit_insn (gen_rtx_SET (boolval, x));
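/* Illustrative only: the shape of the loop expanded above for a strong
   SImode compare-and-swap with seq_cst ordering (register numbers and
   labels are made up):

	sync			# pre-barrier for seq_cst
   1:	lwarx 9,0,3		# load-locked RETVAL
	cmpw 0,9,4		# RETVAL == OLDVAL ?
	bne- 0,2f		# unlikely: fail
	stwcx. 5,0,3		# store-conditional NEWVAL
	bne- 0,1b		# reservation lost: retry (strong CAS)
   2:	isync			# post-barrier
				# CR0 holds EQ on success, NE on failure  */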
23497 /* Expand an atomic exchange operation. */
23499 void
23500 rs6000_expand_atomic_exchange (rtx operands[])
23502 rtx retval, mem, val, cond;
23503 machine_mode mode;
23504 enum memmodel model;
23505 rtx label, x, mask, shift;
23507 retval = operands[0];
23508 mem = operands[1];
23509 val = operands[2];
23510 model = memmodel_base (INTVAL (operands[3]));
23511 mode = GET_MODE (mem);
23513 mask = shift = NULL_RTX;
23514 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23516 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23518 /* Shift and mask VAL into position within the word. */
23519 val = convert_modes (SImode, mode, val, 1);
23520 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23521 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23523 /* Prepare to adjust the return value. */
23524 retval = gen_reg_rtx (SImode);
23525 mode = SImode;
23528 mem = rs6000_pre_atomic_barrier (mem, model);
23530 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23531 emit_label (XEXP (label, 0));
23533 emit_load_locked (mode, retval, mem);
23535 x = val;
23536 if (mask)
23537 x = rs6000_mask_atomic_subword (retval, val, mask);
23539 cond = gen_reg_rtx (CCmode);
23540 emit_store_conditional (mode, cond, mem, x);
23542 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23543 emit_unlikely_jump (x, label);
23545 rs6000_post_atomic_barrier (model);
23547 if (shift)
23548 rs6000_finish_atomic_subword (operands[0], retval, shift);
23551 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23552 to perform. MEM is the memory on which to operate. VAL is the second
23553 operand of the binary operator. BEFORE and AFTER are optional locations to
23554 return the value of MEM either before or after the operation. MODEL_RTX
23555 is a CONST_INT containing the memory model to use. */
23557 void
23558 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23559 rtx orig_before, rtx orig_after, rtx model_rtx)
23561 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23562 machine_mode mode = GET_MODE (mem);
23563 machine_mode store_mode = mode;
23564 rtx label, x, cond, mask, shift;
23565 rtx before = orig_before, after = orig_after;
23567 mask = shift = NULL_RTX;
23568 /* On power8, we want to use SImode for the operation. On previous systems,
23569 use the operation in a subword and shift/mask to get the proper byte or
23570 halfword. */
23571 if (mode == QImode || mode == HImode)
23573 if (TARGET_SYNC_HI_QI)
23575 val = convert_modes (SImode, mode, val, 1);
23577 /* Prepare to adjust the return value. */
23578 before = gen_reg_rtx (SImode);
23579 if (after)
23580 after = gen_reg_rtx (SImode);
23581 mode = SImode;
23583 else
23585 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23587 /* Shift and mask VAL into position with the word. */
23588 val = convert_modes (SImode, mode, val, 1);
23589 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23590 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23592 switch (code)
23594 case IOR:
23595 case XOR:
23596 /* We've already zero-extended VAL. That is sufficient to
23597 make certain that it does not affect other bits. */
23598 mask = NULL;
23599 break;
23601 case AND:
23602 /* If we make certain that all of the other bits in VAL are
23603 set, that will be sufficient to not affect other bits. */
23604 x = gen_rtx_NOT (SImode, mask);
23605 x = gen_rtx_IOR (SImode, x, val);
23606 emit_insn (gen_rtx_SET (val, x));
23607 mask = NULL;
23608 break;
23610 case NOT:
23611 case PLUS:
23612 case MINUS:
23613 /* These will all affect bits outside the field and need
23614 adjustment via MASK within the loop. */
23615 break;
23617 default:
23618 gcc_unreachable ();
23621 /* Prepare to adjust the return value. */
23622 before = gen_reg_rtx (SImode);
23623 if (after)
23624 after = gen_reg_rtx (SImode);
23625 store_mode = mode = SImode;
23629 mem = rs6000_pre_atomic_barrier (mem, model);
23631 label = gen_label_rtx ();
23632 emit_label (label);
23633 label = gen_rtx_LABEL_REF (VOIDmode, label);
23635 if (before == NULL_RTX)
23636 before = gen_reg_rtx (mode);
23638 emit_load_locked (mode, before, mem);
23640 if (code == NOT)
23642 x = expand_simple_binop (mode, AND, before, val,
23643 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23644 after = expand_simple_unop (mode, NOT, x, after, 1);
23646 else
23648 after = expand_simple_binop (mode, code, before, val,
23649 after, 1, OPTAB_LIB_WIDEN);
23652 x = after;
23653 if (mask)
23655 x = expand_simple_binop (SImode, AND, after, mask,
23656 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23657 x = rs6000_mask_atomic_subword (before, x, mask);
23659 else if (store_mode != mode)
23660 x = convert_modes (store_mode, mode, x, 1);
23662 cond = gen_reg_rtx (CCmode);
23663 emit_store_conditional (store_mode, cond, mem, x);
23665 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23666 emit_unlikely_jump (x, label);
23668 rs6000_post_atomic_barrier (model);
23670 if (shift)
23672 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23673 then do the calcuations in a SImode register. */
23674 if (orig_before)
23675 rs6000_finish_atomic_subword (orig_before, before, shift);
23676 if (orig_after)
23677 rs6000_finish_atomic_subword (orig_after, after, shift);
23679 else if (store_mode != mode)
23681 /* QImode/HImode on machines with lbarx/lharx where we do the native
23682 operation and then do the calcuations in a SImode register. */
23683 if (orig_before)
23684 convert_move (orig_before, before, 1);
23685 if (orig_after)
23686 convert_move (orig_after, after, 1);
23688 else if (orig_after && after != orig_after)
23689 emit_move_insn (orig_after, after);
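
/* For example, a full-word __atomic_fetch_add goes through the
   CODE == PLUS path above and yields, roughly (an illustrative sketch,
   ignoring any barriers required by the memory model):

	1:	lwarx   rBEF,0,rMEM	 # BEFORE = *MEM, with reservation
		add     rAFT,rBEF,rVAL	 # AFTER = BEFORE + VAL
		stwcx.  rAFT,0,rMEM	 # try to store AFTER
		bne-    0,1b		 # reservation lost: retry
*/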
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs[reg][mode];
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
	((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  /* TDmode residing in FP registers is special, since the ISA requires that
     the lower-numbered word of a register pair is always the most significant
     word, even in little-endian mode.  This does not match the usual subreg
     semantics, so we cannot use simplify_gen_subreg in those cases.  Access
     the appropriate constituent registers "by hand" in little-endian mode.

     Note we do not need to check for destructive overlap here since TDmode
     can only reside in even/odd register pairs.  */
  if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
    {
      rtx p_src, p_dst;
      int i;

      for (i = 0; i < nregs; i++)
	{
	  if (REG_P (src) && FP_REGNO_P (REGNO (src)))
	    p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
	  else
	    p_src = simplify_gen_subreg (reg_mode, src, mode,
					 i * reg_mode_size);

	  if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
	    p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
	  else
	    p_dst = simplify_gen_subreg (reg_mode, dst, mode,
					 i * reg_mode_size);

	  emit_insn (gen_rtx_SET (p_dst, p_src));
	}

      return;
    }

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;
      rtx restore_basereg = NULL_RTX;

      if (MEM_P (src) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (src, 0)) == PRE_INC
	      || GET_CODE (XEXP (src, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (src, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
	      emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      src = replace_equiv_address (src, breg);
	    }
	  else if (! rs6000_offsettable_memref_p (src, reg_mode))
	    {
	      if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (src, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
		      emit_insn (gen_rtx_SET (ndst,
					      gen_rtx_MEM (reg_mode,
							   XEXP (src, 0))));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (basereg,
					    XEXP (XEXP (src, 0), 1)));
		  src = replace_equiv_address (src, basereg);
		}
	      else
		{
		  rtx basereg = gen_rtx_REG (Pmode, reg);
		  emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
		  src = replace_equiv_address (src, basereg);
		}
	    }

	  breg = XEXP (src, 0);
	  if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
	    breg = XEXP (breg, 0);

	  /* If the base register we are using to address memory is
	     also a destination reg, then change that register last.  */
	  if (REG_P (breg)
	      && REGNO (breg) >= REGNO (dst)
	      && REGNO (breg) < REGNO (dst) + nregs)
	    j = REGNO (breg) - REGNO (dst);
	}
      else if (MEM_P (dst) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (dst, 0)) == PRE_INC
	      || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (dst, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

	      /* We have to update the breg before doing the store.
		 Use store with update, if available.  */

	      if (TARGET_UPDATE)
		{
		  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		  emit_insn (TARGET_32BIT
			     ? (TARGET_POWERPC64
				? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
				: gen_movsi_update (breg, breg, delta_rtx, nsrc))
			     : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
		  used_update = true;
		}
	      else
		emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      dst = replace_equiv_address (dst, breg);
	    }
	  else if (!rs6000_offsettable_memref_p (dst, reg_mode)
		   && GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    {
	      if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		      emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
							   XEXP (dst, 0)),
					      nsrc));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (basereg,
					    XEXP (XEXP (dst, 0), 1)));
		  dst = replace_equiv_address (dst, basereg);
		}
	      else
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  rtx offsetreg = XEXP (XEXP (dst, 0), 1);
		  gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
			      && REG_P (basereg)
			      && REG_P (offsetreg)
			      && REGNO (basereg) != REGNO (offsetreg));
		  if (REGNO (basereg) == 0)
		    {
		      rtx tmp = offsetreg;
		      offsetreg = basereg;
		      basereg = tmp;
		    }
		  emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
		  restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
		  dst = replace_equiv_address (dst, basereg);
		}
	    }
	  else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
	}

      for (i = 0; i < nregs; i++)
	{
	  /* Calculate index to next subword.  */
	  ++j;
	  if (j == nregs)
	    j = 0;

	  /* If compiler already emitted move of first word by
	     store with update, no need to do anything.  */
	  if (j == 0 && used_update)
	    continue;

	  emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						       j * reg_mode_size),
				  simplify_gen_subreg (reg_mode, src, mode,
						       j * reg_mode_size)));
	}
      if (restore_basereg != NULL_RTX)
	emit_insn (restore_basereg);
    }
}
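
/* A worked example of the splitting above (illustrative summary, not an
   exhaustive description): a TImode move between GPRs on a 64-bit target
   has nregs == 2 and reg_mode == DImode, so it becomes two DImode
   register-to-register moves; when the source and destination ranges
   overlap destructively (REGNO (src) < REGNO (dst)), the subwords are
   moved highest-numbered first by the backwards loop.  */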
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

static inline bool
save_reg_p (int r)
{
  return !call_used_regs[r] && df_regs_ever_live_p (r);
}

/* Determine whether the gp REG is really used.  */

static bool
rs6000_reg_live_or_pic_offset_p (int reg)
{
  /* We need to mark the PIC offset register live for the same conditions
     as it is set up, or otherwise it won't be saved before we clobber it.  */
  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
    {
      if (TARGET_TOC && TARGET_MINIMAL_TOC
	  && (crtl->calls_eh_return
	      || df_regs_ever_live_p (reg)
	      || !constant_pool_empty_p ()))
	return true;

      if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
	  && flag_pic)
	return true;
    }

  /* If the function calls eh_return, claim used all the registers that would
     be checked for liveness otherwise.  */
  return ((crtl->calls_eh_return || df_regs_ever_live_p (reg))
	  && !call_used_regs[reg]);
}
/* Return the first fixed-point register that is required to be
   saved.  32 if none.  */

static int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

  if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC))
      && rs6000_reg_live_or_pic_offset_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}
/* Similar, for FP regs.  */

static int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}

/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
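
/* A worked example of the mask layout (assuming the usual definition of
   ALTIVEC_REG_BIT, which places V0 at the most significant bit): if only
   V20 and V31 are live, the function returns
   (0x80000000 >> 20) | (0x80000000 >> 31) == 0x00000801.  */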
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info)
{
  info->world_save_p = 1;
  info->world_save_p
    = (WORLD_SAVE_P (info)
       && DEFAULT_ABI == ABI_DARWIN
       && !cfun->has_nonlocal_label
       && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.)  */
  if (WORLD_SAVE_P (info))
    {
      rtx_insn *insn;
      for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
	if (CALL_P (insn) && SIBLING_CALL_P (insn))
	  {
	    info->world_save_p = 0;
	    break;
	  }
    }

  if (WORLD_SAVE_P (info))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
	 stack for it, if it looks like we're calling SAVE_WORLD, which
	 will attempt to save it.  */
      info->vrsave_size = 4;

      /* If we are going to save the world, we need to save the link
	 register too.  */
      info->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info->vrsave_mask == 0)
	info->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
      gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
		  && (info->first_altivec_reg_save
		      >= FIRST_SAVED_ALTIVEC_REGNO));
    }

  return;
}


static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}
/* Return whether REG is a global user reg or has been specified by
   -ffixed-REG.  We should not restore these, and so cannot use
   lmw or out-of-line restore functions if there are any.  We also
   can't save them (well, emit frame notes for them), because frame
   unwinding during exception handling will restore saved registers.  */

static bool
fixed_reg_p (int reg)
{
  /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
     backend sets it, overriding anything the user might have given.  */
  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC)))
    return false;

  return fixed_regs[reg];
}

/* Determine the strategy for saving/restoring registers.  */

enum {
  SAVE_MULTIPLE = 0x1,
  SAVE_INLINE_GPRS = 0x2,
  SAVE_INLINE_FPRS = 0x4,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
  SAVE_INLINE_VRS = 0x20,
  REST_MULTIPLE = 0x100,
  REST_INLINE_GPRS = 0x200,
  REST_INLINE_FPRS = 0x400,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
  REST_INLINE_VRS = 0x1000
};
static int
rs6000_savres_strategy (rs6000_stack_t *info,
			bool using_static_chain_p)
{
  int strategy = 0;

  /* Select between in-line and out-of-line save and restore of regs.
     First, all the obvious cases where we don't use out-of-line.  */
  if (crtl->calls_eh_return
      || cfun->machine->ra_need_lr)
    strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
		 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  if (info->first_gp_reg_save == 32)
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  if (info->first_fp_reg_save == 64
      /* The out-of-line FP routines use double-precision stores;
	 we can't use those routines if we don't have such stores.  */
      || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
    strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;

  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;

  /* Define cutoff for using out-of-line functions to save registers.  */
  if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
    {
      if (!optimize_size)
	{
	  strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
	  strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	  strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
      else
	{
	  /* Prefer out-of-line restore if it will exit.  */
	  if (info->first_fp_reg_save > 61)
	    strategy |= SAVE_INLINE_FPRS;
	  if (info->first_gp_reg_save > 29)
	    {
	      if (info->first_fp_reg_save == 64)
		strategy |= SAVE_INLINE_GPRS;
	      else
		strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	    }
	  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
	    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (info->first_fp_reg_save > 60)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      if (info->first_gp_reg_save > 29)
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }
  else
    {
      gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
	  || info->first_fp_reg_save > 61)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }

  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
  if (using_static_chain_p
      && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
    strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
		 | SAVE_INLINE_GPRS
		 | SAVE_INLINE_VRS);

  /* We can only use the out-of-line routines to restore fprs if we've
     saved all the registers from first_fp_reg_save in the prologue.
     Otherwise, we risk loading garbage.  Of course, if we have saved
     out-of-line then we know we haven't skipped any fprs.  */
  if ((strategy & SAVE_INLINE_FPRS)
      && !(strategy & REST_INLINE_FPRS))
    {
      int i;

      for (i = info->first_fp_reg_save; i < 64; i++)
	if (fixed_regs[i] || !save_reg_p (i))
	  {
	    strategy |= REST_INLINE_FPRS;
	    break;
	  }
    }

  /* Similarly, for altivec regs.  */
  if ((strategy & SAVE_INLINE_VRS)
      && !(strategy & REST_INLINE_VRS))
    {
      int i;

      for (i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
	if (fixed_regs[i] || !save_reg_p (i))
	  {
	    strategy |= REST_INLINE_VRS;
	    break;
	  }
    }

  /* info->lr_save_p isn't yet set if the only reason lr needs to be
     saved is an out-of-line save or restore.  Set up the value for
     the next test (excluding out-of-line gprs).  */
  bool lr_save_p = (info->lr_save_p
		    || !(strategy & SAVE_INLINE_FPRS)
		    || !(strategy & SAVE_INLINE_VRS)
		    || !(strategy & REST_INLINE_FPRS)
		    || !(strategy & REST_INLINE_VRS));

  if (TARGET_MULTIPLE
      && !TARGET_POWERPC64
      && info->first_gp_reg_save < 31
      && !(flag_shrink_wrap
	   && flag_shrink_wrap_separate
	   && optimize_function_for_speed_p (cfun)))
    {
      /* Prefer store multiple for saves over out-of-line routines,
	 since the store-multiple instruction will always be smaller.  */
      strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;

      /* The situation is more complicated with load multiple.  We'd
	 prefer to use the out-of-line routines for restores, since the
	 "exit" out-of-line routines can handle the restore of LR and the
	 frame teardown.  However it doesn't make sense to use the
	 out-of-line routine if that is the only reason we'd need to save
	 LR, and we can't use the "exit" out-of-line gpr restore if we
	 have saved some fprs; in those cases it is advantageous to use
	 load multiple when available.  */
      if (info->first_fp_reg_save != 64 || !lr_save_p)
	strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
    }

  /* Using the "exit" out-of-line routine does not improve code size
     if using it would require lr to be saved and if only saving one
     or two gprs.  */
  else if (!lr_save_p && info->first_gp_reg_save > 29)
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  /* We can only use load multiple or the out-of-line routines to
     restore gprs if we've saved all the registers from
     first_gp_reg_save.  Otherwise, we risk loading garbage.
     Of course, if we have saved out-of-line or used stmw then we know
     we haven't skipped any gprs.  */
  if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
      && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
    {
      int i;

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (fixed_reg_p (i) || !save_reg_p (i))
	  {
	    strategy |= REST_INLINE_GPRS;
	    strategy &= ~REST_MULTIPLE;
	    break;
	  }
    }

  if (TARGET_ELF && TARGET_64BIT)
    {
      if (!(strategy & SAVE_INLINE_FPRS))
	strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
      else if (!(strategy & SAVE_INLINE_GPRS)
	       && info->first_fp_reg_save == 64)
	strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
    }
  else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
    strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;

  if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
    strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;

  return strategy;
}
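
/* Example of how the bits combine (illustrative): a V.4 function compiled
   with -Os that uses no FPRs has info->first_fp_reg_save == 64, so the
   cutoff near the top of this function forces
   SAVE_INLINE_FPRS | REST_INLINE_FPRS, and only the GPR and VR decisions
   remain subject to the out-of-line heuristics below it.  */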
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
							  32-bit  64-bit
	SP----> +---------------------------------------+
		| back chain to caller			| 0	  0
		+---------------------------------------+
		| saved CR				| 4	  8 (8-11)
		+---------------------------------------+
		| saved LR				| 8	  16
		+---------------------------------------+
		| reserved for compilers		| 12	  24
		+---------------------------------------+
		| reserved for binders			| 16	  32
		+---------------------------------------+
		| saved TOC pointer			| 20	  40
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	| 24	  48
		+---------------------------------------+
		| Alloca space (A)			| 24+P	  etc.
		+---------------------------------------+
		| Local variable space (L)		| 24+P+A
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 24+P+A+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 24+P+A+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 24+P+A+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 24+P+A+L+X+W+Y
		+---------------------------------------+
		| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

     * If the alloca area is present, the parameter save area is
       padded so that the former starts 16-byte aligned.

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   The ELFv2 ABI is a variant of the AIX ABI.  Stack frames look like:

	SP----> +---------------------------------------+
		| Back chain to caller			|  0
		+---------------------------------------+
		| Save area for CR			|  8
		+---------------------------------------+
		| Saved LR				|  16
		+---------------------------------------+
		| Saved TOC pointer			|  24
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	|  32
		+---------------------------------------+
		| Alloca space (A)			|  32+P
		+---------------------------------------+
		| Local variable space (L)		|  32+P+A
		+---------------------------------------+
		| Save area for AltiVec registers (W)	|  32+P+A+L
		+---------------------------------------+
		| AltiVec alignment padding (Y)		|  32+P+A+L+W
		+---------------------------------------+
		| Save area for GP registers (G)	|  32+P+A+L+W+Y
		+---------------------------------------+
		| Save area for FP registers (F)	|  32+P+A+L+W+Y+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|  32+P+A+L+W+Y+G+F
		+---------------------------------------+

     * If the alloca area is present, the parameter save area is
       padded so that the former starts 16-byte aligned.

   V.4 stack frames look like:

	SP----> +---------------------------------------+
		| back chain to caller			| 0
		+---------------------------------------+
		| caller's saved LR			| 4
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	| 8
		+---------------------------------------+
		| Alloca space (A)			| 8+P
		+---------------------------------------+
		| Varargs save area (V)			| 8+P+A
		+---------------------------------------+
		| Local variable space (L)		| 8+P+A+V
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 8+P+A+V+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 8+P+A+V+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 8+P+A+V+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 8+P+A+V+L+X+W+Y
		+---------------------------------------+
		| saved CR (C)				| 8+P+A+V+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for GP registers (G)	| 8+P+A+V+L+X+W+Y+Z+C
		+---------------------------------------+
		| Save area for FP registers (F)	| 8+P+A+V+L+X+W+Y+Z+C+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

     * If the alloca area is present and the required alignment is
       16 bytes, the parameter save area is padded so that the
       alloca area starts 16-byte aligned.

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyway.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */

#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
static rs6000_stack_t *
rs6000_stack_info (void)
{
  /* We should never be called for thunks; we are not set up for that.  */
  gcc_assert (!cfun->is_thunk);

  rs6000_stack_t *info = &stack_info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int ehcr_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;
  bool using_static_chain_p;

  if (reload_completed && info->reload_completed)
    return info;

  memset (info, 0, sizeof (*info));
  info->reload_completed = reload_completed;

  /* Select which calling sequence.  */
  info->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info->first_gp_reg_save;

  info->gp_size = reg_size * (32 - first_gp);

  info->first_fp_reg_save = first_fp_reg_to_save ();
  info->fp_size = 8 * (64 - info->first_fp_reg_save);

  info->first_altivec_reg_save = first_altivec_reg_to_save ();
  info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
			     - info->first_altivec_reg_save);

  /* Does this function call anything?  */
  info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the condition code registers.  */
  if (save_reg_p (CR2_REGNO)
      || save_reg_p (CR3_REGNO)
      || save_reg_p (CR4_REGNO))
    {
      info->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
	info->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;

      ehrd_size = i * UNITS_PER_WORD;
    }
  else
    ehrd_size = 0;

  /* In the ELFv2 ABI, we also need to allocate space for separate
     CR field save areas if the function calls __builtin_eh_return.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      /* This hard-codes that we have three call-saved CR fields.  */
      ehcr_size = 3 * reg_size;
      /* We do *not* use the regular CR save mechanism.  */
      info->cr_save_p = 0;
    }
  else
    ehcr_size = 0;

  /* Determine various sizes.  */
  info->reg_size = reg_size;
  info->fixed_size = RS6000_SAVE_AREA;
  info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
  if (cfun->calls_alloca)
    info->parm_size =
      RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
		    STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
  else
    info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
				    TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info->vars_size
      += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
		       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
	 - (info->fixed_size + info->vars_size + info->parm_size);

  if (TARGET_ALTIVEC_ABI)
    info->vrsave_mask = compute_vrsave_mask ();

  if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
    info->vrsave_size = 4;

  compute_save_world_info (info);

  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_ELFv2:
    case ABI_DARWIN:
      info->fp_save_offset = -info->fp_size;
      info->gp_save_offset = info->fp_save_offset - info->gp_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.
	     The padding goes above the vectors.  */
	  if (info->altivec_size != 0)
	    info->altivec_padding_size = info->vrsave_save_offset & 0xF;

	  info->altivec_save_offset = info->vrsave_save_offset
				      - info->altivec_padding_size
				      - info->altivec_size;
	  gcc_assert (info->altivec_size == 0
		      || info->altivec_save_offset % 16 == 0);

	  /* Adjust for AltiVec case.  */
	  info->ehrd_offset = info->altivec_save_offset - ehrd_size;
	}
      else
	info->ehrd_offset = info->gp_save_offset - ehrd_size;

      info->ehcr_offset = info->ehrd_offset - ehcr_size;
      info->cr_save_offset = reg_size; /* first word when 64-bit.  */
      info->lr_save_offset = 2*reg_size;
      break;

    case ABI_V4:
      info->fp_save_offset = -info->fp_size;
      info->gp_save_offset = info->fp_save_offset - info->gp_size;
      info->cr_save_offset = info->gp_save_offset - info->cr_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.  */
	  if (info->altivec_size != 0)
	    info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);

	  info->altivec_save_offset = info->vrsave_save_offset
				      - info->altivec_padding_size
				      - info->altivec_size;

	  /* Adjust for AltiVec case.  */
	  info->ehrd_offset = info->altivec_save_offset;
	}
      else
	info->ehrd_offset = info->cr_save_offset;

      info->ehrd_offset -= ehrd_size;
      info->lr_save_offset = reg_size;
    }

  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info->save_size = RS6000_ALIGN (info->fp_size
				  + info->gp_size
				  + info->altivec_size
				  + info->altivec_padding_size
				  + ehrd_size
				  + ehcr_size
				  + info->cr_size
				  + info->vrsave_size,
				  save_align);

  non_fixed_size = info->vars_size + info->parm_size + info->save_size;

  info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
				   ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to save the link register.  */
  if (info->calls_p
      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && crtl->profile
	  && !TARGET_PROFILE_KERNEL)
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
#ifdef TARGET_RELOCATABLE
      || (DEFAULT_ABI == ABI_V4
	  && (TARGET_RELOCATABLE || flag_pic > 1)
	  && !constant_pool_empty_p ())
#endif
      || rs6000_ra_ever_killed ())
    info->lr_save_p = 1;

  using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			  && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			  && call_used_regs[STATIC_CHAIN_REGNUM]);
  info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);

  if (!(info->savres_strategy & SAVE_INLINE_GPRS)
      || !(info->savres_strategy & SAVE_INLINE_FPRS)
      || !(info->savres_strategy & SAVE_INLINE_VRS)
      || !(info->savres_strategy & REST_INLINE_GPRS)
      || !(info->savres_strategy & REST_INLINE_FPRS)
      || !(info->savres_strategy & REST_INLINE_VRS))
    info->lr_save_p = 1;

  if (info->lr_save_p)
    df_set_regs_ever_live (LR_REGNO, true);

  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables are more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info->calls_p)
    info->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info->push_p = 1;

  else
    info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  return info;
}
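
/* Example of the AIX cushion (illustrative): a 32-bit leaf function with
   no frame pointer whose locals, parameter area, and save area total at
   most 220 bytes gets push_p == 0 and uses the register save area below
   the stack pointer that the ABI guarantees; the 64-bit figure is 288
   bytes, as computed in the comment above.  */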
static void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (! info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
	   ((current_function_decl && DECL_NAME (current_function_decl))
	    ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
	    : "<unknown>"));

  switch (info->abi)
    {
    default:		 abi_string = "Unknown";	break;
    case ABI_NONE:	 abi_string = "NONE";		break;
    case ABI_AIX:	 abi_string = "AIX";		break;
    case ABI_ELFv2:	 abi_string = "ELFv2";		break;
    case ABI_DARWIN:	 abi_string = "Darwin";		break;
    case ABI_V4:	 abi_string = "V.4";		break;
    }

  fprintf (stderr, "\tABI                 = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n", info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n", info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
	     info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);

  if (info->gp_size)
    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);

  if (info->fp_size)
    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
	     info->altivec_save_offset);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
	     info->vrsave_save_offset);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size          = " HOST_WIDE_INT_PRINT_DEC"\n",
	     info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size           = " HOST_WIDE_INT_PRINT_DEC"\n",
	     info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
	     info->altivec_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);

  fprintf (stderr, "\tsave-strategy       =  %04x\n", info->savres_strategy);

  fprintf (stderr, "\n");
}
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* Currently we don't optimize very well between prolog and body
     code, and for PIC code the code can actually be quite bad, so
     don't try to be too clever here.  */
  if (count != 0
      || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      return
	gen_rtx_MEM
	  (Pmode,
	   memory_address
	     (Pmode,
	      plus_constant (Pmode,
			     copy_to_reg
			       (gen_rtx_MEM (Pmode,
					     memory_address (Pmode, frame))),
			     RETURN_ADDRESS_OFFSET)));
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
/* Say whether a function is a candidate for sibcall handling or not.  */

static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp)
{
  tree fntype;

  if (decl)
    fntype = TREE_TYPE (decl);
  else
    fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));

  /* We can't do it if the called function has more vector parameters
     than the current function; there's nowhere to put the VRsave code.  */
  if (TARGET_ALTIVEC_ABI
      && TARGET_ALTIVEC_VRSAVE
      && !(decl && decl == current_function_decl))
    {
      function_args_iterator args_iter;
      tree type;
      int nvreg = 0;

      /* Functions with vector parameters are required to have a
	 prototype, so the argument type info must be available
	 here.  */
      FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
	if (TREE_CODE (type) == VECTOR_TYPE
	    && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
	  nvreg++;

      FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
	if (TREE_CODE (type) == VECTOR_TYPE
	    && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
	  nvreg--;

      if (nvreg > 0)
	return false;
    }

  /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
     functions, because the callee may have a different TOC pointer to
     the caller and there's no way to ensure we restore the TOC when
     we return.  With the secure-plt SYSV ABI we can't make non-local
     calls when -fpic/PIC because the plt call stubs use r30.  */
  if (DEFAULT_ABI == ABI_DARWIN
      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && decl
	  && !DECL_EXTERNAL (decl)
	  && !DECL_WEAK (decl)
	  && (*targetm.binds_local_p) (decl))
      || (DEFAULT_ABI == ABI_V4
	  && (!TARGET_SECURE_PLT
	      || !flag_pic
	      || (decl
		  && (*targetm.binds_local_p) (decl)))))
    {
      tree attr_list = TYPE_ATTRIBUTES (fntype);

      if (!lookup_attribute ("longcall", attr_list)
	  || lookup_attribute ("shortcall", attr_list))
	return true;
    }

  return false;
}
static int
rs6000_ra_ever_killed (void)
{
  rtx_insn *top;
  rtx reg;
  rtx_insn *insn;

  if (cfun->is_thunk)
    return 0;

  if (cfun->machine->lr_save_state)
    return cfun->machine->lr_save_state - 1;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

       move LR->R0
       bcl to set PIC register
       move LR->R31
       move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  if (CALL_P (insn))
	    {
	      if (!SIBLING_CALL_P (insn))
		return 1;
	    }
	  else if (find_regno_note (insn, REG_INC, LR_REGNO))
	    return 1;
	  else if (set_of (reg, insn) != NULL_RTX
		   && !prologue_epilogue_contains (insn))
	    return 1;
	}
    }
  return 0;
}
/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      lab = gen_label_rtx ();
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
	{
	  got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
	  need_toc_init = 1;
	}
      else
	got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
	{
	  tmp1 = gen_reg_rtx (Pmode);
	  tmp2 = gen_reg_rtx (Pmode);
	}
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
		   ? gen_rtx_REG (Pmode, 0)
		   : gen_reg_rtx (Pmode));

      if (fromprolog)
	{
	  rtx symF, symL;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
	  symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  emit_insn (gen_load_toc_v4_PIC_1 (symF));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
	}
      else
	{
	  rtx tocsym, lab;

	  tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
	  need_toc_init = 1;
	  lab = gen_label_rtx ();
	  emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  if (TARGET_LINK_STACK)
	    emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
	  emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
	}
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));

      need_toc_init = 1;
      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);

      if (TARGET_32BIT)
	emit_insn (gen_load_toc_aix_si (dest));
      else
	emit_insn (gen_load_toc_aix_di (dest));
    }
}
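
/* For SVR4 -fPIC with secure PLT, the patterns emitted above correspond
   roughly to the following sequence (an illustrative sketch, not verbatim
   output):

	bcl     20,31,.L1	 # set LR to the address of .L1
   .L1:	mflr    30		 # move it into the PIC register
	addis   30,30,.LTOC-.L1@ha
	addi    30,30,.LTOC-.L1@l
*/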
/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
	  || cfun->calls_alloca
	  || info->total_size > 32767)
	{
	  tmp = gen_frame_mem (Pmode, frame_rtx);
	  emit_move_insn (operands[1], tmp);
	  frame_rtx = operands[1];
	}
      else if (info->push_p)
	sp_offset = info->total_size;

      tmp = plus_constant (Pmode, frame_rtx,
			   info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);

  /* Freeze lr_save_p.  We've just emitted rtl that depends on the
     state of lr_save_p so any change from here on would be a bug.  In
     particular, stop rs6000_ra_ever_killed from considering the SET
     of lr we may have added just above.  */
  cfun->machine->lr_save_state = info->lr_save_p + 1;
}
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}

/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.  */
#if TARGET_ELF
static int
uses_TOC (void)
{
  rtx_insn *insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	rtx pat = PATTERN (insn);
	int i;

	if (GET_CODE (pat) == PARALLEL)
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      rtx sub = XVECEXP (pat, 0, i);
	      if (GET_CODE (sub) == USE)
		{
		  sub = XEXP (sub, 0);
		  if (GET_CODE (sub) == UNSPEC
		      && XINT (sub, 1) == UNSPEC_TOC)
		    return 1;
		}
	    }
      }
  return 0;
}
#endif
rtx
create_TOC_reference (rtx symbol, rtx largetoc_reg)
{
  rtx tocrel, tocreg, hi;

  if (TARGET_DEBUG_ADDR)
    {
      if (GET_CODE (symbol) == SYMBOL_REF)
	fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
		 XSTR (symbol, 0));
      else
	{
	  fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
		   GET_RTX_NAME (GET_CODE (symbol)));
	  debug_rtx (symbol);
	}
    }

  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);

  tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
  tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
  if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
    return tocrel;

  hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
  if (largetoc_reg != NULL)
    {
      emit_move_insn (largetoc_reg, hi);
      hi = largetoc_reg;
    }
  return gen_rtx_LO_SUM (Pmode, hi, tocrel);
}
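
/* The two forms produced above correspond to the usual TOC addressing
   (illustrative sketch): for the small code model a single d-form access
   off r2, e.g. "ld r9,sym@toc(r2)", and for the larger code models a
   high/low pair, e.g.

	addis   r9,r2,sym@toc@ha
	ld      r9,sym@toc@l(r9)
*/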
/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */
void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
	   (* targetm.strip_name_encoding) (frame_table_label));
}

/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
{
  rtvec p;
  int i;
  rtx regs[3];

  i = 0;
  regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  if (hard_frame_needed)
    regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  if (!(REGNO (fp) == STACK_POINTER_REGNUM
	|| (hard_frame_needed
	    && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
    regs[i++] = fp;

  p = rtvec_alloc (i);
  while (--i >= 0)
    {
      rtx mem = gen_frame_mem (BLKmode, regs[i]);
      RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
    }

  emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
/* Emit the correct code for allocating stack space, as insns.
   If COPY_REG, make sure a copy of the old frame is left there.
   The generated code may use hard register 0 as a temporary.  */

static rtx_insn *
rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
{
  rtx_insn *insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);
  rtx par, set, mem;

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return 0;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
	  && REGNO (stack_limit_rtx) > 1
	  && REGNO (stack_limit_rtx) <= 31)
	{
	  rtx_insn *insn
	    = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
	  gcc_assert (insn);
	  emit_insn (insn);
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
	}
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
	       && TARGET_32BIT
	       && DEFAULT_ABI == ABI_V4
	       && !flag_pic)
	{
	  rtx toload = gen_rtx_CONST (VOIDmode,
				      gen_rtx_PLUS (Pmode,
						    stack_limit_rtx,
						    GEN_INT (size)));

	  emit_insn (gen_elf_high (tmp_reg, toload));
	  emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else
	warning (0, "stack limit expression is not supported");
    }

  if (copy_reg)
    {
      if (copy_off != 0)
	emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
      else
	emit_move_insn (copy_reg, stack_reg);
    }

  if (size > 32767)
    {
      /* Need a note here so that try_split doesn't get confused.  */
      if (get_last_insn () == NULL_RTX)
	emit_note (NOTE_INSN_DELETED);
      insn = emit_move_insn (tmp_reg, todec);
      try_split (PATTERN (insn), insn, 0);
      todec = tmp_reg;
    }

  insn = emit_insn (TARGET_32BIT
		    ? gen_movsi_update_stack (stack_reg, stack_reg,
					      todec, stack_reg)
		    : gen_movdi_di_update_stack (stack_reg, stack_reg,
						 todec, stack_reg));
  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  par = PATTERN (insn);
  gcc_assert (GET_CODE (par) == PARALLEL);
  set = XVECEXP (par, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = SET_DEST (set);
  gcc_assert (MEM_P (mem));
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
						      GEN_INT (-size))));
  return insn;
}
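
/* The allocation itself is a single store-with-update, e.g. (illustrative)
   "stwu r1,-64(r1)" for a small 32-bit frame or "stdu r1,-128(r1)" for a
   64-bit one; frames larger than 32767 bytes load the negated size into
   r0 first and use the indexed form "stwux r1,r1,r0" (or "stdux"), as set
   up above.  */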
25359 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25361 #if PROBE_INTERVAL > 32768
25362 #error Cannot use indexed addressing mode for stack probing
25363 #endif
25365 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25366 inclusive. These are offsets from the current stack pointer. */
25368 static void
25369 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25371 /* See if we have a constant small number of probes to generate. If so,
25372 that's the easy case. */
25373 if (first + size <= 32768)
25375 HOST_WIDE_INT i;
25377 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25378 it exceeds SIZE. If only one probe is needed, this will not
25379 generate any code. Then probe at FIRST + SIZE. */
25380 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25381 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25382 -(first + i)));
25384 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25385 -(first + size)));
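/* Worked example (illustrative values): with a 4096-byte interval,
   FIRST == 16 and SIZE == 10000, the loop above probes at sp-4112
   and sp-8208, and the final probe lands at sp-10016.  */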
25388 /* Otherwise, do the same as above, but in a loop. Note that we must be
25389 extra careful with variables wrapping around because we might be at
25390 the very top (or the very bottom) of the address space and we have
25391 to be able to handle this case properly; in particular, we use an
25392 equality test for the loop condition. */
25393 else
25395 HOST_WIDE_INT rounded_size;
25396 rtx r12 = gen_rtx_REG (Pmode, 12);
25397 rtx r0 = gen_rtx_REG (Pmode, 0);
25399 /* Sanity check for the addressing mode we're going to use. */
25400 gcc_assert (first <= 32768);
25402 /* Step 1: round SIZE to the previous multiple of the interval. */
25404 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25407 /* Step 2: compute initial and final value of the loop counter. */
25409 /* TEST_ADDR = SP + FIRST. */
25410 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25411 -first)));
25413 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25414 if (rounded_size > 32768)
25416 emit_move_insn (r0, GEN_INT (-rounded_size));
25417 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25419 else
25420 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25421 -rounded_size)));
25424 /* Step 3: the loop
25426 do
25428 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25429 probe at TEST_ADDR
25431 while (TEST_ADDR != LAST_ADDR)
25433 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25434 until it is equal to ROUNDED_SIZE. */
25436 if (TARGET_64BIT)
25437 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
25438 else
25439 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
25442 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25443 that SIZE is equal to ROUNDED_SIZE. */
25445 if (size != rounded_size)
25446 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25450 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25451 absolute addresses. */
25453 const char *
25454 output_probe_stack_range (rtx reg1, rtx reg2)
25456 static int labelno = 0;
25457 char loop_lab[32];
25458 rtx xops[2];
25460 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25462 /* Loop. */
25463 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25465 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25466 xops[0] = reg1;
25467 xops[1] = GEN_INT (-PROBE_INTERVAL);
25468 output_asm_insn ("addi %0,%0,%1", xops);
25470 /* Probe at TEST_ADDR. */
25471 xops[1] = gen_rtx_REG (Pmode, 0);
25472 output_asm_insn ("stw %1,0(%0)", xops);
25474 /* Test if TEST_ADDR == LAST_ADDR. */
25475 xops[1] = reg2;
25476 if (TARGET_64BIT)
25477 output_asm_insn ("cmpd 0,%0,%1", xops);
25478 else
25479 output_asm_insn ("cmpw 0,%0,%1", xops);
25481 /* Branch. */
25482 fputs ("\tbne 0,", asm_out_file);
25483 assemble_name_raw (asm_out_file, loop_lab);
25484 fputc ('\n', asm_out_file);
25486 return "";
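/* For illustration, with r12/r0 as set up by the caller above and
   the default 4096-byte interval, the emitted 32-bit loop is:
	.LPSRL0:
		addi 12,12,-4096
		stw 0,0(12)
		cmpw 0,12,0
		bne 0,.LPSRL0  */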
25489 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25490 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25491 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25492 deduce these equivalences by itself so it wasn't necessary to hold
25493 its hand so much. Don't be tempted to always supply d2_f_d_e with
25494 the actual cfa register, i.e. r31 when we are using a hard frame
25495 pointer. That fails when saving regs off r1, and sched moves the
25496 r31 setup past the reg saves. */
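/* For example (illustrative): if r11 = r1 + VAL, a save such as
   (set (mem (plus (reg 11) (const_int 8))) (reg 30)) gets a note
   whose address simplifies to (plus (reg 1) (const_int VAL + 8)).  */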
25498 static rtx_insn *
25499 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25500 rtx reg2, rtx repl2)
25502 rtx repl;
25504 if (REGNO (reg) == STACK_POINTER_REGNUM)
25506 gcc_checking_assert (val == 0);
25507 repl = NULL_RTX;
25509 else
25510 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25511 GEN_INT (val));
25513 rtx pat = PATTERN (insn);
25514 if (!repl && !reg2)
25516 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25517 if (GET_CODE (pat) == PARALLEL)
25518 for (int i = 0; i < XVECLEN (pat, 0); i++)
25519 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25521 rtx set = XVECEXP (pat, 0, i);
25523 /* If this PARALLEL has been emitted for out-of-line
25524 register save functions, or store multiple, then omit
25525 eh_frame info for any user-defined global regs. If
25526 eh_frame info is supplied, frame unwinding will
25527 restore a user reg. */
25528 if (!REG_P (SET_SRC (set))
25529 || !fixed_reg_p (REGNO (SET_SRC (set))))
25530 RTX_FRAME_RELATED_P (set) = 1;
25532 RTX_FRAME_RELATED_P (insn) = 1;
25533 return insn;
25536 /* We expect that 'pat' is either a SET or a PARALLEL containing
25537 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25538 are important so they all have to be marked RTX_FRAME_RELATED_P.
25539 Call simplify_replace_rtx on the SETs rather than the whole insn
25540 so as to leave the other stuff alone (for example USE of r12). */
25542 set_used_flags (pat);
25543 if (GET_CODE (pat) == SET)
25545 if (repl)
25546 pat = simplify_replace_rtx (pat, reg, repl);
25547 if (reg2)
25548 pat = simplify_replace_rtx (pat, reg2, repl2);
25550 else if (GET_CODE (pat) == PARALLEL)
25552 pat = shallow_copy_rtx (pat);
25553 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25555 for (int i = 0; i < XVECLEN (pat, 0); i++)
25556 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25558 rtx set = XVECEXP (pat, 0, i);
25560 if (repl)
25561 set = simplify_replace_rtx (set, reg, repl);
25562 if (reg2)
25563 set = simplify_replace_rtx (set, reg2, repl2);
25564 XVECEXP (pat, 0, i) = set;
25566 /* Omit eh_frame info for any user-defined global regs. */
25567 if (!REG_P (SET_SRC (set))
25568 || !fixed_reg_p (REGNO (SET_SRC (set))))
25569 RTX_FRAME_RELATED_P (set) = 1;
25572 else
25573 gcc_unreachable ();
25575 RTX_FRAME_RELATED_P (insn) = 1;
25576 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25578 return insn;
25581 /* Returns an insn that has a vrsave set operation with the
25582 appropriate CLOBBERs. */
25584 static rtx
25585 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25587 int nclobs, i;
25588 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25589 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25591 clobs[0]
25592 = gen_rtx_SET (vrsave,
25593 gen_rtx_UNSPEC_VOLATILE (SImode,
25594 gen_rtvec (2, reg, vrsave),
25595 UNSPECV_SET_VRSAVE));
25597 nclobs = 1;
25599 /* We need to clobber the registers in the mask so the scheduler
25600 does not move sets to VRSAVE before sets of AltiVec registers.
25602 However, if the function receives nonlocal gotos, reload will set
25603 all call-saved registers live. We will end up with:
25605 (set (reg 999) (mem))
25606 (parallel [ (set (reg vrsave) (unspec blah))
25607 (clobber (reg 999))])
25609 The clobber will cause the store into reg 999 to be dead, and
25610 flow will attempt to delete an epilogue insn. In this case, we
25611 need an unspec use/set of the register. */
25613 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25614 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25616 if (!epiloguep || call_used_regs[i])
25617 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25618 gen_rtx_REG (V4SImode, i));
25619 else
25621 rtx reg = gen_rtx_REG (V4SImode, i);
25623 clobs[nclobs++]
25624 = gen_rtx_SET (reg,
25625 gen_rtx_UNSPEC (V4SImode,
25626 gen_rtvec (1, reg), 27));
25630 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25632 for (i = 0; i < nclobs; ++i)
25633 XVECEXP (insn, 0, i) = clobs[i];
25635 return insn;
25638 static rtx
25639 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25641 rtx addr, mem;
25643 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25644 mem = gen_frame_mem (GET_MODE (reg), addr);
25645 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25648 static rtx
25649 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25651 return gen_frame_set (reg, frame_reg, offset, false);
25654 static rtx
25655 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25657 return gen_frame_set (reg, frame_reg, offset, true);
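/* For illustration, on a 64-bit target gen_frame_store (r30, r1, 16)
   builds roughly
     (set (mem/c:DI (plus:DI (reg:DI 1) (const_int 16))) (reg:DI 30))
   with the frame alias set already applied by gen_frame_mem.  */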
25660 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25661 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25663 static rtx_insn *
25664 emit_frame_save (rtx frame_reg, machine_mode mode,
25665 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25667 rtx reg;
25669 /* Some cases that need register indexed addressing. */
25670 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25671 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25673 reg = gen_rtx_REG (mode, regno);
25674 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25675 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25676 NULL_RTX, NULL_RTX);
25679 /* Emit an offset memory reference suitable for a frame store, while
25680 converting to a valid addressing mode. */
25682 static rtx
25683 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25685 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25688 #ifndef TARGET_FIX_AND_CONTINUE
25689 #define TARGET_FIX_AND_CONTINUE 0
25690 #endif
25692 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
25693 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25694 #define LAST_SAVRES_REGISTER 31
25695 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25697 enum {
25698 SAVRES_LR = 0x1,
25699 SAVRES_SAVE = 0x2,
25700 SAVRES_REG = 0x0c,
25701 SAVRES_GPR = 0,
25702 SAVRES_FPR = 4,
25703 SAVRES_VR = 8
25706 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
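/* For illustration: SAVRES_SAVE | SAVRES_FPR | SAVRES_LR (== 0x7)
   selects an FPR save routine that also handles LR; the SAVRES_REG
   field (mask 0x0c) holds one of SAVRES_GPR/FPR/VR, which is why
   twelve slots per register suffice in the table above.  */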
25708 /* Temporary holding space for an out-of-line register save/restore
25709 routine name. */
25710 static char savres_routine_name[30];
25712 /* Return the name for an out-of-line register save/restore routine.
25713 SEL selects the register class, save vs. restore, and LR handling. */
25715 static char *
25716 rs6000_savres_routine_name (int regno, int sel)
25718 const char *prefix = "";
25719 const char *suffix = "";
25721 /* Different targets are supposed to define
25722 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25723 routine name could be defined with:
25725 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25727 This is a nice idea in theory, but in reality, things are
25728 complicated in several ways:
25730 - ELF targets have save/restore routines for GPRs.
25732 - PPC64 ELF targets have routines for save/restore of GPRs that
25733 differ in what they do with the link register, so having a set
25734 prefix doesn't work. (We only use one of the save routines at
25735 the moment, though.)
25737 - PPC32 ELF targets have "exit" versions of the restore routines
25738 that restore the link register and can save some extra space.
25739 These require an extra suffix. (There are also "tail" versions
25740 of the restore routines and "GOT" versions of the save routines,
25741 but we don't generate those at present. Same problems apply,
25742 though.)
25744 We deal with all this by synthesizing our own prefix/suffix and
25745 using that for the simple sprintf call shown above. */
25746 if (DEFAULT_ABI == ABI_V4)
25748 if (TARGET_64BIT)
25749 goto aix_names;
25751 if ((sel & SAVRES_REG) == SAVRES_GPR)
25752 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25753 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25754 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25755 else if ((sel & SAVRES_REG) == SAVRES_VR)
25756 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25757 else
25758 abort ();
25760 if ((sel & SAVRES_LR))
25761 suffix = "_x";
25763 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25765 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25766 /* No out-of-line save/restore routines for GPRs on AIX. */
25767 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25768 #endif
25770 aix_names:
25771 if ((sel & SAVRES_REG) == SAVRES_GPR)
25772 prefix = ((sel & SAVRES_SAVE)
25773 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25774 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25775 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25777 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25778 if ((sel & SAVRES_LR))
25779 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25780 else
25781 #endif
25783 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25784 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25787 else if ((sel & SAVRES_REG) == SAVRES_VR)
25788 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25789 else
25790 abort ();
25793 if (DEFAULT_ABI == ABI_DARWIN)
25795 /* The Darwin approach is (slightly) different, in order to be
25796 compatible with code generated by the system toolchain. There is a
25797 single symbol for the start of save sequence, and the code here
25798 embeds an offset into that code on the basis of the first register
25799 to be saved. */
25800 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25801 if ((sel & SAVRES_REG) == SAVRES_GPR)
25802 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25803 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25804 (regno - 13) * 4, prefix, regno);
25805 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25806 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25807 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25808 else if ((sel & SAVRES_REG) == SAVRES_VR)
25809 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25810 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25811 else
25812 abort ();
25814 else
25815 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25817 return savres_routine_name;
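/* Example outputs (derived from the cases above): a 32-bit SVR4 GPR
   save from r29 that also handles LR is "_savegpr_29_x"; the
   AIX/ELFv2 equivalent is "_savegpr0_29"; Darwin instead emits
   "*saveGPR+64 ; save r29-r31".  */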
25820 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25821 SEL selects the register class, save vs. restore, and LR handling. */
25823 static rtx
25824 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25826 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25827 ? info->first_gp_reg_save
25828 : (sel & SAVRES_REG) == SAVRES_FPR
25829 ? info->first_fp_reg_save - 32
25830 : (sel & SAVRES_REG) == SAVRES_VR
25831 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25832 : -1);
25833 rtx sym;
25834 int select = sel;
25836 /* Don't generate bogus routine names. */
25837 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25838 && regno <= LAST_SAVRES_REGISTER
25839 && select >= 0 && select <= 12);
25841 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25843 if (sym == NULL)
25845 char *name;
25847 name = rs6000_savres_routine_name (regno, sel);
25849 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25850 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25851 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25854 return sym;
25857 /* Emit a sequence of insns, including a stack tie if needed, for
25858 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25859 reset the stack pointer, but move the base of the frame into
25860 reg UPDT_REGNO for use by out-of-line register restore routines. */
25862 static rtx
25863 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25864 unsigned updt_regno)
25866 /* If there is nothing to do, don't do anything. */
25867 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
25868 return NULL_RTX;
25870 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
25872 /* This blockage is needed so that sched doesn't decide to move
25873 the sp change before the register restores. */
25874 if (DEFAULT_ABI == ABI_V4)
25875 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
25876 GEN_INT (frame_off)));
25878 /* If we are restoring registers out-of-line, we will be using the
25879 "exit" variants of the restore routines, which will reset the
25880 stack for us. But we do need to point updt_reg into the
25881 right place for those routines. */
25882 if (frame_off != 0)
25883 return emit_insn (gen_add3_insn (updt_reg_rtx,
25884 frame_reg_rtx, GEN_INT (frame_off)));
25885 else
25886 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
25888 return NULL_RTX;
25891 /* Return the register number used as a pointer by out-of-line
25892 save/restore functions. */
25894 static inline unsigned
25895 ptr_regno_for_savres (int sel)
25897 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25898 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
25899 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
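/* Summarizing the code above: AIX/ELFv2 use r1 for the FPR and
   LR-handling variants and r12 otherwise; Darwin uses r1 for FPRs
   and r11 otherwise; V.4 always uses r11.  */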
25902 /* Construct a parallel rtx describing the effect of a call to an
25903 out-of-line register save/restore routine, and emit the insn
25904 or jump_insn as appropriate. */
25906 static rtx_insn *
25907 rs6000_emit_savres_rtx (rs6000_stack_t *info,
25908 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
25909 machine_mode reg_mode, int sel)
25911 int i;
25912 int offset, start_reg, end_reg, n_regs, use_reg;
25913 int reg_size = GET_MODE_SIZE (reg_mode);
25914 rtx sym;
25915 rtvec p;
25916 rtx par;
25917 rtx_insn *insn;
25919 offset = 0;
25920 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25921 ? info->first_gp_reg_save
25922 : (sel & SAVRES_REG) == SAVRES_FPR
25923 ? info->first_fp_reg_save
25924 : (sel & SAVRES_REG) == SAVRES_VR
25925 ? info->first_altivec_reg_save
25926 : -1);
25927 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25928 ? 32
25929 : (sel & SAVRES_REG) == SAVRES_FPR
25930 ? 64
25931 : (sel & SAVRES_REG) == SAVRES_VR
25932 ? LAST_ALTIVEC_REGNO + 1
25933 : -1);
25934 n_regs = end_reg - start_reg;
25935 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
25936 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
25937 + n_regs);
25939 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25940 RTVEC_ELT (p, offset++) = ret_rtx;
25942 RTVEC_ELT (p, offset++)
25943 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
25945 sym = rs6000_savres_routine_sym (info, sel);
25946 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
25948 use_reg = ptr_regno_for_savres (sel);
25949 if ((sel & SAVRES_REG) == SAVRES_VR)
25951 /* Vector regs are saved/restored using [reg+reg] addressing. */
25952 RTVEC_ELT (p, offset++)
25953 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25954 RTVEC_ELT (p, offset++)
25955 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
25957 else
25958 RTVEC_ELT (p, offset++)
25959 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25961 for (i = 0; i < end_reg - start_reg; i++)
25962 RTVEC_ELT (p, i + offset)
25963 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
25964 frame_reg_rtx, save_area_offset + reg_size * i,
25965 (sel & SAVRES_SAVE) != 0);
25967 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25968 RTVEC_ELT (p, i + offset)
25969 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
25971 par = gen_rtx_PARALLEL (VOIDmode, p);
25973 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25975 insn = emit_jump_insn (par);
25976 JUMP_LABEL (insn) = ret_rtx;
25978 else
25979 insn = emit_insn (par);
25980 return insn;
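/* For illustration, the PARALLEL built above looks roughly like:
     (parallel [(return)                        ; restore-with-LR only
		(clobber (reg LR))
		(use (symbol_ref "_savegpr0_29")) ; example name
		(use (reg PTR))                   ; clobber + use r0 for VRs
		(set ...) ...])                   ; one set per register  */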
25983 /* Emit code to store into REG the CR fields that need to be saved. */
25985 static void
25986 rs6000_emit_move_from_cr (rtx reg)
25988 /* Only the ELFv2 ABI lets us store just the CR fields that need saving. */
25989 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
25991 int i, cr_reg[8], count = 0;
25993 /* Collect CR fields that must be saved. */
25994 for (i = 0; i < 8; i++)
25995 if (save_reg_p (CR0_REGNO + i))
25996 cr_reg[count++] = i;
25998 /* If it's just a single one, use mfcrf. */
25999 if (count == 1)
26001 rtvec p = rtvec_alloc (1);
26002 rtvec r = rtvec_alloc (2);
26003 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26004 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26005 RTVEC_ELT (p, 0)
26006 = gen_rtx_SET (reg,
26007 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26009 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26010 return;
26013 /* ??? It might be better to handle count == 2 / 3 cases here
26014 as well, using logical operations to combine the values. */
26017 emit_insn (gen_movesi_from_cr (reg));
26020 /* Return whether the split-stack arg pointer (r12) is used. */
26022 static bool
26023 split_stack_arg_pointer_used_p (void)
26025 /* If the pseudo holding the arg pointer is no longer a pseudo,
26026 then the arg pointer is used. */
26027 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26028 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26029 || (REGNO (cfun->machine->split_stack_arg_pointer)
26030 < FIRST_PSEUDO_REGISTER)))
26031 return true;
26033 /* Unfortunately we also need to do some code scanning, since
26034 r12 may have been substituted for the pseudo. */
26035 rtx_insn *insn;
26036 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26037 FOR_BB_INSNS (bb, insn)
26038 if (NONDEBUG_INSN_P (insn))
26040 /* A call destroys r12. */
26041 if (CALL_P (insn))
26042 return false;
26044 df_ref use;
26045 FOR_EACH_INSN_USE (use, insn)
26047 rtx x = DF_REF_REG (use);
26048 if (REG_P (x) && REGNO (x) == 12)
26049 return true;
26051 df_ref def;
26052 FOR_EACH_INSN_DEF (def, insn)
26054 rtx x = DF_REF_REG (def);
26055 if (REG_P (x) && REGNO (x) == 12)
26056 return false;
26059 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26062 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26064 static bool
26065 rs6000_global_entry_point_needed_p (void)
26067 /* Only needed for the ELFv2 ABI. */
26068 if (DEFAULT_ABI != ABI_ELFv2)
26069 return false;
26071 /* With -msingle-pic-base, we assume the whole program shares the same
26072 TOC, so no global entry point prologues are needed anywhere. */
26073 if (TARGET_SINGLE_PIC_BASE)
26074 return false;
26076 /* Ensure we have a global entry point for thunks. ??? We could
26077 avoid that if the target routine doesn't need a global entry point,
26078 but we do not know whether this is the case at this point. */
26079 if (cfun->is_thunk)
26080 return true;
26082 /* For regular functions, rs6000_emit_prologue sets this flag if the
26083 routine ever uses the TOC pointer. */
26084 return cfun->machine->r2_setup_needed;
26087 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26088 static sbitmap
26089 rs6000_get_separate_components (void)
26091 rs6000_stack_t *info = rs6000_stack_info ();
26093 if (WORLD_SAVE_P (info))
26094 return NULL;
26096 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26097 && !(info->savres_strategy & REST_MULTIPLE));
26099 /* Component 0 is the save/restore of LR (done via GPR0).
26100 Components 13..31 are the save/restore of GPR13..GPR31.
26101 Components 46..63 are the save/restore of FPR14..FPR31. */
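/* E.g. GPR30 is component 30, FPR20 (hard regno 52) is component 52,
   and the LR save/restore is component 0.  */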
26103 cfun->machine->n_components = 64;
26105 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26106 bitmap_clear (components);
26108 int reg_size = TARGET_32BIT ? 4 : 8;
26109 int fp_reg_size = 8;
26111 /* The GPRs we need saved to the frame. */
26112 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26113 && (info->savres_strategy & REST_INLINE_GPRS))
26115 int offset = info->gp_save_offset;
26116 if (info->push_p)
26117 offset += info->total_size;
26119 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26121 if (IN_RANGE (offset, -0x8000, 0x7fff)
26122 && rs6000_reg_live_or_pic_offset_p (regno))
26123 bitmap_set_bit (components, regno);
26125 offset += reg_size;
26129 /* Don't mess with the hard frame pointer. */
26130 if (frame_pointer_needed)
26131 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26133 /* Don't mess with the fixed TOC register. */
26134 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26135 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26136 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26137 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26139 /* The FPRs we need saved to the frame. */
26140 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26141 && (info->savres_strategy & REST_INLINE_FPRS))
26143 int offset = info->fp_save_offset;
26144 if (info->push_p)
26145 offset += info->total_size;
26147 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26149 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26150 bitmap_set_bit (components, regno);
26152 offset += fp_reg_size;
26156 /* Optimize LR save and restore if we can. This is component 0. Any
26157 out-of-line register save/restore routines need LR. */
26158 if (info->lr_save_p
26159 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26160 && (info->savres_strategy & SAVE_INLINE_GPRS)
26161 && (info->savres_strategy & REST_INLINE_GPRS)
26162 && (info->savres_strategy & SAVE_INLINE_FPRS)
26163 && (info->savres_strategy & REST_INLINE_FPRS)
26164 && (info->savres_strategy & SAVE_INLINE_VRS)
26165 && (info->savres_strategy & REST_INLINE_VRS))
26167 int offset = info->lr_save_offset;
26168 if (info->push_p)
26169 offset += info->total_size;
26170 if (IN_RANGE (offset, -0x8000, 0x7fff))
26171 bitmap_set_bit (components, 0);
26174 return components;
26177 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26178 static sbitmap
26179 rs6000_components_for_bb (basic_block bb)
26181 rs6000_stack_t *info = rs6000_stack_info ();
26183 bitmap in = DF_LIVE_IN (bb);
26184 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26185 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26187 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26188 bitmap_clear (components);
26190 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26192 /* GPRs. */
26193 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26194 if (bitmap_bit_p (in, regno)
26195 || bitmap_bit_p (gen, regno)
26196 || bitmap_bit_p (kill, regno))
26197 bitmap_set_bit (components, regno);
26199 /* FPRs. */
26200 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26201 if (bitmap_bit_p (in, regno)
26202 || bitmap_bit_p (gen, regno)
26203 || bitmap_bit_p (kill, regno))
26204 bitmap_set_bit (components, regno);
26206 /* The link register. */
26207 if (bitmap_bit_p (in, LR_REGNO)
26208 || bitmap_bit_p (gen, LR_REGNO)
26209 || bitmap_bit_p (kill, LR_REGNO))
26210 bitmap_set_bit (components, 0);
26212 return components;
26215 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26216 static void
26217 rs6000_disqualify_components (sbitmap components, edge e,
26218 sbitmap edge_components, bool /*is_prologue*/)
26220 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26221 live where we want to place that code. */
26222 if (bitmap_bit_p (edge_components, 0)
26223 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26225 if (dump_file)
26226 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26227 "on entry to bb %d\n", e->dest->index);
26228 bitmap_clear_bit (components, 0);
26232 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26233 static void
26234 rs6000_emit_prologue_components (sbitmap components)
26236 rs6000_stack_t *info = rs6000_stack_info ();
26237 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26238 ? HARD_FRAME_POINTER_REGNUM
26239 : STACK_POINTER_REGNUM);
26241 machine_mode reg_mode = Pmode;
26242 int reg_size = TARGET_32BIT ? 4 : 8;
26243 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26244 ? DFmode : SFmode;
26245 int fp_reg_size = 8;
26247 /* Prologue for LR. */
26248 if (bitmap_bit_p (components, 0))
26250 rtx reg = gen_rtx_REG (reg_mode, 0);
26251 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26252 RTX_FRAME_RELATED_P (insn) = 1;
26253 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26255 int offset = info->lr_save_offset;
26256 if (info->push_p)
26257 offset += info->total_size;
26259 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26260 RTX_FRAME_RELATED_P (insn) = 1;
26261 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26262 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26263 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26266 /* Prologue for the GPRs. */
26267 int offset = info->gp_save_offset;
26268 if (info->push_p)
26269 offset += info->total_size;
26271 for (int i = info->first_gp_reg_save; i < 32; i++)
26273 if (bitmap_bit_p (components, i))
26275 rtx reg = gen_rtx_REG (reg_mode, i);
26276 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26277 RTX_FRAME_RELATED_P (insn) = 1;
26278 rtx set = copy_rtx (single_set (insn));
26279 add_reg_note (insn, REG_CFA_OFFSET, set);
26282 offset += reg_size;
26285 /* Prologue for the FPRs. */
26286 offset = info->fp_save_offset;
26287 if (info->push_p)
26288 offset += info->total_size;
26290 for (int i = info->first_fp_reg_save; i < 64; i++)
26292 if (bitmap_bit_p (components, i))
26294 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26295 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26296 RTX_FRAME_RELATED_P (insn) = 1;
26297 rtx set = copy_rtx (single_set (insn));
26298 add_reg_note (insn, REG_CFA_OFFSET, set);
26301 offset += fp_reg_size;
26305 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26306 static void
26307 rs6000_emit_epilogue_components (sbitmap components)
26309 rs6000_stack_t *info = rs6000_stack_info ();
26310 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26311 ? HARD_FRAME_POINTER_REGNUM
26312 : STACK_POINTER_REGNUM);
26314 machine_mode reg_mode = Pmode;
26315 int reg_size = TARGET_32BIT ? 4 : 8;
26317 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26318 ? DFmode : SFmode;
26319 int fp_reg_size = 8;
26321 /* Epilogue for the FPRs. */
26322 int offset = info->fp_save_offset;
26323 if (info->push_p)
26324 offset += info->total_size;
26326 for (int i = info->first_fp_reg_save; i < 64; i++)
26328 if (bitmap_bit_p (components, i))
26330 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26331 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26332 RTX_FRAME_RELATED_P (insn) = 1;
26333 add_reg_note (insn, REG_CFA_RESTORE, reg);
26336 offset += fp_reg_size;
26339 /* Epilogue for the GPRs. */
26340 offset = info->gp_save_offset;
26341 if (info->push_p)
26342 offset += info->total_size;
26344 for (int i = info->first_gp_reg_save; i < 32; i++)
26346 if (bitmap_bit_p (components, i))
26348 rtx reg = gen_rtx_REG (reg_mode, i);
26349 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26350 RTX_FRAME_RELATED_P (insn) = 1;
26351 add_reg_note (insn, REG_CFA_RESTORE, reg);
26354 offset += reg_size;
26357 /* Epilogue for LR. */
26358 if (bitmap_bit_p (components, 0))
26360 int offset = info->lr_save_offset;
26361 if (info->push_p)
26362 offset += info->total_size;
26364 rtx reg = gen_rtx_REG (reg_mode, 0);
26365 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26367 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26368 insn = emit_move_insn (lr, reg);
26369 RTX_FRAME_RELATED_P (insn) = 1;
26370 add_reg_note (insn, REG_CFA_RESTORE, lr);
26374 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26375 static void
26376 rs6000_set_handled_components (sbitmap components)
26378 rs6000_stack_t *info = rs6000_stack_info ();
26380 for (int i = info->first_gp_reg_save; i < 32; i++)
26381 if (bitmap_bit_p (components, i))
26382 cfun->machine->gpr_is_wrapped_separately[i] = true;
26384 for (int i = info->first_fp_reg_save; i < 64; i++)
26385 if (bitmap_bit_p (components, i))
26386 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26388 if (bitmap_bit_p (components, 0))
26389 cfun->machine->lr_is_wrapped_separately = true;
26392 /* VRSAVE is a bit vector representing which AltiVec registers
26393 are used. The OS uses this to determine which vector
26394 registers to save on a context switch. We need to save
26395 VRSAVE on the stack frame, add whatever AltiVec registers we
26396 used in this function, and do the corresponding magic in the
26397 epilogue. */
26398 static void
26399 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26400 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26402 /* Get VRSAVE into a GPR. */
26403 rtx reg = gen_rtx_REG (SImode, save_regno);
26404 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26405 if (TARGET_MACHO)
26406 emit_insn (gen_get_vrsave_internal (reg));
26407 else
26408 emit_insn (gen_rtx_SET (reg, vrsave));
26410 /* Save VRSAVE. */
26411 int offset = info->vrsave_save_offset + frame_off;
26412 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26414 /* Include the registers in the mask. */
26415 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26417 emit_insn (generate_set_vrsave (reg, info, 0));
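/* Schematically, the sequence emitted above is: read VRSAVE into the
   GPR, store the old value to the frame, OR in the mask of AltiVec
   registers used by this function, and write the result back.  */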
26420 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26421 called, it left the arg pointer to the old stack in r29. Otherwise, the
26422 arg pointer is the top of the current frame. */
26423 static void
26424 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26425 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26427 cfun->machine->split_stack_argp_used = true;
26429 if (sp_adjust)
26431 rtx r12 = gen_rtx_REG (Pmode, 12);
26432 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26433 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26434 emit_insn_before (set_r12, sp_adjust);
26436 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26438 rtx r12 = gen_rtx_REG (Pmode, 12);
26439 if (frame_off == 0)
26440 emit_move_insn (r12, frame_reg_rtx);
26441 else
26442 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26445 if (info->push_p)
26447 rtx r12 = gen_rtx_REG (Pmode, 12);
26448 rtx r29 = gen_rtx_REG (Pmode, 29);
26449 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26450 rtx not_more = gen_label_rtx ();
26451 rtx jump;
26453 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26454 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26455 gen_rtx_LABEL_REF (VOIDmode, not_more),
26456 pc_rtx);
26457 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26458 JUMP_LABEL (jump) = not_more;
26459 LABEL_NUSES (not_more) += 1;
26460 emit_move_insn (r12, r29);
26461 emit_label (not_more);
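/* Illustrative shape of the emitted code:
	bge 7,.Lnot_more
	mr 12,29
     .Lnot_more:
   where cr7 was set by the __morestack protocol, so the "mr" that
   picks up the old-stack arg pointer is skipped when not needed.  */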
26465 /* Emit function prologue as insns. */
26467 void
26468 rs6000_emit_prologue (void)
26470 rs6000_stack_t *info = rs6000_stack_info ();
26471 machine_mode reg_mode = Pmode;
26472 int reg_size = TARGET_32BIT ? 4 : 8;
26473 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26474 ? DFmode : SFmode;
26475 int fp_reg_size = 8;
26476 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26477 rtx frame_reg_rtx = sp_reg_rtx;
26478 unsigned int cr_save_regno;
26479 rtx cr_save_rtx = NULL_RTX;
26480 rtx_insn *insn;
26481 int strategy;
26482 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26483 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26484 && call_used_regs[STATIC_CHAIN_REGNUM]);
26485 int using_split_stack = (flag_split_stack
26486 && (lookup_attribute ("no_split_stack",
26487 DECL_ATTRIBUTES (cfun->decl))
26488 == NULL));
26490 /* Offset to top of frame for frame_reg and sp respectively. */
26491 HOST_WIDE_INT frame_off = 0;
26492 HOST_WIDE_INT sp_off = 0;
26493 /* sp_adjust is the stack adjusting instruction, tracked so that the
26494 insn setting up the split-stack arg pointer can be emitted just
26495 prior to it, when r12 is not used here for other purposes. */
26496 rtx_insn *sp_adjust = 0;
26498 #if CHECKING_P
26499 /* Track and check usage of r0, r11, r12. */
26500 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26501 #define START_USE(R) do \
26503 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26504 reg_inuse |= 1 << (R); \
26505 } while (0)
26506 #define END_USE(R) do \
26508 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26509 reg_inuse &= ~(1 << (R)); \
26510 } while (0)
26511 #define NOT_INUSE(R) do \
26513 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26514 } while (0)
26515 #else
26516 #define START_USE(R) do {} while (0)
26517 #define END_USE(R) do {} while (0)
26518 #define NOT_INUSE(R) do {} while (0)
26519 #endif
26521 if (DEFAULT_ABI == ABI_ELFv2
26522 && !TARGET_SINGLE_PIC_BASE)
26524 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26526 /* With -mminimal-toc we may generate an extra use of r2 below. */
26527 if (TARGET_TOC && TARGET_MINIMAL_TOC
26528 && !constant_pool_empty_p ())
26529 cfun->machine->r2_setup_needed = true;
26533 if (flag_stack_usage_info)
26534 current_function_static_stack_size = info->total_size;
26536 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26538 HOST_WIDE_INT size = info->total_size;
26540 if (crtl->is_leaf && !cfun->calls_alloca)
26542 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
26543 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
26544 size - STACK_CHECK_PROTECT);
26546 else if (size > 0)
26547 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
26550 if (TARGET_FIX_AND_CONTINUE)
26552 /* gdb on darwin arranges to forward a function from the old
26553 address by modifying the first 5 instructions of the function
26554 to branch to the overriding function. This is necessary to
26555 permit function pointers that point to the old function to
26556 actually forward to the new function. */
26557 emit_insn (gen_nop ());
26558 emit_insn (gen_nop ());
26559 emit_insn (gen_nop ());
26560 emit_insn (gen_nop ());
26561 emit_insn (gen_nop ());
26564 /* Handle world saves specially here. */
26565 if (WORLD_SAVE_P (info))
26567 int i, j, sz;
26568 rtx treg;
26569 rtvec p;
26570 rtx reg0;
26572 /* save_world expects lr in r0. */
26573 reg0 = gen_rtx_REG (Pmode, 0);
26574 if (info->lr_save_p)
26576 insn = emit_move_insn (reg0,
26577 gen_rtx_REG (Pmode, LR_REGNO));
26578 RTX_FRAME_RELATED_P (insn) = 1;
26581 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26582 assumptions about the offsets of various bits of the stack
26583 frame. */
26584 gcc_assert (info->gp_save_offset == -220
26585 && info->fp_save_offset == -144
26586 && info->lr_save_offset == 8
26587 && info->cr_save_offset == 4
26588 && info->push_p
26589 && info->lr_save_p
26590 && (!crtl->calls_eh_return
26591 || info->ehrd_offset == -432)
26592 && info->vrsave_save_offset == -224
26593 && info->altivec_save_offset == -416);
26595 treg = gen_rtx_REG (SImode, 11);
26596 emit_move_insn (treg, GEN_INT (-info->total_size));
26598 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26599 in R11. It also clobbers R12, so beware! */
26601 /* Preserve CR2 for save_world prologues */
26602 sz = 5;
26603 sz += 32 - info->first_gp_reg_save;
26604 sz += 64 - info->first_fp_reg_save;
26605 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26606 p = rtvec_alloc (sz);
26607 j = 0;
26608 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26609 gen_rtx_REG (SImode,
26610 LR_REGNO));
26611 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26612 gen_rtx_SYMBOL_REF (Pmode,
26613 "*save_world"));
26614 /* We do floats first so that the instruction pattern matches
26615 properly. */
26616 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26617 RTVEC_ELT (p, j++)
26618 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26619 ? DFmode : SFmode,
26620 info->first_fp_reg_save + i),
26621 frame_reg_rtx,
26622 info->fp_save_offset + frame_off + 8 * i);
26623 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26624 RTVEC_ELT (p, j++)
26625 = gen_frame_store (gen_rtx_REG (V4SImode,
26626 info->first_altivec_reg_save + i),
26627 frame_reg_rtx,
26628 info->altivec_save_offset + frame_off + 16 * i);
26629 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26630 RTVEC_ELT (p, j++)
26631 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26632 frame_reg_rtx,
26633 info->gp_save_offset + frame_off + reg_size * i);
26635 /* CR register traditionally saved as CR2. */
26636 RTVEC_ELT (p, j++)
26637 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26638 frame_reg_rtx, info->cr_save_offset + frame_off);
26639 /* Explain about use of R0. */
26640 if (info->lr_save_p)
26641 RTVEC_ELT (p, j++)
26642 = gen_frame_store (reg0,
26643 frame_reg_rtx, info->lr_save_offset + frame_off);
26644 /* Explain what happens to the stack pointer. */
26646 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26647 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26650 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26651 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26652 treg, GEN_INT (-info->total_size));
26653 sp_off = frame_off = info->total_size;
26656 strategy = info->savres_strategy;
26658 /* For V.4, update stack before we do any saving and set back pointer. */
26659 if (! WORLD_SAVE_P (info)
26660 && info->push_p
26661 && (DEFAULT_ABI == ABI_V4
26662 || crtl->calls_eh_return))
26664 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26665 || !(strategy & SAVE_INLINE_GPRS)
26666 || !(strategy & SAVE_INLINE_VRS));
26667 int ptr_regno = -1;
26668 rtx ptr_reg = NULL_RTX;
26669 int ptr_off = 0;
26671 if (info->total_size < 32767)
26672 frame_off = info->total_size;
26673 else if (need_r11)
26674 ptr_regno = 11;
26675 else if (info->cr_save_p
26676 || info->lr_save_p
26677 || info->first_fp_reg_save < 64
26678 || info->first_gp_reg_save < 32
26679 || info->altivec_size != 0
26680 || info->vrsave_size != 0
26681 || crtl->calls_eh_return)
26682 ptr_regno = 12;
26683 else
26685 /* The prologue won't be saving any regs so there is no need
26686 to set up a frame register to access any frame save area.
26687 We also won't be using frame_off anywhere below, but set
26688 the correct value anyway to protect against future
26689 changes to this function. */
26690 frame_off = info->total_size;
26692 if (ptr_regno != -1)
26694 /* Set up the frame offset to that needed by the first
26695 out-of-line save function. */
26696 START_USE (ptr_regno);
26697 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26698 frame_reg_rtx = ptr_reg;
26699 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26700 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26701 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26702 ptr_off = info->gp_save_offset + info->gp_size;
26703 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26704 ptr_off = info->altivec_save_offset + info->altivec_size;
26705 frame_off = -ptr_off;
26707 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26708 ptr_reg, ptr_off);
26709 if (REGNO (frame_reg_rtx) == 12)
26710 sp_adjust = 0;
26711 sp_off = info->total_size;
26712 if (frame_reg_rtx != sp_reg_rtx)
26713 rs6000_emit_stack_tie (frame_reg_rtx, false);
26716 /* If we use the link register, get it into r0. */
26717 if (!WORLD_SAVE_P (info) && info->lr_save_p
26718 && !cfun->machine->lr_is_wrapped_separately)
26720 rtx addr, reg, mem;
26722 reg = gen_rtx_REG (Pmode, 0);
26723 START_USE (0);
26724 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26725 RTX_FRAME_RELATED_P (insn) = 1;
26727 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26728 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26730 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26731 GEN_INT (info->lr_save_offset + frame_off));
26732 mem = gen_rtx_MEM (Pmode, addr);
26733 /* This should not be of rs6000_sr_alias_set, because of
26734 __builtin_return_address. */
26736 insn = emit_move_insn (mem, reg);
26737 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26738 NULL_RTX, NULL_RTX);
26739 END_USE (0);
26743 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26744 r12 will be needed by out-of-line gpr save. */
26745 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26746 && !(strategy & (SAVE_INLINE_GPRS
26747 | SAVE_NOINLINE_GPRS_SAVES_LR))
26748 ? 11 : 12);
26749 if (!WORLD_SAVE_P (info)
26750 && info->cr_save_p
26751 && REGNO (frame_reg_rtx) != cr_save_regno
26752 && !(using_static_chain_p && cr_save_regno == 11)
26753 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26755 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26756 START_USE (cr_save_regno);
26757 rs6000_emit_move_from_cr (cr_save_rtx);
26760 /* Do any required saving of FPRs. If only one or two to save, do
26761 it inline. Otherwise, use an out-of-line save function. */
26762 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26764 int offset = info->fp_save_offset + frame_off;
26765 for (int i = info->first_fp_reg_save; i < 64; i++)
26767 if (save_reg_p (i)
26768 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26769 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26770 sp_off - frame_off);
26772 offset += fp_reg_size;
26775 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26777 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26778 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26779 unsigned ptr_regno = ptr_regno_for_savres (sel);
26780 rtx ptr_reg = frame_reg_rtx;
26782 if (REGNO (frame_reg_rtx) == ptr_regno)
26783 gcc_checking_assert (frame_off == 0);
26784 else
26786 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26787 NOT_INUSE (ptr_regno);
26788 emit_insn (gen_add3_insn (ptr_reg,
26789 frame_reg_rtx, GEN_INT (frame_off)));
26791 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26792 info->fp_save_offset,
26793 info->lr_save_offset,
26794 DFmode, sel);
26795 rs6000_frame_related (insn, ptr_reg, sp_off,
26796 NULL_RTX, NULL_RTX);
26797 if (lr)
26798 END_USE (0);
26801 /* Save GPRs. This is done as a PARALLEL if we are using
26802 the store-multiple instructions. */
26803 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26805 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26806 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26807 unsigned ptr_regno = ptr_regno_for_savres (sel);
26808 rtx ptr_reg = frame_reg_rtx;
26809 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26810 int end_save = info->gp_save_offset + info->gp_size;
26811 int ptr_off;
26813 if (ptr_regno == 12)
26814 sp_adjust = 0;
26815 if (!ptr_set_up)
26816 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26818 /* Need to adjust r11 (r12) if we saved any FPRs. */
26819 if (end_save + frame_off != 0)
26821 rtx offset = GEN_INT (end_save + frame_off);
26823 if (ptr_set_up)
26824 frame_off = -end_save;
26825 else
26826 NOT_INUSE (ptr_regno);
26827 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26829 else if (!ptr_set_up)
26831 NOT_INUSE (ptr_regno);
26832 emit_move_insn (ptr_reg, frame_reg_rtx);
26834 ptr_off = -end_save;
26835 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26836 info->gp_save_offset + ptr_off,
26837 info->lr_save_offset + ptr_off,
26838 reg_mode, sel);
26839 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26840 NULL_RTX, NULL_RTX);
26841 if (lr)
26842 END_USE (0);
26844 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26846 rtvec p;
26847 int i;
26848 p = rtvec_alloc (32 - info->first_gp_reg_save);
26849 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26850 RTVEC_ELT (p, i)
26851 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26852 frame_reg_rtx,
26853 info->gp_save_offset + frame_off + reg_size * i);
26854 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26855 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26856 NULL_RTX, NULL_RTX);
26858 else if (!WORLD_SAVE_P (info))
26860 int offset = info->gp_save_offset + frame_off;
26861 for (int i = info->first_gp_reg_save; i < 32; i++)
26863 if (rs6000_reg_live_or_pic_offset_p (i)
26864 && !cfun->machine->gpr_is_wrapped_separately[i])
26865 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
26866 sp_off - frame_off);
26868 offset += reg_size;
26872 if (crtl->calls_eh_return)
26874 unsigned int i;
26875 rtvec p;
26877 for (i = 0; ; ++i)
26879 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26880 if (regno == INVALID_REGNUM)
26881 break;
26884 p = rtvec_alloc (i);
26886 for (i = 0; ; ++i)
26888 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26889 if (regno == INVALID_REGNUM)
26890 break;
26892 rtx set
26893 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
26894 sp_reg_rtx,
26895 info->ehrd_offset + sp_off + reg_size * (int) i);
26896 RTVEC_ELT (p, i) = set;
26897 RTX_FRAME_RELATED_P (set) = 1;
26900 insn = emit_insn (gen_blockage ());
26901 RTX_FRAME_RELATED_P (insn) = 1;
26902 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
26905 /* In AIX ABI we need to make sure r2 is really saved. */
26906 if (TARGET_AIX && crtl->calls_eh_return)
26908 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
26909 rtx join_insn, note;
26910 rtx_insn *save_insn;
26911 long toc_restore_insn;
26913 tmp_reg = gen_rtx_REG (Pmode, 11);
26914 tmp_reg_si = gen_rtx_REG (SImode, 11);
26915 if (using_static_chain_p)
26917 START_USE (0);
26918 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
26920 else
26921 START_USE (11);
26922 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
26923 /* Peek at instruction to which this function returns. If it's
26924 restoring r2, then we know we've already saved r2. We can't
26925 unconditionally save r2 because the value we have will already
26926 be updated if we arrived at this function via a plt call or
26927 toc adjusting stub. */
26928 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
26929 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
26930 + RS6000_TOC_SAVE_SLOT);
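/* These literals are the opcode + rT/rA fields of the insn that
   reloads r2 after a call: 0x80410000 is "lwz r2,0(r1)" and
   0xE8410000 is "ld r2,0(r1)"; adding RS6000_TOC_SAVE_SLOT fills
   in the displacement.  */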
26931 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
26932 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
26933 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
26934 validate_condition_mode (EQ, CCUNSmode);
26935 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
26936 emit_insn (gen_rtx_SET (compare_result,
26937 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
26938 toc_save_done = gen_label_rtx ();
26939 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26940 gen_rtx_EQ (VOIDmode, compare_result,
26941 const0_rtx),
26942 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
26943 pc_rtx);
26944 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26945 JUMP_LABEL (jump) = toc_save_done;
26946 LABEL_NUSES (toc_save_done) += 1;
26948 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
26949 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
26950 sp_off - frame_off);
26952 emit_label (toc_save_done);
26954 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
26955 have a CFG that has different saves along different paths.
26956 Move the note to a dummy blockage insn, which describes that
26957 R2 is unconditionally saved after the label. */
26958 /* ??? An alternate representation might be a special insn pattern
26959 containing both the branch and the store. That might give the
26960 code that minimizes the number of DW_CFA_advance opcodes more
26961 freedom in placing the annotations. */
26962 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
26963 if (note)
26964 remove_note (save_insn, note);
26965 else
26966 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
26967 copy_rtx (PATTERN (save_insn)), NULL_RTX);
26968 RTX_FRAME_RELATED_P (save_insn) = 0;
26970 join_insn = emit_insn (gen_blockage ());
26971 REG_NOTES (join_insn) = note;
26972 RTX_FRAME_RELATED_P (join_insn) = 1;
26974 if (using_static_chain_p)
26976 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
26977 END_USE (0);
26979 else
26980 END_USE (11);
26983 /* Save CR if we use any that must be preserved. */
26984 if (!WORLD_SAVE_P (info) && info->cr_save_p)
26986 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26987 GEN_INT (info->cr_save_offset + frame_off));
26988 rtx mem = gen_frame_mem (SImode, addr);
26990 /* If we didn't copy cr before, do so now using r0. */
26991 if (cr_save_rtx == NULL_RTX)
26993 START_USE (0);
26994 cr_save_rtx = gen_rtx_REG (SImode, 0);
26995 rs6000_emit_move_from_cr (cr_save_rtx);
26998 /* Saving CR requires a two-instruction sequence: one instruction
26999 to move the CR to a general-purpose register, and a second
27000 instruction that stores the GPR to memory.
27002 We do not emit any DWARF CFI records for the first of these,
27003 because we cannot properly represent the fact that CR is saved in
27004 a register. One reason is that we cannot express that multiple
27005 CR fields are saved; another reason is that on 64-bit, the size
27006 of the CR register in DWARF (4 bytes) differs from the size of
27007 a general-purpose register.
27009 This means if any intervening instruction were to clobber one of
27010 the call-saved CR fields, we'd have incorrect CFI. To prevent
27011 this from happening, we mark the store to memory as a use of
27012 those CR fields, which prevents any such instruction from being
27013 scheduled in between the two instructions. */
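/* Illustratively (general case): "mfcr rN" followed by
   "stw rN,OFF(r1)", with the USEs below preventing anything that
   sets the saved CR fields from being scheduled between the two.  */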
27014 rtx crsave_v[9];
27015 int n_crsave = 0;
27016 int i;
27018 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27019 for (i = 0; i < 8; i++)
27020 if (save_reg_p (CR0_REGNO + i))
27021 crsave_v[n_crsave++]
27022 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27024 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27025 gen_rtvec_v (n_crsave, crsave_v)));
27026 END_USE (REGNO (cr_save_rtx));
27028 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27029 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27030 so we need to construct a frame expression manually. */
27031 RTX_FRAME_RELATED_P (insn) = 1;
27033 /* Update address to be stack-pointer relative, like
27034 rs6000_frame_related would do. */
27035 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27036 GEN_INT (info->cr_save_offset + sp_off));
27037 mem = gen_frame_mem (SImode, addr);
27039 if (DEFAULT_ABI == ABI_ELFv2)
27041 /* In the ELFv2 ABI we generate separate CFI records for each
27042 CR field that was actually saved. They all point to the
27043 same 32-bit stack slot. */
27044 rtx crframe[8];
27045 int n_crframe = 0;
27047 for (i = 0; i < 8; i++)
27048 if (save_reg_p (CR0_REGNO + i))
27050 crframe[n_crframe]
27051 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27053 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27054 n_crframe++;
27057 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27058 gen_rtx_PARALLEL (VOIDmode,
27059 gen_rtvec_v (n_crframe, crframe)));
27061 else
27063 /* In other ABIs, by convention, we use a single CR regnum to
27064 represent the fact that all call-saved CR fields are saved.
27065 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27066 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27067 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27071 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27072 *separate* slots if the routine calls __builtin_eh_return, so
27073 that they can be independently restored by the unwinder. */
27074 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27076 int i, cr_off = info->ehcr_offset;
27077 rtx crsave;
27079 /* ??? We might get better performance by using multiple mfocrf
27080 instructions. */
27081 crsave = gen_rtx_REG (SImode, 0);
27082 emit_insn (gen_movesi_from_cr (crsave));
27084 for (i = 0; i < 8; i++)
27085 if (!call_used_regs[CR0_REGNO + i])
27087 rtvec p = rtvec_alloc (2);
27088 RTVEC_ELT (p, 0)
27089 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27090 RTVEC_ELT (p, 1)
27091 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27093 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27095 RTX_FRAME_RELATED_P (insn) = 1;
27096 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27097 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27098 sp_reg_rtx, cr_off + sp_off));
27100 cr_off += reg_size;
27104 /* Update stack and set back pointer unless this is V.4,
27105 for which it was done previously. */
27106 if (!WORLD_SAVE_P (info) && info->push_p
27107 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27109 rtx ptr_reg = NULL;
27110 int ptr_off = 0;
27112 /* If saving altivec regs we need to be able to address all save
27113 locations using a 16-bit offset. */
27114 if ((strategy & SAVE_INLINE_VRS) == 0
27115 || (info->altivec_size != 0
27116 && (info->altivec_save_offset + info->altivec_size - 16
27117 + info->total_size - frame_off) > 32767)
27118 || (info->vrsave_size != 0
27119 && (info->vrsave_save_offset
27120 + info->total_size - frame_off) > 32767))
27122 int sel = SAVRES_SAVE | SAVRES_VR;
27123 unsigned ptr_regno = ptr_regno_for_savres (sel);
27125 if (using_static_chain_p
27126 && ptr_regno == STATIC_CHAIN_REGNUM)
27127 ptr_regno = 12;
27128 if (REGNO (frame_reg_rtx) != ptr_regno)
27129 START_USE (ptr_regno);
27130 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27131 frame_reg_rtx = ptr_reg;
27132 ptr_off = info->altivec_save_offset + info->altivec_size;
27133 frame_off = -ptr_off;
27135 else if (REGNO (frame_reg_rtx) == 1)
27136 frame_off = info->total_size;
27137 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27138 ptr_reg, ptr_off);
27139 if (REGNO (frame_reg_rtx) == 12)
27140 sp_adjust = 0;
27141 sp_off = info->total_size;
27142 if (frame_reg_rtx != sp_reg_rtx)
27143 rs6000_emit_stack_tie (frame_reg_rtx, false);
27146 /* Set frame pointer, if needed. */
27147 if (frame_pointer_needed)
27149 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27150 sp_reg_rtx);
27151 RTX_FRAME_RELATED_P (insn) = 1;
27154 /* Save AltiVec registers if needed. Save here because the red zone does
27155 not always include AltiVec registers. */
27156 if (!WORLD_SAVE_P (info)
27157 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27159 int end_save = info->altivec_save_offset + info->altivec_size;
27160 int ptr_off;
27161 /* Oddly, the vector save/restore functions point r0 at the end
27162 of the save area, then use r11 or r12 to load offsets for
27163 [reg+reg] addressing. */
27164 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27165 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27166 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27168 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27169 NOT_INUSE (0);
27170 if (scratch_regno == 12)
27171 sp_adjust = 0;
27172 if (end_save + frame_off != 0)
27174 rtx offset = GEN_INT (end_save + frame_off);
27176 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27178 else
27179 emit_move_insn (ptr_reg, frame_reg_rtx);
27181 ptr_off = -end_save;
27182 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27183 info->altivec_save_offset + ptr_off,
27184 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27185 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27186 NULL_RTX, NULL_RTX);
27187 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27189 /* The oddity mentioned above clobbered our frame reg. */
27190 emit_move_insn (frame_reg_rtx, ptr_reg);
27191 frame_off = ptr_off;
27194 else if (!WORLD_SAVE_P (info)
27195 && info->altivec_size != 0)
27197 int i;
27199 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27200 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27202 rtx areg, savereg, mem;
27203 HOST_WIDE_INT offset;
27205 offset = (info->altivec_save_offset + frame_off
27206 + 16 * (i - info->first_altivec_reg_save));
27208 savereg = gen_rtx_REG (V4SImode, i);
27210 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
27212 mem = gen_frame_mem (V4SImode,
27213 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27214 GEN_INT (offset)));
27215 insn = emit_insn (gen_rtx_SET (mem, savereg));
27216 areg = NULL_RTX;
27218 else
27220 NOT_INUSE (0);
27221 areg = gen_rtx_REG (Pmode, 0);
27222 emit_move_insn (areg, GEN_INT (offset));
27224 /* AltiVec addressing mode is [reg+reg]. */
27225 mem = gen_frame_mem (V4SImode,
27226 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27228 /* Rather than emitting a generic move, force use of the stvx
27229 instruction, which we always want on ISA 2.07 (power8) systems.
27230 In particular we don't want xxpermdi/stxvd2x for little
27231 endian. */
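/* (On little-endian, stxvd2x stores the two doublewords in
   big-endian order, so a plain vector store would need a preceding
   xxpermdi; stvx stores the 16 bytes in element order directly.) */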
27232 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27235 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27236 areg, GEN_INT (offset));
27240 /* VRSAVE is a bit vector representing which AltiVec registers
27241 are used. The OS uses this to determine which vector
27242 registers to save on a context switch. We need to save
27243 VRSAVE on the stack frame, add whatever AltiVec registers we
27244 used in this function, and do the corresponding magic in the
27245 epilogue. */
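/* (One bit per vector register: a function touching, say, only v20
   and v21 ORs just those two bits -- as assigned by ALTIVEC_REG_BIT
   -- into the saved VRSAVE value; the epilogue reloads the saved
   value, dropping them again.) */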
27247 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27249 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27250 be using r12 as frame_reg_rtx and r11 as the static chain
27251 pointer for nested functions. */
27252 int save_regno = 12;
27253 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27254 && !using_static_chain_p)
27255 save_regno = 11;
27256 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27258 save_regno = 11;
27259 if (using_static_chain_p)
27260 save_regno = 0;
27262 NOT_INUSE (save_regno);
27264 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27267 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27268 if (!TARGET_SINGLE_PIC_BASE
27269 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27270 && !constant_pool_empty_p ())
27271 || (DEFAULT_ABI == ABI_V4
27272 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27273 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27275 /* If emit_load_toc_table will use the link register, we need to save
27276 it. We use R12 for this purpose because emit_load_toc_table
27277 can use register 0. This allows us to use a plain 'blr' to return
27278 from the procedure more often. */
27279 int save_LR_around_toc_setup = (TARGET_ELF
27280 && DEFAULT_ABI == ABI_V4
27281 && flag_pic
27282 && ! info->lr_save_p
27283 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27284 if (save_LR_around_toc_setup)
27286 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27287 rtx tmp = gen_rtx_REG (Pmode, 12);
27289 sp_adjust = 0;
27290 insn = emit_move_insn (tmp, lr);
27291 RTX_FRAME_RELATED_P (insn) = 1;
27293 rs6000_emit_load_toc_table (TRUE);
27295 insn = emit_move_insn (lr, tmp);
27296 add_reg_note (insn, REG_CFA_RESTORE, lr);
27297 RTX_FRAME_RELATED_P (insn) = 1;
27299 else
27300 rs6000_emit_load_toc_table (TRUE);
27303 #if TARGET_MACHO
27304 if (!TARGET_SINGLE_PIC_BASE
27305 && DEFAULT_ABI == ABI_DARWIN
27306 && flag_pic && crtl->uses_pic_offset_table)
27308 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27309 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27311 /* Save and restore LR locally around this call (in R0). */
27312 if (!info->lr_save_p)
27313 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27315 emit_insn (gen_load_macho_picbase (src));
27317 emit_move_insn (gen_rtx_REG (Pmode,
27318 RS6000_PIC_OFFSET_TABLE_REGNUM),
27319 lr);
27321 if (!info->lr_save_p)
27322 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27324 #endif
27326 /* If we need to, save the TOC register after doing the stack setup.
27327 Do not emit eh frame info for this save. The unwinder wants info,
27328 conceptually attached to instructions in this function, about
27329 register values in the caller of this function. This R2 may have
27330 already been changed from the value in the caller.
27331 We don't attempt to write accurate DWARF EH frame info for R2
27332 because code emitted by gcc for a (non-pointer) function call
27333 doesn't save and restore R2. Instead, R2 is managed out-of-line
27334 by a linker generated plt call stub when the function resides in
27335 a shared library. This behavior is costly to describe in DWARF,
27336 both in terms of the size of DWARF info and the time taken in the
27337 unwinder to interpret it. R2 changes, apart from the
27338 calls_eh_return case earlier in this function, are handled by
27339 linux-unwind.h frob_update_context. */
27340 if (rs6000_save_toc_in_prologue_p ())
27342 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27343 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27346 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27347 if (using_split_stack && split_stack_arg_pointer_used_p ())
27348 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27351 /* Output .extern statements for the save/restore routines we use. */
27353 static void
27354 rs6000_output_savres_externs (FILE *file)
27356 rs6000_stack_t *info = rs6000_stack_info ();
27358 if (TARGET_DEBUG_STACK)
27359 debug_stack_info (info);
27361 /* Write .extern for any function we will call to save and restore
27362 fp values. */
27363 if (info->first_fp_reg_save < 64
27364 && !TARGET_MACHO
27365 && !TARGET_ELF)
27367 char *name;
27368 int regno = info->first_fp_reg_save - 32;
27370 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27372 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27373 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27374 name = rs6000_savres_routine_name (regno, sel);
27375 fprintf (file, "\t.extern %s\n", name);
27377 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27379 bool lr = (info->savres_strategy
27380 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27381 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27382 name = rs6000_savres_routine_name (regno, sel);
27383 fprintf (file, "\t.extern %s\n", name);
27388 /* Write function prologue. */
27390 static void
27391 rs6000_output_function_prologue (FILE *file,
27392 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
27394 if (!cfun->is_thunk)
27395 rs6000_output_savres_externs (file);
27397 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27398 immediately after the global entry point label. */
27399 if (rs6000_global_entry_point_needed_p ())
27401 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27403 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27405 if (TARGET_CMODEL != CMODEL_LARGE)
27407 /* In the small and medium code models, we assume the TOC is less
27408 than 2 GB away from the text section, so it can be computed via the
27409 following two-instruction sequence. */
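/* Concretely, with .LCFn just emitted at the '0:' site and r12
   holding the global entry point address, the code printed below is:
   0: addis 2,12,.TOC.-.LCFn@ha
   addi 2,2,.TOC.-.LCFn@l */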
27410 char buf[256];
27412 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27413 fprintf (file, "0:\taddis 2,12,.TOC.-");
27414 assemble_name (file, buf);
27415 fprintf (file, "@ha\n");
27416 fprintf (file, "\taddi 2,2,.TOC.-");
27417 assemble_name (file, buf);
27418 fprintf (file, "@l\n");
27420 else
27422 /* In the large code model, we allow arbitrary offsets between the
27423 TOC and the text section, so we have to load the offset from
27424 memory. The data field is emitted directly before the global
27425 entry point in rs6000_elf_declare_function_name. */
27426 char buf[256];
27428 #ifdef HAVE_AS_ENTRY_MARKERS
27429 /* If supported by the linker, emit a marker relocation. If the
27430 total code size of the final executable or shared library
27431 happens to fit into 2 GB after all, the linker will replace
27432 this code sequence with the sequence for the small or medium
27433 code model. */
27434 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27435 #endif
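/* The code printed below is:
   ld 2,.LCLn-.LCFn(12)
   add 2,2,12
   i.e. load the precomputed TOC offset stored at .LCLn, indexed off
   the entry point address in r12, then add r12. */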
27436 fprintf (file, "\tld 2,");
27437 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27438 assemble_name (file, buf);
27439 fprintf (file, "-");
27440 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27441 assemble_name (file, buf);
27442 fprintf (file, "(12)\n");
27443 fprintf (file, "\tadd 2,2,12\n");
27446 fputs ("\t.localentry\t", file);
27447 assemble_name (file, name);
27448 fputs (",.-", file);
27449 assemble_name (file, name);
27450 fputs ("\n", file);
27453 /* Output -mprofile-kernel code. This needs to be done here instead of
27454 in output_function_profile since it must go after the ELFv2 ABI
27455 local entry point. */
27456 if (TARGET_PROFILE_KERNEL && crtl->profile)
27458 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27459 gcc_assert (!TARGET_32BIT);
27461 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27463 /* In the ELFv2 ABI we have no compiler stack word. It must be
27464 the responsibility of _mcount to preserve the static chain
27465 register if required. */
27466 if (DEFAULT_ABI != ABI_ELFv2
27467 && cfun->static_chain_decl != NULL)
27469 asm_fprintf (file, "\tstd %s,24(%s)\n",
27470 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27471 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27472 asm_fprintf (file, "\tld %s,24(%s)\n",
27473 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27475 else
27476 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27479 rs6000_pic_labelno++;
27482 /* -mprofile-kernel code calls mcount before the function prologue,
27483 so a profiled leaf function should stay a leaf function. */
27484 static bool
27485 rs6000_keep_leaf_when_profiled ()
27487 return TARGET_PROFILE_KERNEL;
27490 /* Non-zero if vmx regs are restored before the frame pop, zero if
27491 we restore after the pop when possible. */
27492 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27494 /* Restoring cr is a two step process: loading a reg from the frame
27495 save, then moving the reg to cr. For ABI_V4 we must let the
27496 unwinder know that the stack location is no longer valid at or
27497 before the stack deallocation, but we can't emit a cfa_restore for
27498 cr at the stack deallocation like we do for other registers.
27499 The trouble is that it is possible for the move to cr to be
27500 scheduled after the stack deallocation. So say exactly where cr
27501 is located on each of the two insns. */
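/* Concretely, for the V.4 case described: load_cr_save below tags
   the load insn with a REG_CFA_REGISTER note recording that the GPR
   now holds cr2's value, and restore_saved_cr tags the move-to-cr
   insn with the final REG_CFA_RESTORE. */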
27503 static rtx
27504 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27506 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27507 rtx reg = gen_rtx_REG (SImode, regno);
27508 rtx_insn *insn = emit_move_insn (reg, mem);
27510 if (!exit_func && DEFAULT_ABI == ABI_V4)
27512 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27513 rtx set = gen_rtx_SET (reg, cr);
27515 add_reg_note (insn, REG_CFA_REGISTER, set);
27516 RTX_FRAME_RELATED_P (insn) = 1;
27518 return reg;
27521 /* Reload CR from REG. */
27523 static void
27524 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27526 int count = 0;
27527 int i;
27529 if (using_mfcr_multiple)
27531 for (i = 0; i < 8; i++)
27532 if (save_reg_p (CR0_REGNO + i))
27533 count++;
27534 gcc_assert (count);
27537 if (using_mfcr_multiple && count > 1)
27539 rtx_insn *insn;
27540 rtvec p;
27541 int ndx;
27543 p = rtvec_alloc (count);
27545 ndx = 0;
27546 for (i = 0; i < 8; i++)
27547 if (save_reg_p (CR0_REGNO + i))
27549 rtvec r = rtvec_alloc (2);
27550 RTVEC_ELT (r, 0) = reg;
27551 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27552 RTVEC_ELT (p, ndx) =
27553 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27554 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27555 ndx++;
27557 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27558 gcc_assert (ndx == count);
27560 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27561 CR field separately. */
27562 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27564 for (i = 0; i < 8; i++)
27565 if (save_reg_p (CR0_REGNO + i))
27566 add_reg_note (insn, REG_CFA_RESTORE,
27567 gen_rtx_REG (SImode, CR0_REGNO + i));
27569 RTX_FRAME_RELATED_P (insn) = 1;
27572 else
27573 for (i = 0; i < 8; i++)
27574 if (save_reg_p (CR0_REGNO + i))
27576 rtx insn = emit_insn (gen_movsi_to_cr_one
27577 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27579 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27580 CR field separately, attached to the insn that in fact
27581 restores this particular CR field. */
27582 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27584 add_reg_note (insn, REG_CFA_RESTORE,
27585 gen_rtx_REG (SImode, CR0_REGNO + i));
27587 RTX_FRAME_RELATED_P (insn) = 1;
27591 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27592 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27593 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27595 rtx_insn *insn = get_last_insn ();
27596 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27598 add_reg_note (insn, REG_CFA_RESTORE, cr);
27599 RTX_FRAME_RELATED_P (insn) = 1;
27603 /* Like cr, the move to lr instruction can be scheduled after the
27604 stack deallocation, but unlike cr, its stack frame save is still
27605 valid. So we only need to emit the cfa_restore on the correct
27606 instruction. */
27608 static void
27609 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27611 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27612 rtx reg = gen_rtx_REG (Pmode, regno);
27614 emit_move_insn (reg, mem);
27617 static void
27618 restore_saved_lr (int regno, bool exit_func)
27620 rtx reg = gen_rtx_REG (Pmode, regno);
27621 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27622 rtx_insn *insn = emit_move_insn (lr, reg);
27624 if (!exit_func && flag_shrink_wrap)
27626 add_reg_note (insn, REG_CFA_RESTORE, lr);
27627 RTX_FRAME_RELATED_P (insn) = 1;
27631 static rtx
27632 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27634 if (DEFAULT_ABI == ABI_ELFv2)
27636 int i;
27637 for (i = 0; i < 8; i++)
27638 if (save_reg_p (CR0_REGNO + i))
27640 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27641 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27642 cfa_restores);
27645 else if (info->cr_save_p)
27646 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27647 gen_rtx_REG (SImode, CR2_REGNO),
27648 cfa_restores);
27650 if (info->lr_save_p)
27651 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27652 gen_rtx_REG (Pmode, LR_REGNO),
27653 cfa_restores);
27654 return cfa_restores;
27657 /* Return true if OFFSET from the stack pointer can be clobbered by signals.
27658 V.4 doesn't have any stack cushion; the AIX ABIs keep 220 or 288 bytes
27659 below the stack pointer that signals do not clobber. */
27661 static inline bool
27662 offset_below_red_zone_p (HOST_WIDE_INT offset)
27664 return offset < (DEFAULT_ABI == ABI_V4
27665 ? 0
27666 : TARGET_32BIT ? -220 : -288);
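/* (So on the 64-bit AIX/ELFv2 ABIs, save slots within 288 bytes
   below the stack pointer remain valid after the frame pop, while
   anything deeper must be restored before the pop.) */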
27669 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27671 static void
27672 emit_cfa_restores (rtx cfa_restores)
27674 rtx_insn *insn = get_last_insn ();
27675 rtx *loc = &REG_NOTES (insn);
27677 while (*loc)
27678 loc = &XEXP (*loc, 1);
27679 *loc = cfa_restores;
27680 RTX_FRAME_RELATED_P (insn) = 1;
27683 /* Emit function epilogue as insns. */
27685 void
27686 rs6000_emit_epilogue (int sibcall)
27688 rs6000_stack_t *info;
27689 int restoring_GPRs_inline;
27690 int restoring_FPRs_inline;
27691 int using_load_multiple;
27692 int using_mtcr_multiple;
27693 int use_backchain_to_restore_sp;
27694 int restore_lr;
27695 int strategy;
27696 HOST_WIDE_INT frame_off = 0;
27697 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27698 rtx frame_reg_rtx = sp_reg_rtx;
27699 rtx cfa_restores = NULL_RTX;
27700 rtx insn;
27701 rtx cr_save_reg = NULL_RTX;
27702 machine_mode reg_mode = Pmode;
27703 int reg_size = TARGET_32BIT ? 4 : 8;
27704 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27705 ? DFmode : SFmode;
27706 int fp_reg_size = 8;
27707 int i;
27708 bool exit_func;
27709 unsigned ptr_regno;
27711 info = rs6000_stack_info ();
27713 strategy = info->savres_strategy;
27714 using_load_multiple = strategy & REST_MULTIPLE;
27715 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27716 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27717 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
27718 || rs6000_cpu == PROCESSOR_PPC603
27719 || rs6000_cpu == PROCESSOR_PPC750
27720 || optimize_size);
27721 /* Restore via the backchain when we have a large frame, since this
27722 is more efficient than an addis, addi pair. The second condition
27723 here will not trigger at the moment; we don't actually need a
27724 frame pointer for alloca, but the generic parts of the compiler
27725 give us one anyway. */
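/* (The back chain is the word at offset 0 from the stack pointer,
   holding the caller's stack pointer value, so the single load from
   (mem:Pmode r1) below recovers the old sp directly.) */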
27726 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27727 ? info->lr_save_offset
27728 : 0) > 32767
27729 || (cfun->calls_alloca
27730 && !frame_pointer_needed));
27731 restore_lr = (info->lr_save_p
27732 && (restoring_FPRs_inline
27733 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27734 && (restoring_GPRs_inline
27735 || info->first_fp_reg_save < 64)
27736 && !cfun->machine->lr_is_wrapped_separately);
27739 if (WORLD_SAVE_P (info))
27741 int i, j;
27742 char rname[30];
27743 const char *alloc_rname;
27744 rtvec p;
27746 /* eh_rest_world_r10 will return to the location saved in the LR
27747 stack slot (which is not likely to be our caller).
27748 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27749 rest_world is similar, except any R10 parameter is ignored.
27750 The exception-handling stuff that was here in 2.95 is no
27751 longer necessary. */
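/* The vector holds 9 fixed elements -- the return, the use of the
   routine symbol, clobbers of r0, r7, r8, r11 and r12, the use of
   r10, and the CR2 load -- plus one load per GP, AltiVec and FP
   register being restored. */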
27753 p = rtvec_alloc (9
27754 + 32 - info->first_gp_reg_save
27755 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27756 + 63 + 1 - info->first_fp_reg_save);
27758 strcpy (rname, ((crtl->calls_eh_return) ?
27759 "*eh_rest_world_r10" : "*rest_world"));
27760 alloc_rname = ggc_strdup (rname);
27762 j = 0;
27763 RTVEC_ELT (p, j++) = ret_rtx;
27764 RTVEC_ELT (p, j++)
27765 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27766 /* The instruction pattern requires a clobber here;
27767 it is shared with the restVEC helper. */
27768 RTVEC_ELT (p, j++)
27769 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27772 /* CR register traditionally saved as CR2. */
27773 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27774 RTVEC_ELT (p, j++)
27775 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27776 if (flag_shrink_wrap)
27778 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27779 gen_rtx_REG (Pmode, LR_REGNO),
27780 cfa_restores);
27781 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27785 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27787 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27788 RTVEC_ELT (p, j++)
27789 = gen_frame_load (reg,
27790 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27791 if (flag_shrink_wrap)
27792 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27794 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27796 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27797 RTVEC_ELT (p, j++)
27798 = gen_frame_load (reg,
27799 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27800 if (flag_shrink_wrap)
27801 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27803 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27805 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27806 ? DFmode : SFmode),
27807 info->first_fp_reg_save + i);
27808 RTVEC_ELT (p, j++)
27809 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27810 if (flag_shrink_wrap)
27811 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27813 RTVEC_ELT (p, j++)
27814 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27815 RTVEC_ELT (p, j++)
27816 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27817 RTVEC_ELT (p, j++)
27818 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27819 RTVEC_ELT (p, j++)
27820 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27821 RTVEC_ELT (p, j++)
27822 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27823 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27825 if (flag_shrink_wrap)
27827 REG_NOTES (insn) = cfa_restores;
27828 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27829 RTX_FRAME_RELATED_P (insn) = 1;
27831 return;
27834 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27835 if (info->push_p)
27836 frame_off = info->total_size;
27838 /* Restore AltiVec registers if we must do so before adjusting the
27839 stack. */
27840 if (info->altivec_size != 0
27841 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27842 || (DEFAULT_ABI != ABI_V4
27843 && offset_below_red_zone_p (info->altivec_save_offset))))
27845 int i;
27846 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27848 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27849 if (use_backchain_to_restore_sp)
27851 int frame_regno = 11;
27853 if ((strategy & REST_INLINE_VRS) == 0)
27855 /* Of r11 and r12, select the one not clobbered by an
27856 out-of-line restore function for the frame register. */
27857 frame_regno = 11 + 12 - scratch_regno;
27859 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27860 emit_move_insn (frame_reg_rtx,
27861 gen_rtx_MEM (Pmode, sp_reg_rtx));
27862 frame_off = 0;
27864 else if (frame_pointer_needed)
27865 frame_reg_rtx = hard_frame_pointer_rtx;
27867 if ((strategy & REST_INLINE_VRS) == 0)
27869 int end_save = info->altivec_save_offset + info->altivec_size;
27870 int ptr_off;
27871 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27872 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27874 if (end_save + frame_off != 0)
27876 rtx offset = GEN_INT (end_save + frame_off);
27878 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27880 else
27881 emit_move_insn (ptr_reg, frame_reg_rtx);
27883 ptr_off = -end_save;
27884 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27885 info->altivec_save_offset + ptr_off,
27886 0, V4SImode, SAVRES_VR);
27888 else
27890 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27891 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27893 rtx addr, areg, mem, insn;
27894 rtx reg = gen_rtx_REG (V4SImode, i);
27895 HOST_WIDE_INT offset
27896 = (info->altivec_save_offset + frame_off
27897 + 16 * (i - info->first_altivec_reg_save));
27899 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
27901 mem = gen_frame_mem (V4SImode,
27902 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27903 GEN_INT (offset)));
27904 insn = gen_rtx_SET (reg, mem);
27906 else
27908 areg = gen_rtx_REG (Pmode, 0);
27909 emit_move_insn (areg, GEN_INT (offset));
27911 /* AltiVec addressing mode is [reg+reg]. */
27912 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27913 mem = gen_frame_mem (V4SImode, addr);
27915 /* Rather than emitting a generic move, force use of the
27916 lvx instruction, which we always want. In particular we
27917 don't want lxvd2x/xxpermdi for little endian. */
27918 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27921 (void) emit_insn (insn);
27925 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27926 if (((strategy & REST_INLINE_VRS) == 0
27927 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27928 && (flag_shrink_wrap
27929 || (offset_below_red_zone_p
27930 (info->altivec_save_offset
27931 + 16 * (i - info->first_altivec_reg_save)))))
27933 rtx reg = gen_rtx_REG (V4SImode, i);
27934 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27938 /* Restore VRSAVE if we must do so before adjusting the stack. */
27939 if (info->vrsave_size != 0
27940 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27941 || (DEFAULT_ABI != ABI_V4
27942 && offset_below_red_zone_p (info->vrsave_save_offset))))
27944 rtx reg;
27946 if (frame_reg_rtx == sp_reg_rtx)
27948 if (use_backchain_to_restore_sp)
27950 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27951 emit_move_insn (frame_reg_rtx,
27952 gen_rtx_MEM (Pmode, sp_reg_rtx));
27953 frame_off = 0;
27955 else if (frame_pointer_needed)
27956 frame_reg_rtx = hard_frame_pointer_rtx;
27959 reg = gen_rtx_REG (SImode, 12);
27960 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27961 info->vrsave_save_offset + frame_off));
27963 emit_insn (generate_set_vrsave (reg, info, 1));
27966 insn = NULL_RTX;
27967 /* If we have a large stack frame, restore the old stack pointer
27968 using the backchain. */
27969 if (use_backchain_to_restore_sp)
27971 if (frame_reg_rtx == sp_reg_rtx)
27973 /* Under V.4, don't reset the stack pointer until after we're done
27974 loading the saved registers. */
27975 if (DEFAULT_ABI == ABI_V4)
27976 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27978 insn = emit_move_insn (frame_reg_rtx,
27979 gen_rtx_MEM (Pmode, sp_reg_rtx));
27980 frame_off = 0;
27982 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27983 && DEFAULT_ABI == ABI_V4)
27984 /* frame_reg_rtx has been set up by the altivec restore. */
27985 ;
27986 else
27988 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
27989 frame_reg_rtx = sp_reg_rtx;
27992 /* If we have a frame pointer, we can restore the old stack pointer
27993 from it. */
27994 else if (frame_pointer_needed)
27996 frame_reg_rtx = sp_reg_rtx;
27997 if (DEFAULT_ABI == ABI_V4)
27998 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27999 /* Prevent reordering memory accesses against stack pointer restore. */
28000 else if (cfun->calls_alloca
28001 || offset_below_red_zone_p (-info->total_size))
28002 rs6000_emit_stack_tie (frame_reg_rtx, true);
28004 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28005 GEN_INT (info->total_size)));
28006 frame_off = 0;
28008 else if (info->push_p
28009 && DEFAULT_ABI != ABI_V4
28010 && !crtl->calls_eh_return)
28012 /* Prevent reordering memory accesses against stack pointer restore. */
28013 if (cfun->calls_alloca
28014 || offset_below_red_zone_p (-info->total_size))
28015 rs6000_emit_stack_tie (frame_reg_rtx, false);
28016 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28017 GEN_INT (info->total_size)));
28018 frame_off = 0;
28020 if (insn && frame_reg_rtx == sp_reg_rtx)
28022 if (cfa_restores)
28024 REG_NOTES (insn) = cfa_restores;
28025 cfa_restores = NULL_RTX;
28027 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28028 RTX_FRAME_RELATED_P (insn) = 1;
28031 /* Restore AltiVec registers if we have not done so already. */
28032 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28033 && info->altivec_size != 0
28034 && (DEFAULT_ABI == ABI_V4
28035 || !offset_below_red_zone_p (info->altivec_save_offset)))
28037 int i;
28039 if ((strategy & REST_INLINE_VRS) == 0)
28041 int end_save = info->altivec_save_offset + info->altivec_size;
28042 int ptr_off;
28043 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28044 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28045 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28047 if (end_save + frame_off != 0)
28049 rtx offset = GEN_INT (end_save + frame_off);
28051 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28053 else
28054 emit_move_insn (ptr_reg, frame_reg_rtx);
28056 ptr_off = -end_save;
28057 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28058 info->altivec_save_offset + ptr_off,
28059 0, V4SImode, SAVRES_VR);
28060 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28062 /* Frame reg was clobbered by out-of-line save. Restore it
28063 from ptr_reg, and if we are calling an out-of-line gpr or
28064 fpr restore, set up the correct pointer and offset. */
28065 unsigned newptr_regno = 1;
28066 if (!restoring_GPRs_inline)
28068 bool lr = info->gp_save_offset + info->gp_size == 0;
28069 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28070 newptr_regno = ptr_regno_for_savres (sel);
28071 end_save = info->gp_save_offset + info->gp_size;
28073 else if (!restoring_FPRs_inline)
28075 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28076 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28077 newptr_regno = ptr_regno_for_savres (sel);
28078 end_save = info->fp_save_offset + info->fp_size;
28081 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28082 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28084 if (end_save + ptr_off != 0)
28086 rtx offset = GEN_INT (end_save + ptr_off);
28088 frame_off = -end_save;
28089 if (TARGET_32BIT)
28090 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28091 ptr_reg, offset));
28092 else
28093 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28094 ptr_reg, offset));
28096 else
28098 frame_off = ptr_off;
28099 emit_move_insn (frame_reg_rtx, ptr_reg);
28103 else
28105 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28106 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28108 rtx addr, areg, mem, insn;
28109 rtx reg = gen_rtx_REG (V4SImode, i);
28110 HOST_WIDE_INT offset
28111 = (info->altivec_save_offset + frame_off
28112 + 16 * (i - info->first_altivec_reg_save));
28114 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
28116 mem = gen_frame_mem (V4SImode,
28117 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28118 GEN_INT (offset)));
28119 insn = gen_rtx_SET (reg, mem);
28121 else
28123 areg = gen_rtx_REG (Pmode, 0);
28124 emit_move_insn (areg, GEN_INT (offset));
28126 /* AltiVec addressing mode is [reg+reg]. */
28127 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28128 mem = gen_frame_mem (V4SImode, addr);
28130 /* Rather than emitting a generic move, force use of the
28131 lvx instruction, which we always want. In particular we
28132 don't want lxvd2x/xxpermdi for little endian. */
28133 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28136 (void) emit_insn (insn);
28140 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28141 if (((strategy & REST_INLINE_VRS) == 0
28142 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28143 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28145 rtx reg = gen_rtx_REG (V4SImode, i);
28146 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28150 /* Restore VRSAVE if we have not done so already. */
28151 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28152 && info->vrsave_size != 0
28153 && (DEFAULT_ABI == ABI_V4
28154 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28156 rtx reg;
28158 reg = gen_rtx_REG (SImode, 12);
28159 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28160 info->vrsave_save_offset + frame_off));
28162 emit_insn (generate_set_vrsave (reg, info, 1));
28165 /* If we exit by an out-of-line restore function on ABI_V4 then that
28166 function will deallocate the stack, so we don't need to worry
28167 about the unwinder restoring cr from an invalid stack frame
28168 location. */
28169 exit_func = (!restoring_FPRs_inline
28170 || (!restoring_GPRs_inline
28171 && info->first_fp_reg_save == 64));
28173 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28174 *separate* slots if the routine calls __builtin_eh_return, so
28175 that they can be independently restored by the unwinder. */
28176 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28178 int i, cr_off = info->ehcr_offset;
28180 for (i = 0; i < 8; i++)
28181 if (!call_used_regs[CR0_REGNO + i])
28183 rtx reg = gen_rtx_REG (SImode, 0);
28184 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28185 cr_off + frame_off));
28187 insn = emit_insn (gen_movsi_to_cr_one
28188 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28190 if (!exit_func && flag_shrink_wrap)
28192 add_reg_note (insn, REG_CFA_RESTORE,
28193 gen_rtx_REG (SImode, CR0_REGNO + i));
28195 RTX_FRAME_RELATED_P (insn) = 1;
28198 cr_off += reg_size;
28202 /* Get the old lr if we saved it. If we are restoring registers
28203 out-of-line, then the out-of-line routines can do this for us. */
28204 if (restore_lr && restoring_GPRs_inline)
28205 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28207 /* Get the old cr if we saved it. */
28208 if (info->cr_save_p)
28210 unsigned cr_save_regno = 12;
28212 if (!restoring_GPRs_inline)
28214 /* Ensure we don't use the register used by the out-of-line
28215 gpr register restore below. */
28216 bool lr = info->gp_save_offset + info->gp_size == 0;
28217 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28218 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28220 if (gpr_ptr_regno == 12)
28221 cr_save_regno = 11;
28222 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28224 else if (REGNO (frame_reg_rtx) == 12)
28225 cr_save_regno = 11;
28227 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28228 info->cr_save_offset + frame_off,
28229 exit_func);
28232 /* Set LR here to try to overlap restores below. */
28233 if (restore_lr && restoring_GPRs_inline)
28234 restore_saved_lr (0, exit_func);
28236 /* Load exception handler data registers, if needed. */
28237 if (crtl->calls_eh_return)
28239 unsigned int i, regno;
28241 if (TARGET_AIX)
28243 rtx reg = gen_rtx_REG (reg_mode, 2);
28244 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28245 frame_off + RS6000_TOC_SAVE_SLOT));
28248 for (i = 0; ; ++i)
28250 rtx mem;
28252 regno = EH_RETURN_DATA_REGNO (i);
28253 if (regno == INVALID_REGNUM)
28254 break;
28256 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28257 info->ehrd_offset + frame_off
28258 + reg_size * (int) i);
28260 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28264 /* Restore GPRs. This is done as a PARALLEL if we are using
28265 the load-multiple instructions. */
28266 if (!restoring_GPRs_inline)
28268 /* We are jumping to an out-of-line function. */
28269 rtx ptr_reg;
28270 int end_save = info->gp_save_offset + info->gp_size;
28271 bool can_use_exit = end_save == 0;
28272 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28273 int ptr_off;
28275 /* Emit stack reset code if we need it. */
28276 ptr_regno = ptr_regno_for_savres (sel);
28277 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28278 if (can_use_exit)
28279 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28280 else if (end_save + frame_off != 0)
28281 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28282 GEN_INT (end_save + frame_off)));
28283 else if (REGNO (frame_reg_rtx) != ptr_regno)
28284 emit_move_insn (ptr_reg, frame_reg_rtx);
28285 if (REGNO (frame_reg_rtx) == ptr_regno)
28286 frame_off = -end_save;
28288 if (can_use_exit && info->cr_save_p)
28289 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28291 ptr_off = -end_save;
28292 rs6000_emit_savres_rtx (info, ptr_reg,
28293 info->gp_save_offset + ptr_off,
28294 info->lr_save_offset + ptr_off,
28295 reg_mode, sel);
28297 else if (using_load_multiple)
28299 rtvec p;
28300 p = rtvec_alloc (32 - info->first_gp_reg_save);
28301 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28302 RTVEC_ELT (p, i)
28303 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28304 frame_reg_rtx,
28305 info->gp_save_offset + frame_off + reg_size * i);
28306 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28308 else
28310 int offset = info->gp_save_offset + frame_off;
28311 for (i = info->first_gp_reg_save; i < 32; i++)
28313 if (rs6000_reg_live_or_pic_offset_p (i)
28314 && !cfun->machine->gpr_is_wrapped_separately[i])
28316 rtx reg = gen_rtx_REG (reg_mode, i);
28317 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28320 offset += reg_size;
28324 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28326 /* If the frame pointer was used then we can't delay emitting
28327 a REG_CFA_DEF_CFA note. This must happen on the insn that
28328 restores the frame pointer, r31. We may have already emitted
28329 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28330 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28331 be harmless if emitted. */
28332 if (frame_pointer_needed)
28334 insn = get_last_insn ();
28335 add_reg_note (insn, REG_CFA_DEF_CFA,
28336 plus_constant (Pmode, frame_reg_rtx, frame_off));
28337 RTX_FRAME_RELATED_P (insn) = 1;
28340 /* Set up cfa_restores. We always need these when
28341 shrink-wrapping. If not shrink-wrapping then we only need
28342 the cfa_restore when the stack location is no longer valid.
28343 The cfa_restores must be emitted on or before the insn that
28344 invalidates the stack, and of course must not be emitted
28345 before the insn that actually does the restore. The latter
28346 is why it is a bad idea to emit the cfa_restores as a group
28347 on the last instruction here that actually does a restore:
28348 that insn may be reordered with respect to others doing
28349 restores. */
28350 if (flag_shrink_wrap
28351 && !restoring_GPRs_inline
28352 && info->first_fp_reg_save == 64)
28353 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28355 for (i = info->first_gp_reg_save; i < 32; i++)
28356 if (!restoring_GPRs_inline
28357 || using_load_multiple
28358 || rs6000_reg_live_or_pic_offset_p (i))
28360 if (cfun->machine->gpr_is_wrapped_separately[i])
28361 continue;
28363 rtx reg = gen_rtx_REG (reg_mode, i);
28364 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28368 if (!restoring_GPRs_inline
28369 && info->first_fp_reg_save == 64)
28371 /* We are jumping to an out-of-line function. */
28372 if (cfa_restores)
28373 emit_cfa_restores (cfa_restores);
28374 return;
28377 if (restore_lr && !restoring_GPRs_inline)
28379 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28380 restore_saved_lr (0, exit_func);
28383 /* Restore fpr's if we need to do it without calling a function. */
28384 if (restoring_FPRs_inline)
28386 int offset = info->fp_save_offset + frame_off;
28387 for (i = info->first_fp_reg_save; i < 64; i++)
28389 if (save_reg_p (i)
28390 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28392 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28393 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28394 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28395 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28396 cfa_restores);
28399 offset += fp_reg_size;
28403 /* If we saved cr, restore it here. Just those that were used. */
28404 if (info->cr_save_p)
28405 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28407 /* If this is V.4, unwind the stack pointer after all of the loads
28408 have been done, or set up r11 if we are restoring fp out of line. */
28409 ptr_regno = 1;
28410 if (!restoring_FPRs_inline)
28412 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28413 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28414 ptr_regno = ptr_regno_for_savres (sel);
28417 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28418 if (REGNO (frame_reg_rtx) == ptr_regno)
28419 frame_off = 0;
28421 if (insn && restoring_FPRs_inline)
28423 if (cfa_restores)
28425 REG_NOTES (insn) = cfa_restores;
28426 cfa_restores = NULL_RTX;
28428 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28429 RTX_FRAME_RELATED_P (insn) = 1;
28432 if (crtl->calls_eh_return)
28434 rtx sa = EH_RETURN_STACKADJ_RTX;
28435 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28438 if (!sibcall && restoring_FPRs_inline)
28440 if (cfa_restores)
28442 /* We can't hang the cfa_restores off a simple return,
28443 since the shrink-wrap code sometimes uses an existing
28444 return. This means there might be a path from
28445 pre-prologue code to this return, and dwarf2cfi code
28446 wants the eh_frame unwinder state to be the same on
28447 all paths to any point. So we need to emit the
28448 cfa_restores before the return. For -m64 we really
28449 don't need epilogue cfa_restores at all, except for
28450 this irritating dwarf2cfi-with-shrink-wrap
28451 requirement; the stack red zone means eh_frame info
28452 from the prologue telling the unwinder to restore
28453 from the stack is perfectly good right to the end of
28454 the function. */
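/* (gen_blockage emits an optimization barrier, giving
   emit_cfa_restores an insn that the scheduler will not move other
   insns across.) */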
28455 emit_insn (gen_blockage ());
28456 emit_cfa_restores (cfa_restores);
28457 cfa_restores = NULL_RTX;
28460 emit_jump_insn (targetm.gen_simple_return ());
28463 if (!sibcall && !restoring_FPRs_inline)
28465 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28466 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28467 int elt = 0;
28468 RTVEC_ELT (p, elt++) = ret_rtx;
28469 if (lr)
28470 RTVEC_ELT (p, elt++)
28471 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28473 /* We have to restore more than two FP registers, so branch to the
28474 restore function. It will return to our caller. */
28475 int i;
28476 int reg;
28477 rtx sym;
28479 if (flag_shrink_wrap)
28480 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28482 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28483 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28484 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
28485 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28487 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28489 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28491 RTVEC_ELT (p, elt++)
28492 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28493 if (flag_shrink_wrap)
28494 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28497 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28500 if (cfa_restores)
28502 if (sibcall)
28503 /* Ensure the cfa_restores are hung off an insn that won't
28504 be reordered above other restores. */
28505 emit_insn (gen_blockage ());
28507 emit_cfa_restores (cfa_restores);
28511 /* Write function epilogue. */
28513 static void
28514 rs6000_output_function_epilogue (FILE *file,
28515 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
28517 #if TARGET_MACHO
28518 macho_branch_islands ();
28521 rtx_insn *insn = get_last_insn ();
28522 rtx_insn *deleted_debug_label = NULL;
28524 /* Mach-O doesn't support labels at the end of objects, so if
28525 it looks like we might want one, take special action.
28527 First, collect any sequence of deleted debug labels. */
28528 while (insn
28529 && NOTE_P (insn)
28530 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28532 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
28533 a nop; instead set their CODE_LABEL_NUMBER to -1; otherwise
28534 there would be code generation differences
28535 between -g and -g0. */
28536 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28537 deleted_debug_label = insn;
28538 insn = PREV_INSN (insn);
28541 /* Second, if we have:
28542 label:
28543 barrier
28544 then this needs to be detected, so skip past the barrier. */
28546 if (insn && BARRIER_P (insn))
28547 insn = PREV_INSN (insn);
28549 /* Up to now we've only seen notes or barriers. */
28550 if (insn)
28552 if (LABEL_P (insn)
28553 || (NOTE_P (insn)
28554 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28555 /* Trailing label: <barrier>. */
28556 fputs ("\tnop\n", file);
28557 else
28559 /* Lastly, see if we have a completely empty function body. */
28560 while (insn && ! INSN_P (insn))
28561 insn = PREV_INSN (insn);
28562 /* If we don't find any insns, we've got an empty function body;
28563 i.e. completely empty, without a return or branch. This is
28564 taken as the case where a function body has been removed
28565 because it contains an inline __builtin_unreachable(). GCC
28566 states that reaching __builtin_unreachable() means UB so we're
28567 not obliged to do anything special; however, we want
28568 non-zero-sized function bodies. To meet this, and help the
28569 user out, let's trap the case. */
28570 if (insn == NULL)
28571 fputs ("\ttrap\n", file);
28574 else if (deleted_debug_label)
28575 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28576 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28577 CODE_LABEL_NUMBER (insn) = -1;
28579 #endif
28581 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28582 on its format.
28584 We don't output a traceback table if -finhibit-size-directive was
28585 used. The documentation for -finhibit-size-directive reads
28586 ``don't output a @code{.size} assembler directive, or anything
28587 else that would cause trouble if the function is split in the
28588 middle, and the two halves are placed at locations far apart in
28589 memory.'' The traceback table has this property, since it
28590 includes the offset from the start of the function to the
28591 traceback table itself.
28593 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28594 different traceback table. */
28595 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28596 && ! flag_inhibit_size_directive
28597 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28599 const char *fname = NULL;
28600 const char *language_string = lang_hooks.name;
28601 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28602 int i;
28603 int optional_tbtab;
28604 rs6000_stack_t *info = rs6000_stack_info ();
28606 if (rs6000_traceback == traceback_full)
28607 optional_tbtab = 1;
28608 else if (rs6000_traceback == traceback_part)
28609 optional_tbtab = 0;
28610 else
28611 optional_tbtab = !optimize_size && !TARGET_ELF;
28613 if (optional_tbtab)
28615 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28616 while (*fname == '.') /* V.4 encodes . in the name */
28617 fname++;
28619 /* Need label immediately before tbtab, so we can compute
28620 its offset from the function start. */
28621 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28622 ASM_OUTPUT_LABEL (file, fname);
28625 /* The .tbtab pseudo-op can only be used for the first eight
28626 expressions, since it can't handle the possibly variable
28627 length fields that follow. However, if you omit the optional
28628 fields, the assembler outputs zeros for all optional fields
28629 anyway, giving each variable-length field its minimum length
28630 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28631 pseudo-op at all. */
28633 /* An all-zero word flags the start of the tbtab, for debuggers
28634 that have to find it by searching forward from the entry
28635 point or from the current pc. */
28636 fputs ("\t.long 0\n", file);
28638 /* Tbtab format type. Use format type 0. */
28639 fputs ("\t.byte 0,", file);
28641 /* Language type. Unfortunately, there does not seem to be any
28642 official way to discover the language being compiled, so we
28643 use language_string.
28644 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28645 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28646 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28647 either, so for now use 0. */
28648 if (lang_GNU_C ()
28649 || ! strcmp (language_string, "GNU GIMPLE")
28650 || ! strcmp (language_string, "GNU Go")
28651 || ! strcmp (language_string, "libgccjit"))
28652 i = 0;
28653 else if (! strcmp (language_string, "GNU F77")
28654 || lang_GNU_Fortran ())
28655 i = 1;
28656 else if (! strcmp (language_string, "GNU Pascal"))
28657 i = 2;
28658 else if (! strcmp (language_string, "GNU Ada"))
28659 i = 3;
28660 else if (lang_GNU_CXX ()
28661 || ! strcmp (language_string, "GNU Objective-C++"))
28662 i = 9;
28663 else if (! strcmp (language_string, "GNU Java"))
28664 i = 13;
28665 else if (! strcmp (language_string, "GNU Objective-C"))
28666 i = 14;
28667 else
28668 gcc_unreachable ();
28669 fprintf (file, "%d,", i);
28671 /* 8 single bit fields: global linkage (not set for C extern linkage,
28672 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28673 from start of procedure stored in tbtab, internal function, function
28674 has controlled storage, function has no toc, function uses fp,
28675 function logs/aborts fp operations. */
28676 /* Assume that fp operations are used if any fp reg must be saved. */
28677 fprintf (file, "%d,",
28678 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28680 /* 6 bitfields: function is interrupt handler, name present in
28681 proc table, function calls alloca, on condition directives
28682 (controls stack walks, 3 bits), saves condition reg, saves
28683 link reg. */
28684 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28685 set up as a frame pointer, even when there is no alloca call. */
28686 fprintf (file, "%d,",
28687 ((optional_tbtab << 6)
28688 | ((optional_tbtab & frame_pointer_needed) << 5)
28689 | (info->cr_save_p << 1)
28690 | (info->lr_save_p)));
28692 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28693 (6 bits). */
28694 fprintf (file, "%d,",
28695 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28697 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28698 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28700 if (optional_tbtab)
28702 /* Compute the parameter info from the function decl argument
28703 list. */
28704 tree decl;
28705 int next_parm_info_bit = 31;
28707 for (decl = DECL_ARGUMENTS (current_function_decl);
28708 decl; decl = DECL_CHAIN (decl))
28710 rtx parameter = DECL_INCOMING_RTL (decl);
28711 machine_mode mode = GET_MODE (parameter);
28713 if (GET_CODE (parameter) == REG)
28715 if (SCALAR_FLOAT_MODE_P (mode))
28717 int bits;
28719 float_parms++;
28721 switch (mode)
28723 case SFmode:
28724 case SDmode:
28725 bits = 0x2;
28726 break;
28728 case DFmode:
28729 case DDmode:
28730 case TFmode:
28731 case TDmode:
28732 case IFmode:
28733 case KFmode:
28734 bits = 0x3;
28735 break;
28737 default:
28738 gcc_unreachable ();
28741 /* If only one bit will fit, don't OR in this entry. */
28742 if (next_parm_info_bit > 0)
28743 parm_info |= (bits << (next_parm_info_bit - 1));
28744 next_parm_info_bit -= 2;
28746 else
28748 fixed_parms += ((GET_MODE_SIZE (mode)
28749 + (UNITS_PER_WORD - 1))
28750 / UNITS_PER_WORD);
28751 next_parm_info_bit -= 1;
28757 /* Number of fixed point parameters. */
28758 /* This is actually the number of words of fixed point parameters; thus
28759 an 8 byte struct counts as 2; and thus the maximum value is 8. */
28760 fprintf (file, "%d,", fixed_parms);
28762 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28763 all on stack. */
28764 /* This is actually the number of fp registers that hold parameters;
28765 and thus the maximum value is 13. */
28766 /* Set parameters on stack bit if parameters are not in their original
28767 registers, regardless of whether they are on the stack? Xlc
28768 seems to set the bit when not optimizing. */
28769 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28771 if (optional_tbtab)
28773 /* Optional fields follow. Some are variable length. */
28775 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28776 float, 11 double float. */
28777 /* There is an entry for each parameter in a register, in the order
28778 that they occur in the parameter list. Any intervening arguments
28779 on the stack are ignored. If the list overflows a long (max
28780 possible length 34 bits) then completely leave off all elements
28781 that don't fit. */
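/* Worked example (hypothetical signature): for f (int, double,
   float) passed entirely in registers, the int leaves bit 31 at 0,
   the double puts 11 in bits 30-29, and the float puts 10 in bits
   28-27, so parm_info == 0x70000000. */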
28782 /* Only emit this long if there was at least one parameter. */
28783 if (fixed_parms || float_parms)
28784 fprintf (file, "\t.long %d\n", parm_info);
28786 /* Offset from start of code to tb table. */
28787 fputs ("\t.long ", file);
28788 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28789 RS6000_OUTPUT_BASENAME (file, fname);
28790 putc ('-', file);
28791 rs6000_output_function_entry (file, fname);
28792 putc ('\n', file);
28794 /* Interrupt handler mask. */
28795 /* Omit this long, since we never set the interrupt handler bit
28796 above. */
28798 /* Number of CTL (controlled storage) anchors. */
28799 /* Omit this long, since the has_ctl bit is never set above. */
28801 /* Displacement into stack of each CTL anchor. */
28802 /* Omit this list of longs, because there are no CTL anchors. */
28804 /* Length of function name. */
28805 if (*fname == '*')
28806 ++fname;
28807 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28809 /* Function name. */
28810 assemble_string (fname, strlen (fname));
28812 /* Register for alloca automatic storage; this is always reg 31.
28813 Only emit this if the alloca bit was set above. */
28814 if (frame_pointer_needed)
28815 fputs ("\t.byte 31\n", file);
28817 fputs ("\t.align 2\n", file);
28821 /* Arrange to define .LCTOC1 label, if not already done. */
28822 if (need_toc_init)
28824 need_toc_init = 0;
28825 if (!toc_initialized)
28827 switch_to_section (toc_section);
28828 switch_to_section (current_function_section ());
28833 /* -fsplit-stack support. */
28835 /* A SYMBOL_REF for __morestack. */
28836 static GTY(()) rtx morestack_ref;
28838 static rtx
28839 gen_add3_const (rtx rt, rtx ra, long c)
28841 if (TARGET_64BIT)
28842 return gen_adddi3 (rt, ra, GEN_INT (c));
28843 else
28844 return gen_addsi3 (rt, ra, GEN_INT (c));
28847 /* Emit -fsplit-stack prologue, which goes before the regular function
28848 prologue (at local entry point in the case of ELFv2). */
28850 void
28851 rs6000_expand_split_stack_prologue (void)
28853 rs6000_stack_t *info = rs6000_stack_info ();
28854 unsigned HOST_WIDE_INT allocate;
28855 long alloc_hi, alloc_lo;
28856 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28857 rtx_insn *insn;
28859 gcc_assert (flag_split_stack && reload_completed);
28861 if (!info->push_p)
28862 return;
28864 if (global_regs[29])
28866 error ("-fsplit-stack uses register r29");
28867 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
28868 "conflicts with %qD", global_regs_decl[29]);
28871 allocate = info->total_size;
28872 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
28874 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
28875 return;
28877 if (morestack_ref == NULL_RTX)
28879 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
28880 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
28881 | SYMBOL_FLAG_FUNCTION);
28884 r0 = gen_rtx_REG (Pmode, 0);
28885 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28886 r12 = gen_rtx_REG (Pmode, 12);
28887 emit_insn (gen_load_split_stack_limit (r0));
28888 /* Always emit two insns here to calculate the requested stack,
28889 so that the linker can edit them when adjusting size for calling
28890 non-split-stack code. */
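/* Worked example (illustrative value): for allocate == 0x12345,
   alloc_hi = (-0x12345 + 0x8000) & ~0xffff = -0x10000 and
   alloc_lo = -0x12345 - (-0x10000) = -0x2345, i.e.
   r12 = r1 - 0x10000 then r12 = r12 - 0x2345; the 0x8000 rounding
   keeps alloc_lo within a signed 16-bit immediate. */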
28891 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
28892 alloc_lo = -allocate - alloc_hi;
28893 if (alloc_hi != 0)
28895 emit_insn (gen_add3_const (r12, r1, alloc_hi));
28896 if (alloc_lo != 0)
28897 emit_insn (gen_add3_const (r12, r12, alloc_lo));
28898 else
28899 emit_insn (gen_nop ());
28901 else
28903 emit_insn (gen_add3_const (r12, r1, alloc_lo));
28904 emit_insn (gen_nop ());
28907 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
28908 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
28909 ok_label = gen_label_rtx ();
28910 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28911 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
28912 gen_rtx_LABEL_REF (VOIDmode, ok_label),
28913 pc_rtx);
28914 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28915 JUMP_LABEL (insn) = ok_label;
28916 /* Mark the jump as very likely to be taken. */
28917 add_reg_br_prob_note (insn, profile_probability::very_likely ());
28919 lr = gen_rtx_REG (Pmode, LR_REGNO);
28920 insn = emit_move_insn (r0, lr);
28921 RTX_FRAME_RELATED_P (insn) = 1;
28922 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
28923 RTX_FRAME_RELATED_P (insn) = 1;
28925 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
28926 const0_rtx, const0_rtx));
28927 call_fusage = NULL_RTX;
28928 use_reg (&call_fusage, r12);
28929 /* Say the call uses r0, even though it doesn't, to stop regrename
28930 from twiddling with the insns saving lr, trashing args for cfun.
28931 The insns restoring lr are similarly protected by making
28932 split_stack_return use r0. */
28933 use_reg (&call_fusage, r0);
28934 add_function_usage_to (insn, call_fusage);
28935 /* Indicate that this function can't jump to non-local gotos. */
28936 make_reg_eh_region_note_nothrow_nononlocal (insn);
28937 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
28938 insn = emit_move_insn (lr, r0);
28939 add_reg_note (insn, REG_CFA_RESTORE, lr);
28940 RTX_FRAME_RELATED_P (insn) = 1;
28941 emit_insn (gen_split_stack_return ());
28943 emit_label (ok_label);
28944 LABEL_NUSES (ok_label) = 1;
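/* Illustrative sketch, not part of the build: the addis/addi pair above
   splits -allocate into a high part that is a multiple of 64K and a low
   part that fits a signed 16-bit immediate, so the two adds reconstruct
   the exact frame adjustment.  A minimal restatement of that arithmetic
   (assuming the frame size already passed the 2G check above):  */
#if 0
static void
split_stack_hi_lo_check (long allocate)
{
  long hi = (-allocate + 0x8000) & ~0xffffL;	/* round to a 64K multiple */
  long lo = -allocate - hi;			/* remainder for the addi */
  gcc_assert (hi + lo == -allocate);		/* the pair is exact */
  gcc_assert (lo >= -0x8000 && lo <= 0x7fff);	/* fits a signed 16-bit field */
}
#endif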
28947 /* Return the internal arg pointer used for function incoming
28948 arguments. When -fsplit-stack is in effect, the arg pointer is r12, so we
28949 need to copy it to a pseudo in order for it to be preserved over calls
28950 and suchlike. We'd really like to use a pseudo here for the
28951 internal arg pointer but data-flow analysis is not prepared to
28952 accept pseudos as live at the beginning of a function. */
28954 static rtx
28955 rs6000_internal_arg_pointer (void)
28957 if (flag_split_stack
28958 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
28959 == NULL))
28962 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
28964 rtx pat;
28966 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
28967 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
28969 /* Put the pseudo initialization right after the note at the
28970 beginning of the function. */
28971 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
28972 gen_rtx_REG (Pmode, 12));
28973 push_topmost_sequence ();
28974 emit_insn_after (pat, get_insns ());
28975 pop_topmost_sequence ();
28977 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
28978 FIRST_PARM_OFFSET (current_function_decl));
28980 return virtual_incoming_args_rtx;
28983 /* We may have to tell the dataflow pass that the split stack prologue
28984 is initializing a register. */
28986 static void
28987 rs6000_live_on_entry (bitmap regs)
28989 if (flag_split_stack)
28990 bitmap_set_bit (regs, 12);
28993 /* Emit -fsplit-stack dynamic stack allocation space check. */
28995 void
28996 rs6000_split_stack_space_check (rtx size, rtx label)
28998 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28999 rtx limit = gen_reg_rtx (Pmode);
29000 rtx requested = gen_reg_rtx (Pmode);
29001 rtx cmp = gen_reg_rtx (CCUNSmode);
29002 rtx jump;
29004 emit_insn (gen_load_split_stack_limit (limit));
29005 if (CONST_INT_P (size))
29006 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29007 else
29009 size = force_reg (Pmode, size);
29010 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29012 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29013 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29014 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29015 gen_rtx_LABEL_REF (VOIDmode, label),
29016 pc_rtx);
29017 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29018 JUMP_LABEL (jump) = label;
29021 /* Output the assembler code for a thunk function, used to implement
29022 C++ virtual function calls with
29023 multiple inheritance. The thunk acts as a wrapper around a virtual
29024 function, adjusting the implicit object parameter before handing
29025 control off to the real function.
29027 First, emit code to add the integer DELTA to the location that
29028 contains the incoming first argument. Assume that this argument
29029 contains a pointer, and is the one used to pass the `this' pointer
29030 in C++. This is the incoming argument *before* the function
29031 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29032 values of all other incoming arguments.
29034 After the addition, emit code to jump to FUNCTION, which is a
29035 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29036 not touch the return address. Hence returning from FUNCTION will
29037 return to whoever called the current `thunk'.
29039 The effect must be as if FUNCTION had been called directly with the
29040 adjusted first argument. This function is responsible for emitting
29041 all of the code for a thunk function; output_function_prologue()
29042 and output_function_epilogue() are not invoked.
29044 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29045 been extracted from it.) It might possibly be useful on some
29046 targets, but probably not.
29048 If the target does not implement this hook, the target-independent code in the
29049 C++ frontend will generate a less efficient heavyweight thunk that
29050 calls FUNCTION instead of jumping to it. The generic approach does
29051 not support varargs. */
29053 static void
29054 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29055 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29056 tree function)
29058 rtx this_rtx, funexp;
29059 rtx_insn *insn;
29061 reload_completed = 1;
29062 epilogue_completed = 1;
29064 /* Mark the end of the (empty) prologue. */
29065 emit_note (NOTE_INSN_PROLOGUE_END);
29067 /* Find the "this" pointer. If the function returns a structure,
29068 the structure return pointer occupies r3 and "this" arrives in r4. */
29069 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29070 this_rtx = gen_rtx_REG (Pmode, 4);
29071 else
29072 this_rtx = gen_rtx_REG (Pmode, 3);
29074 /* Apply the constant offset, if required. */
29075 if (delta)
29076 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29078 /* Apply the offset from the vtable, if required. */
29079 if (vcall_offset)
29081 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29082 rtx tmp = gen_rtx_REG (Pmode, 12);
29084 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29085 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29087 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29088 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29090 else
29092 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29094 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29096 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29099 /* Generate a tail call to the target function. */
29100 if (!TREE_USED (function))
29102 assemble_external (function);
29103 TREE_USED (function) = 1;
29105 funexp = XEXP (DECL_RTL (function), 0);
29106 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29108 #if TARGET_MACHO
29109 if (MACHOPIC_INDIRECT)
29110 funexp = machopic_indirect_call_target (funexp);
29111 #endif
29113 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29114 generate sibcall RTL explicitly. */
29115 insn = emit_call_insn (
29116 gen_rtx_PARALLEL (VOIDmode,
29117 gen_rtvec (3,
29118 gen_rtx_CALL (VOIDmode,
29119 funexp, const0_rtx),
29120 gen_rtx_USE (VOIDmode, const0_rtx),
29121 simple_return_rtx)));
29122 SIBLING_CALL_P (insn) = 1;
29123 emit_barrier ();
29125 /* Run just enough of rest_of_compilation to get the insns emitted.
29126 There's not really enough bulk here to make other passes such as
29127 instruction scheduling worthwhile. Note that use_thunk calls
29128 assemble_start_function and assemble_end_function. */
29129 insn = get_insns ();
29130 shorten_branches (insn);
29131 final_start_function (insn, file, 1);
29132 final (insn, file, 1);
29133 final_end_function ();
29135 reload_completed = 0;
29136 epilogue_completed = 0;
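/* Illustrative sketch, not part of the build: the range test above,
   (unsigned HOST_WIDE_INT) vcall_offset + 0x8000 >= 0x10000, is the usual
   unsigned-wraparound idiom for "VCALL_OFFSET does not fit in a signed
   16-bit D-form displacement".  Restated on its own:  */
#if 0
static bool
offset_needs_two_insns (HOST_WIDE_INT vcall_offset)
{
  /* True iff VCALL_OFFSET lies outside [-0x8000, 0x7fff].  */
  return (unsigned HOST_WIDE_INT) vcall_offset + 0x8000 >= 0x10000;
}
#endif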
29139 /* A quick summary of the various types of 'constant-pool tables'
29140 under PowerPC:
29142 Target      Flags           Name             One table per
29143 AIX         (none)          AIX TOC          object file
29144 AIX         -mfull-toc      AIX TOC          object file
29145 AIX         -mminimal-toc   AIX minimal TOC  translation unit
29146 SVR4/EABI   (none)          SVR4 SDATA       object file
29147 SVR4/EABI   -fpic           SVR4 pic         object file
29148 SVR4/EABI   -fPIC           SVR4 PIC         translation unit
29149 SVR4/EABI   -mrelocatable   EABI TOC         function
29150 SVR4/EABI   -maix           AIX TOC          object file
29151 SVR4/EABI   -maix -mminimal-toc
29152                             AIX minimal TOC  translation unit
29154 Name             Reg.  reg set by  entries made by  addrs?  fp?      sum?
29157 AIX TOC          2     crt0        as               Y       option   option
29158 AIX minimal TOC  30    prolog      gcc              Y       Y        option
29159 SVR4 SDATA       13    crt0        gcc              N       Y        N
29160 SVR4 pic         30    prolog      ld               Y       not yet  N
29161 SVR4 PIC         30    prolog      gcc              Y       option   option
29162 EABI TOC         30    prolog      gcc              Y       option   option
29166 /* Hash functions for the hash table. */
29168 static unsigned
29169 rs6000_hash_constant (rtx k)
29171 enum rtx_code code = GET_CODE (k);
29172 machine_mode mode = GET_MODE (k);
29173 unsigned result = (code << 3) ^ mode;
29174 const char *format;
29175 int flen, fidx;
29177 format = GET_RTX_FORMAT (code);
29178 flen = strlen (format);
29179 fidx = 0;
29181 switch (code)
29183 case LABEL_REF:
29184 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29186 case CONST_WIDE_INT:
29188 int i;
29189 flen = CONST_WIDE_INT_NUNITS (k);
29190 for (i = 0; i < flen; i++)
29191 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29192 return result;
29195 case CONST_DOUBLE:
29196 if (mode != VOIDmode)
29197 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29198 flen = 2;
29199 break;
29201 case CODE_LABEL:
29202 fidx = 3;
29203 break;
29205 default:
29206 break;
29209 for (; fidx < flen; fidx++)
29210 switch (format[fidx])
29212 case 's':
29214 unsigned i, len;
29215 const char *str = XSTR (k, fidx);
29216 len = strlen (str);
29217 result = result * 613 + len;
29218 for (i = 0; i < len; i++)
29219 result = result * 613 + (unsigned) str[i];
29220 break;
29222 case 'u':
29223 case 'e':
29224 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29225 break;
29226 case 'i':
29227 case 'n':
29228 result = result * 613 + (unsigned) XINT (k, fidx);
29229 break;
29230 case 'w':
29231 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29232 result = result * 613 + (unsigned) XWINT (k, fidx);
29233 else
29235 size_t i;
29236 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29237 result = result * 613 + (unsigned) (XWINT (k, fidx)
29238 >> CHAR_BIT * i);
29240 break;
29241 case '0':
29242 break;
29243 default:
29244 gcc_unreachable ();
29247 return result;
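/* Illustrative sketch, not part of the build: the 's' case above mixes a
   string operand into the running hash with the multiplier 613; isolated,
   the scheme looks like this:  */
#if 0
static unsigned
hash_string_613 (unsigned result, const char *str)
{
  unsigned i, len = strlen (str);
  result = result * 613 + len;			/* fold in the length first */
  for (i = 0; i < len; i++)
    result = result * 613 + (unsigned) str[i];	/* then each character */
  return result;
}
#endif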
29250 hashval_t
29251 toc_hasher::hash (toc_hash_struct *thc)
29253 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29256 /* Compare H1 and H2 for equivalence. */
29258 bool
29259 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29261 rtx r1 = h1->key;
29262 rtx r2 = h2->key;
29264 if (h1->key_mode != h2->key_mode)
29265 return false;
29267 return rtx_equal_p (r1, r2);
29270 /* These are the names given by the C++ front end to vtables and
29271 vtable-like objects. Ideally, this logic should not be here;
29272 instead, there should be some programmatic way of inquiring as
29273 to whether or not an object is a vtable. */
29275 #define VTABLE_NAME_P(NAME) \
29276 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
29277 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
29278 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
29279 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
29280 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
29282 #ifdef NO_DOLLAR_IN_LABEL
29283 /* Return a GGC-allocated character string translating dollar signs in
29284 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29286 const char *
29287 rs6000_xcoff_strip_dollar (const char *name)
29289 char *strip, *p;
29290 const char *q;
29291 size_t len;
29293 q = (const char *) strchr (name, '$');
29295 if (q == 0 || q == name)
29296 return name;
29298 len = strlen (name);
29299 strip = XALLOCAVEC (char, len + 1);
29300 strcpy (strip, name);
29301 p = strip + (q - name);
29302 while (p)
29304 *p = '_';
29305 p = strchr (p + 1, '$');
29308 return ggc_alloc_string (strip, len);
29310 #endif
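/* Hypothetical usage sketch, not part of the build: the loop above
   rewrites the first '$' found past position 0 and every '$' after it,
   so a name such as "foo$bar$baz" comes back as "foo_bar_baz", while a
   name with no '$' (or only a leading one) is returned unchanged.  */
#if 0
const char *clean = rs6000_xcoff_strip_dollar ("foo$bar$baz");
/* CLEAN now points at the GGC-allocated string "foo_bar_baz".  */
#endif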
29312 void
29313 rs6000_output_symbol_ref (FILE *file, rtx x)
29315 const char *name = XSTR (x, 0);
29317 /* Currently C++ toc references to vtables can be emitted before it
29318 is decided whether the vtable is public or private. If this is
29319 the case, then the linker will eventually complain that there is
29320 a reference to an unknown section. Thus, for vtables only,
29321 we emit the TOC reference to reference the identifier and not the
29322 symbol. */
29323 if (VTABLE_NAME_P (name))
29325 RS6000_OUTPUT_BASENAME (file, name);
29327 else
29328 assemble_name (file, name);
29331 /* Output a TOC entry. We derive the entry name from what is being
29332 written. */
29334 void
29335 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29337 char buf[256];
29338 const char *name = buf;
29339 rtx base = x;
29340 HOST_WIDE_INT offset = 0;
29342 gcc_assert (!TARGET_NO_TOC);
29344 /* When the linker won't eliminate them, don't output duplicate
29345 TOC entries (this happens on AIX if there is any kind of TOC,
29346 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29347 CODE_LABELs. */
29348 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29350 struct toc_hash_struct *h;
29352 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29353 time because GGC is not initialized at that point. */
29354 if (toc_hash_table == NULL)
29355 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29357 h = ggc_alloc<toc_hash_struct> ();
29358 h->key = x;
29359 h->key_mode = mode;
29360 h->labelno = labelno;
29362 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29363 if (*found == NULL)
29364 *found = h;
29365 else /* This is indeed a duplicate.
29366 Set this label equal to that label. */
29368 fputs ("\t.set ", file);
29369 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29370 fprintf (file, "%d,", labelno);
29371 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29372 fprintf (file, "%d\n", ((*found)->labelno));
29374 #ifdef HAVE_AS_TLS
29375 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29376 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29377 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29379 fputs ("\t.set ", file);
29380 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29381 fprintf (file, "%d,", labelno);
29382 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29383 fprintf (file, "%d\n", ((*found)->labelno));
29385 #endif
29386 return;
29390 /* If we're going to put a double constant in the TOC, make sure it's
29391 aligned properly when strict alignment is on. */
29392 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29393 && STRICT_ALIGNMENT
29394 && GET_MODE_BITSIZE (mode) >= 64
29395 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29396 ASM_OUTPUT_ALIGN (file, 3);
29399 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29401 /* Handle FP constants specially. Note that if we have a minimal
29402 TOC, things we put here aren't actually in the TOC, so we can allow
29403 FP constants. */
29404 if (GET_CODE (x) == CONST_DOUBLE
29405 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29406 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29408 long k[4];
29410 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29411 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29412 else
29413 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29415 if (TARGET_64BIT)
29417 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29418 fputs (DOUBLE_INT_ASM_OP, file);
29419 else
29420 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29421 k[0] & 0xffffffff, k[1] & 0xffffffff,
29422 k[2] & 0xffffffff, k[3] & 0xffffffff);
29423 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29424 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29425 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29426 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29427 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29428 return;
29430 else
29432 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29433 fputs ("\t.long ", file);
29434 else
29435 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29436 k[0] & 0xffffffff, k[1] & 0xffffffff,
29437 k[2] & 0xffffffff, k[3] & 0xffffffff);
29438 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29439 k[0] & 0xffffffff, k[1] & 0xffffffff,
29440 k[2] & 0xffffffff, k[3] & 0xffffffff);
29441 return;
29444 else if (GET_CODE (x) == CONST_DOUBLE
29445 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29447 long k[2];
29449 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29450 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29451 else
29452 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29454 if (TARGET_64BIT)
29456 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29457 fputs (DOUBLE_INT_ASM_OP, file);
29458 else
29459 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29460 k[0] & 0xffffffff, k[1] & 0xffffffff);
29461 fprintf (file, "0x%lx%08lx\n",
29462 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29463 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29464 return;
29466 else
29468 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29469 fputs ("\t.long ", file);
29470 else
29471 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29472 k[0] & 0xffffffff, k[1] & 0xffffffff);
29473 fprintf (file, "0x%lx,0x%lx\n",
29474 k[0] & 0xffffffff, k[1] & 0xffffffff);
29475 return;
29478 else if (GET_CODE (x) == CONST_DOUBLE
29479 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29481 long l;
29483 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29484 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29485 else
29486 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29488 if (TARGET_64BIT)
29490 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29491 fputs (DOUBLE_INT_ASM_OP, file);
29492 else
29493 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29494 if (WORDS_BIG_ENDIAN)
29495 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29496 else
29497 fprintf (file, "0x%lx\n", l & 0xffffffff);
29498 return;
29500 else
29502 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29503 fputs ("\t.long ", file);
29504 else
29505 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29506 fprintf (file, "0x%lx\n", l & 0xffffffff);
29507 return;
29510 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29512 unsigned HOST_WIDE_INT low;
29513 HOST_WIDE_INT high;
29515 low = INTVAL (x) & 0xffffffff;
29516 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29518 /* TOC entries are always Pmode-sized, so when big-endian
29519 smaller integer constants in the TOC need to be padded.
29520 (This is still a win over putting the constants in
29521 a separate constant pool, because then we'd have
29522 to have both a TOC entry _and_ the actual constant.)
29524 For a 32-bit target, CONST_INT values are loaded and shifted
29525 entirely within `low' and can be stored in one TOC entry. */
29527 /* It would be easy to make this work, but it doesn't now. */
29528 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29530 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29532 low |= high << 32;
29533 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29534 high = (HOST_WIDE_INT) low >> 32;
29535 low &= 0xffffffff;
29538 if (TARGET_64BIT)
29540 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29541 fputs (DOUBLE_INT_ASM_OP, file);
29542 else
29543 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29544 (long) high & 0xffffffff, (long) low & 0xffffffff);
29545 fprintf (file, "0x%lx%08lx\n",
29546 (long) high & 0xffffffff, (long) low & 0xffffffff);
29547 return;
29549 else
29551 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29553 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29554 fputs ("\t.long ", file);
29555 else
29556 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29557 (long) high & 0xffffffff, (long) low & 0xffffffff);
29558 fprintf (file, "0x%lx,0x%lx\n",
29559 (long) high & 0xffffffff, (long) low & 0xffffffff);
29561 else
29563 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29564 fputs ("\t.long ", file);
29565 else
29566 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29567 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29569 return;
29573 if (GET_CODE (x) == CONST)
29575 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29576 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29578 base = XEXP (XEXP (x, 0), 0);
29579 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29582 switch (GET_CODE (base))
29584 case SYMBOL_REF:
29585 name = XSTR (base, 0);
29586 break;
29588 case LABEL_REF:
29589 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29590 CODE_LABEL_NUMBER (XEXP (base, 0)));
29591 break;
29593 case CODE_LABEL:
29594 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29595 break;
29597 default:
29598 gcc_unreachable ();
29601 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29602 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29603 else
29605 fputs ("\t.tc ", file);
29606 RS6000_OUTPUT_BASENAME (file, name);
29608 if (offset < 0)
29609 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29610 else if (offset)
29611 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29613 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29614 after other TOC symbols, reducing overflow of small TOC access
29615 to [TC] symbols. */
29616 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29617 ? "[TE]," : "[TC],", file);
29620 /* Currently C++ toc references to vtables can be emitted before it
29621 is decided whether the vtable is public or private. If this is
29622 the case, then the linker will eventually complain that there is
29623 a TOC reference to an unknown section. Thus, for vtables only,
29624 we emit the TOC reference to reference the symbol and not the
29625 section. */
29626 if (VTABLE_NAME_P (name))
29628 RS6000_OUTPUT_BASENAME (file, name);
29629 if (offset < 0)
29630 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29631 else if (offset > 0)
29632 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29634 else
29635 output_addr_const (file, x);
29637 #if HAVE_AS_TLS
29638 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29640 switch (SYMBOL_REF_TLS_MODEL (base))
29642 case 0:
29643 break;
29644 case TLS_MODEL_LOCAL_EXEC:
29645 fputs ("@le", file);
29646 break;
29647 case TLS_MODEL_INITIAL_EXEC:
29648 fputs ("@ie", file);
29649 break;
29650 /* Use global-dynamic for local-dynamic. */
29651 case TLS_MODEL_GLOBAL_DYNAMIC:
29652 case TLS_MODEL_LOCAL_DYNAMIC:
29653 putc ('\n', file);
29654 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29655 fputs ("\t.tc .", file);
29656 RS6000_OUTPUT_BASENAME (file, name);
29657 fputs ("[TC],", file);
29658 output_addr_const (file, x);
29659 fputs ("@m", file);
29660 break;
29661 default:
29662 gcc_unreachable ();
29665 #endif
29667 putc ('\n', file);
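/* Illustrative sketch, not part of the build: the CONST_INT case above
   left-justifies a narrow constant inside a Pmode-sized big-endian TOC
   word.  The shift dance, restated for a 64-bit pointer and a mode of
   MODE_BITS < 64 bits:  */
#if 0
static void
pad_toc_word_be (HOST_WIDE_INT value, unsigned mode_bits,
		 HOST_WIDE_INT *high, unsigned HOST_WIDE_INT *low)
{
  unsigned HOST_WIDE_INT lo = value & 0xffffffff;
  HOST_WIDE_INT hi = value >> 32;
  if (64 > mode_bits)				/* POINTER_SIZE > mode size */
    {
      lo |= (unsigned HOST_WIDE_INT) hi << 32;	/* reassemble the value */
      lo <<= 64 - mode_bits;			/* left-justify in the word */
      hi = (HOST_WIDE_INT) lo >> 32;		/* re-split into halves */
      lo &= 0xffffffff;
    }
  *high = hi;
  *low = lo;
}
#endif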
29670 /* Output an assembler pseudo-op to write an ASCII string of N characters
29671 starting at P to FILE.
29673 On the RS/6000, we have to do this using the .byte operation and
29674 write out special characters outside the quoted string.
29675 Also, the assembler is broken; very long strings are truncated,
29676 so we must artificially break them up early. */
29678 void
29679 output_ascii (FILE *file, const char *p, int n)
29681 char c;
29682 int i, count_string;
29683 const char *for_string = "\t.byte \"";
29684 const char *for_decimal = "\t.byte ";
29685 const char *to_close = NULL;
29687 count_string = 0;
29688 for (i = 0; i < n; i++)
29690 c = *p++;
29691 if (c >= ' ' && c < 0177)
29693 if (for_string)
29694 fputs (for_string, file);
29695 putc (c, file);
29697 /* Write two quotes to get one. */
29698 if (c == '"')
29700 putc (c, file);
29701 ++count_string;
29704 for_string = NULL;
29705 for_decimal = "\"\n\t.byte ";
29706 to_close = "\"\n";
29707 ++count_string;
29709 if (count_string >= 512)
29711 fputs (to_close, file);
29713 for_string = "\t.byte \"";
29714 for_decimal = "\t.byte ";
29715 to_close = NULL;
29716 count_string = 0;
29719 else
29721 if (for_decimal)
29722 fputs (for_decimal, file);
29723 fprintf (file, "%d", c);
29725 for_string = "\n\t.byte \"";
29726 for_decimal = ", ";
29727 to_close = "\n";
29728 count_string = 0;
29732 /* Now close the string if we have written one. Then end the line. */
29733 if (to_close)
29734 fputs (to_close, file);
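/* Hypothetical usage sketch, not part of the build: printable runs go
   inside one quoted .byte directive and everything else is emitted as a
   decimal value, so writing the three bytes of "Hi\n" produces

	.byte "Hi"
	.byte 10  */
#if 0
output_ascii (asm_out_file, "Hi\n", 3);
#endif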
29737 /* Generate a unique section name for FILENAME for a section type
29738 represented by SECTION_DESC. Output goes into BUF.
29740 SECTION_DESC can be any string, as long as it is different for each
29741 possible section type.
29743 We name the section in the same manner as xlc. The name begins with an
29744 underscore followed by the filename (after stripping any leading directory
29745 names) with the last period replaced by the string SECTION_DESC. If
29746 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29747 the name. */
29749 void
29750 rs6000_gen_section_name (char **buf, const char *filename,
29751 const char *section_desc)
29753 const char *q, *after_last_slash, *last_period = 0;
29754 char *p;
29755 int len;
29757 after_last_slash = filename;
29758 for (q = filename; *q; q++)
29760 if (*q == '/')
29761 after_last_slash = q + 1;
29762 else if (*q == '.')
29763 last_period = q;
29766 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29767 *buf = (char *) xmalloc (len);
29769 p = *buf;
29770 *p++ = '_';
29772 for (q = after_last_slash; *q; q++)
29774 if (q == last_period)
29776 strcpy (p, section_desc);
29777 p += strlen (section_desc);
29778 break;
29781 else if (ISALNUM (*q))
29782 *p++ = *q;
29785 if (last_period == 0)
29786 strcpy (p, section_desc);
29787 else
29788 *p = '\0';
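/* Hypothetical usage sketch, not part of the build: the directory part is
   stripped and the last period is replaced by SECTION_DESC, so:  */
#if 0
char *secname;
rs6000_gen_section_name (&secname, "lib/foo.c", "_bss_");
/* SECNAME now holds the xmalloc'd string "_foo_bss_"; the caller frees it.  */
free (secname);
#endif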
29791 /* Emit profile function. */
29793 void
29794 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29796 /* Non-standard profiling for kernels, which just saves LR then calls
29797 _mcount without worrying about arg saves. The idea is to change
29798 the function prologue as little as possible as it isn't easy to
29799 account for arg save/restore code added just for _mcount. */
29800 if (TARGET_PROFILE_KERNEL)
29801 return;
29803 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29805 #ifndef NO_PROFILE_COUNTERS
29806 # define NO_PROFILE_COUNTERS 0
29807 #endif
29808 if (NO_PROFILE_COUNTERS)
29809 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29810 LCT_NORMAL, VOIDmode, 0);
29811 else
29813 char buf[30];
29814 const char *label_name;
29815 rtx fun;
29817 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29818 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29819 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29821 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29822 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
29825 else if (DEFAULT_ABI == ABI_DARWIN)
29827 const char *mcount_name = RS6000_MCOUNT;
29828 int caller_addr_regno = LR_REGNO;
29830 /* Be conservative and always set this, at least for now. */
29831 crtl->uses_pic_offset_table = 1;
29833 #if TARGET_MACHO
29834 /* For PIC code, set up a stub and collect the caller's address
29835 from r0, which is where the prologue puts it. */
29836 if (MACHOPIC_INDIRECT
29837 && crtl->uses_pic_offset_table)
29838 caller_addr_regno = 0;
29839 #endif
29840 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29841 LCT_NORMAL, VOIDmode, 1,
29842 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29846 /* Write function profiler code. */
29848 void
29849 output_function_profiler (FILE *file, int labelno)
29851 char buf[100];
29853 switch (DEFAULT_ABI)
29855 default:
29856 gcc_unreachable ();
29858 case ABI_V4:
29859 if (!TARGET_32BIT)
29861 warning (0, "no profiling of 64-bit code for this ABI");
29862 return;
29864 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29865 fprintf (file, "\tmflr %s\n", reg_names[0]);
29866 if (NO_PROFILE_COUNTERS)
29868 asm_fprintf (file, "\tstw %s,4(%s)\n",
29869 reg_names[0], reg_names[1]);
29871 else if (TARGET_SECURE_PLT && flag_pic)
29873 if (TARGET_LINK_STACK)
29875 char name[32];
29876 get_ppc476_thunk_name (name);
29877 asm_fprintf (file, "\tbl %s\n", name);
29879 else
29880 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
29881 asm_fprintf (file, "\tstw %s,4(%s)\n",
29882 reg_names[0], reg_names[1]);
29883 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29884 asm_fprintf (file, "\taddis %s,%s,",
29885 reg_names[12], reg_names[12]);
29886 assemble_name (file, buf);
29887 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
29888 assemble_name (file, buf);
29889 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
29891 else if (flag_pic == 1)
29893 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
29894 asm_fprintf (file, "\tstw %s,4(%s)\n",
29895 reg_names[0], reg_names[1]);
29896 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29897 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
29898 assemble_name (file, buf);
29899 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
29901 else if (flag_pic > 1)
29903 asm_fprintf (file, "\tstw %s,4(%s)\n",
29904 reg_names[0], reg_names[1]);
29905 /* Now, we need to get the address of the label. */
29906 if (TARGET_LINK_STACK)
29908 char name[32];
29909 get_ppc476_thunk_name (name);
29910 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
29911 assemble_name (file, buf);
29912 fputs ("-.\n1:", file);
29913 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29914 asm_fprintf (file, "\taddi %s,%s,4\n",
29915 reg_names[11], reg_names[11]);
29917 else
29919 fputs ("\tbcl 20,31,1f\n\t.long ", file);
29920 assemble_name (file, buf);
29921 fputs ("-.\n1:", file);
29922 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29924 asm_fprintf (file, "\tlwz %s,0(%s)\n",
29925 reg_names[0], reg_names[11]);
29926 asm_fprintf (file, "\tadd %s,%s,%s\n",
29927 reg_names[0], reg_names[0], reg_names[11]);
29929 else
29931 asm_fprintf (file, "\tlis %s,", reg_names[12]);
29932 assemble_name (file, buf);
29933 fputs ("@ha\n", file);
29934 asm_fprintf (file, "\tstw %s,4(%s)\n",
29935 reg_names[0], reg_names[1]);
29936 asm_fprintf (file, "\tla %s,", reg_names[0]);
29937 assemble_name (file, buf);
29938 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
29941 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
29942 fprintf (file, "\tbl %s%s\n",
29943 RS6000_MCOUNT, flag_pic ? "@plt" : "");
29944 break;
29946 case ABI_AIX:
29947 case ABI_ELFv2:
29948 case ABI_DARWIN:
29949 /* Don't do anything, done in output_profile_hook (). */
29950 break;
29956 /* The following variable holds the last issued insn. */
29958 static rtx_insn *last_scheduled_insn;
29960 /* The following variable helps to balance issuing of load and
29961 store instructions. */
29963 static int load_store_pendulum;
29965 /* The following variable helps pair divide insns during scheduling. */
29966 static int divide_cnt;
29967 /* The following variable helps pair and alternate vector and vector load
29968 insns during scheduling. */
29969 static int vec_pairing;
29972 /* Power4 load update and store update instructions are cracked into a
29973 load or store and an integer insn which are executed in the same cycle.
29974 Branches have their own dispatch slot which does not count against the
29975 GCC issue rate, but it changes the program flow so there are no other
29976 instructions to issue in this cycle. */
29978 static int
29979 rs6000_variable_issue_1 (rtx_insn *insn, int more)
29981 last_scheduled_insn = insn;
29982 if (GET_CODE (PATTERN (insn)) == USE
29983 || GET_CODE (PATTERN (insn)) == CLOBBER)
29985 cached_can_issue_more = more;
29986 return cached_can_issue_more;
29989 if (insn_terminates_group_p (insn, current_group))
29991 cached_can_issue_more = 0;
29992 return cached_can_issue_more;
29995 /* If the insn has no reservation but we still reach here, return MORE unchanged. */
29996 if (recog_memoized (insn) < 0)
29997 return more;
29999 if (rs6000_sched_groups)
30001 if (is_microcoded_insn (insn))
30002 cached_can_issue_more = 0;
30003 else if (is_cracked_insn (insn))
30004 cached_can_issue_more = more > 2 ? more - 2 : 0;
30005 else
30006 cached_can_issue_more = more - 1;
30008 return cached_can_issue_more;
30011 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30012 return 0;
30014 cached_can_issue_more = more - 1;
30015 return cached_can_issue_more;
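/* Illustrative note, not part of the build: on the sched-group CPUs a
   cracked insn consumes two issue slots, so with four slots left the
   call below would report two remaining for the rest of the cycle:  */
#if 0
int left = rs6000_variable_issue_1 (insn, 4);	/* 2 if INSN is cracked */
#endif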
30018 static int
30019 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30021 int r = rs6000_variable_issue_1 (insn, more);
30022 if (verbose)
30023 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30024 return r;
30027 /* Adjust the cost of a scheduling dependency. Return the new cost of
30028 a dependency of INSN on DEP_INSN. COST is the current cost. */
30030 static int
30031 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30032 unsigned int)
30034 enum attr_type attr_type;
30036 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30037 return cost;
30039 switch (dep_type)
30041 case REG_DEP_TRUE:
30043 /* Data dependency; DEP_INSN writes a register that INSN reads
30044 some cycles later. */
30046 /* Separate a load from a narrower, dependent store. */
30047 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30048 && GET_CODE (PATTERN (insn)) == SET
30049 && GET_CODE (PATTERN (dep_insn)) == SET
30050 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30051 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30052 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30053 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30054 return cost + 14;
30056 attr_type = get_attr_type (insn);
30058 switch (attr_type)
30060 case TYPE_JMPREG:
30061 /* Tell the first scheduling pass about the latency between
30062 a mtctr and bctr (and mtlr and br/blr). The first
30063 scheduling pass will not know about this latency since
30064 the mtctr instruction, which has the latency associated
30065 to it, will be generated by reload. */
30066 return 4;
30067 case TYPE_BRANCH:
30068 /* Leave some extra cycles between a compare and its
30069 dependent branch, to inhibit expensive mispredicts. */
30070 if ((rs6000_cpu_attr == CPU_PPC603
30071 || rs6000_cpu_attr == CPU_PPC604
30072 || rs6000_cpu_attr == CPU_PPC604E
30073 || rs6000_cpu_attr == CPU_PPC620
30074 || rs6000_cpu_attr == CPU_PPC630
30075 || rs6000_cpu_attr == CPU_PPC750
30076 || rs6000_cpu_attr == CPU_PPC7400
30077 || rs6000_cpu_attr == CPU_PPC7450
30078 || rs6000_cpu_attr == CPU_PPCE5500
30079 || rs6000_cpu_attr == CPU_PPCE6500
30080 || rs6000_cpu_attr == CPU_POWER4
30081 || rs6000_cpu_attr == CPU_POWER5
30082 || rs6000_cpu_attr == CPU_POWER7
30083 || rs6000_cpu_attr == CPU_POWER8
30084 || rs6000_cpu_attr == CPU_POWER9
30085 || rs6000_cpu_attr == CPU_CELL)
30086 && recog_memoized (dep_insn)
30087 && (INSN_CODE (dep_insn) >= 0))
30089 switch (get_attr_type (dep_insn))
30091 case TYPE_CMP:
30092 case TYPE_FPCOMPARE:
30093 case TYPE_CR_LOGICAL:
30094 case TYPE_DELAYED_CR:
30095 return cost + 2;
30096 case TYPE_EXTS:
30097 case TYPE_MUL:
30098 if (get_attr_dot (dep_insn) == DOT_YES)
30099 return cost + 2;
30100 else
30101 break;
30102 case TYPE_SHIFT:
30103 if (get_attr_dot (dep_insn) == DOT_YES
30104 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30105 return cost + 2;
30106 else
30107 break;
30108 default:
30109 break;
30111 break;
30113 case TYPE_STORE:
30114 case TYPE_FPSTORE:
30115 if ((rs6000_cpu == PROCESSOR_POWER6)
30116 && recog_memoized (dep_insn)
30117 && (INSN_CODE (dep_insn) >= 0))
30120 if (GET_CODE (PATTERN (insn)) != SET)
30121 /* If this happens, we have to extend this to schedule
30122 optimally. Return default for now. */
30123 return cost;
30125 /* Adjust the cost for the case where the value written
30126 by a fixed point operation is used as the address
30127 gen value on a store. */
30128 switch (get_attr_type (dep_insn))
30130 case TYPE_LOAD:
30131 case TYPE_CNTLZ:
30133 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30134 return get_attr_sign_extend (dep_insn)
30135 == SIGN_EXTEND_YES ? 6 : 4;
30136 break;
30138 case TYPE_SHIFT:
30140 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30141 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30142 6 : 3;
30143 break;
30145 case TYPE_INTEGER:
30146 case TYPE_ADD:
30147 case TYPE_LOGICAL:
30148 case TYPE_EXTS:
30149 case TYPE_INSERT:
30151 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30152 return 3;
30153 break;
30155 case TYPE_STORE:
30156 case TYPE_FPLOAD:
30157 case TYPE_FPSTORE:
30159 if (get_attr_update (dep_insn) == UPDATE_YES
30160 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30161 return 3;
30162 break;
30164 case TYPE_MUL:
30166 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30167 return 17;
30168 break;
30170 case TYPE_DIV:
30172 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30173 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30174 break;
30176 default:
30177 break;
30180 break;
30182 case TYPE_LOAD:
30183 if ((rs6000_cpu == PROCESSOR_POWER6)
30184 && recog_memoized (dep_insn)
30185 && (INSN_CODE (dep_insn) >= 0))
30188 /* Adjust the cost for the case where the value written
30189 by a fixed point instruction is used within the address
30190 gen portion of a subsequent load(u)(x). */
30191 switch (get_attr_type (dep_insn))
30193 case TYPE_LOAD:
30194 case TYPE_CNTLZ:
30196 if (set_to_load_agen (dep_insn, insn))
30197 return get_attr_sign_extend (dep_insn)
30198 == SIGN_EXTEND_YES ? 6 : 4;
30199 break;
30201 case TYPE_SHIFT:
30203 if (set_to_load_agen (dep_insn, insn))
30204 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30205 6 : 3;
30206 break;
30208 case TYPE_INTEGER:
30209 case TYPE_ADD:
30210 case TYPE_LOGICAL:
30211 case TYPE_EXTS:
30212 case TYPE_INSERT:
30214 if (set_to_load_agen (dep_insn, insn))
30215 return 3;
30216 break;
30218 case TYPE_STORE:
30219 case TYPE_FPLOAD:
30220 case TYPE_FPSTORE:
30222 if (get_attr_update (dep_insn) == UPDATE_YES
30223 && set_to_load_agen (dep_insn, insn))
30224 return 3;
30225 break;
30227 case TYPE_MUL:
30229 if (set_to_load_agen (dep_insn, insn))
30230 return 17;
30231 break;
30233 case TYPE_DIV:
30235 if (set_to_load_agen (dep_insn, insn))
30236 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30237 break;
30239 default:
30240 break;
30243 break;
30245 case TYPE_FPLOAD:
30246 if ((rs6000_cpu == PROCESSOR_POWER6)
30247 && get_attr_update (insn) == UPDATE_NO
30248 && recog_memoized (dep_insn)
30249 && (INSN_CODE (dep_insn) >= 0)
30250 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30251 return 2;
30253 default:
30254 break;
30257 /* Fall out to return default cost. */
30259 break;
30261 case REG_DEP_OUTPUT:
30262 /* Output dependency; DEP_INSN writes a register that INSN writes some
30263 cycles later. */
30264 if ((rs6000_cpu == PROCESSOR_POWER6)
30265 && recog_memoized (dep_insn)
30266 && (INSN_CODE (dep_insn) >= 0))
30268 attr_type = get_attr_type (insn);
30270 switch (attr_type)
30272 case TYPE_FP:
30273 case TYPE_FPSIMPLE:
30274 if (get_attr_type (dep_insn) == TYPE_FP
30275 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30276 return 1;
30277 break;
30278 case TYPE_FPLOAD:
30279 if (get_attr_update (insn) == UPDATE_NO
30280 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30281 return 2;
30282 break;
30283 default:
30284 break;
30287 /* Fall through, no cost for output dependency. */
30288 /* FALLTHRU */
30290 case REG_DEP_ANTI:
30291 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30292 cycles later. */
30293 return 0;
30295 default:
30296 gcc_unreachable ();
30299 return cost;
30302 /* Debug version of rs6000_adjust_cost. */
30304 static int
30305 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30306 int cost, unsigned int dw)
30308 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30310 if (ret != cost)
30312 const char *dep;
30314 switch (dep_type)
30316 default: dep = "unknown dependency"; break;
30317 case REG_DEP_TRUE: dep = "data dependency"; break;
30318 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30319 case REG_DEP_ANTI: dep = "anti dependency"; break;
30322 fprintf (stderr,
30323 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30324 "%s, insn:\n", ret, cost, dep);
30326 debug_rtx (insn);
30329 return ret;
30332 /* Return true if INSN is microcoded, false otherwise. */
30335 static bool
30336 is_microcoded_insn (rtx_insn *insn)
30338 if (!insn || !NONDEBUG_INSN_P (insn)
30339 || GET_CODE (PATTERN (insn)) == USE
30340 || GET_CODE (PATTERN (insn)) == CLOBBER)
30341 return false;
30343 if (rs6000_cpu_attr == CPU_CELL)
30344 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30346 if (rs6000_sched_groups
30347 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30349 enum attr_type type = get_attr_type (insn);
30350 if ((type == TYPE_LOAD
30351 && get_attr_update (insn) == UPDATE_YES
30352 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30353 || ((type == TYPE_LOAD || type == TYPE_STORE)
30354 && get_attr_update (insn) == UPDATE_YES
30355 && get_attr_indexed (insn) == INDEXED_YES)
30356 || type == TYPE_MFCR)
30357 return true;
30360 return false;
30363 /* The function returns true if INSN is cracked into 2 instructions
30364 by the processor (and therefore occupies 2 issue slots). */
30366 static bool
30367 is_cracked_insn (rtx_insn *insn)
30369 if (!insn || !NONDEBUG_INSN_P (insn)
30370 || GET_CODE (PATTERN (insn)) == USE
30371 || GET_CODE (PATTERN (insn)) == CLOBBER)
30372 return false;
30374 if (rs6000_sched_groups
30375 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30377 enum attr_type type = get_attr_type (insn);
30378 if ((type == TYPE_LOAD
30379 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30380 && get_attr_update (insn) == UPDATE_NO)
30381 || (type == TYPE_LOAD
30382 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30383 && get_attr_update (insn) == UPDATE_YES
30384 && get_attr_indexed (insn) == INDEXED_NO)
30385 || (type == TYPE_STORE
30386 && get_attr_update (insn) == UPDATE_YES
30387 && get_attr_indexed (insn) == INDEXED_NO)
30388 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30389 && get_attr_update (insn) == UPDATE_YES)
30390 || type == TYPE_DELAYED_CR
30391 || (type == TYPE_EXTS
30392 && get_attr_dot (insn) == DOT_YES)
30393 || (type == TYPE_SHIFT
30394 && get_attr_dot (insn) == DOT_YES
30395 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30396 || (type == TYPE_MUL
30397 && get_attr_dot (insn) == DOT_YES)
30398 || type == TYPE_DIV
30399 || (type == TYPE_INSERT
30400 && get_attr_size (insn) == SIZE_32))
30401 return true;
30404 return false;
30407 /* The function returns true if INSN can be issued only from
30408 the branch slot. */
30410 static bool
30411 is_branch_slot_insn (rtx_insn *insn)
30413 if (!insn || !NONDEBUG_INSN_P (insn)
30414 || GET_CODE (PATTERN (insn)) == USE
30415 || GET_CODE (PATTERN (insn)) == CLOBBER)
30416 return false;
30418 if (rs6000_sched_groups)
30420 enum attr_type type = get_attr_type (insn);
30421 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30422 return true;
30423 return false;
30426 return false;
30429 /* Return true if OUT_INSN sets a value that is
30430 used in the address generation computation of IN_INSN. */
30431 static bool
30432 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30434 rtx out_set, in_set;
30436 /* For performance reasons, only handle the simple case where
30437 both insns are a single_set. */
30438 out_set = single_set (out_insn);
30439 if (out_set)
30441 in_set = single_set (in_insn);
30442 if (in_set)
30443 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30446 return false;
30449 /* Try to determine base/offset/size parts of the given MEM.
30450 Return true if successful, false if any of the values cannot
30451 be determined.
30453 This function only looks for REG or REG+CONST address forms.
30454 REG+REG address form will return false. */
30456 static bool
30457 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30458 HOST_WIDE_INT *size)
30460 rtx addr_rtx;
30461 if (MEM_SIZE_KNOWN_P (mem))
30462 *size = MEM_SIZE (mem);
30463 else
30464 return false;
30466 addr_rtx = (XEXP (mem, 0));
30467 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30468 addr_rtx = XEXP (addr_rtx, 1);
30470 *offset = 0;
30471 while (GET_CODE (addr_rtx) == PLUS
30472 && CONST_INT_P (XEXP (addr_rtx, 1)))
30474 *offset += INTVAL (XEXP (addr_rtx, 1));
30475 addr_rtx = XEXP (addr_rtx, 0);
30477 if (!REG_P (addr_rtx))
30478 return false;
30480 *base = addr_rtx;
30481 return true;
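/* Illustrative in-context sketch, not part of the build: the loop above
   peels nested REG+CONST sums, so an address such as
   (plus (plus (reg 9) (const_int 16)) (const_int 8)) yields base (reg 9)
   and offset 24, while a REG+REG address makes the final REG_P test fail:  */
#if 0
rtx base;
HOST_WIDE_INT offset, size;
if (get_memref_parts (mem, &base, &offset, &size))
  gcc_assert (REG_P (base));			/* base is always a plain REG */
#endif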
30484 /* Return true if the target storage location of
30485 MEM1 is adjacent to the target storage location of MEM2. */
30488 static bool
30489 adjacent_mem_locations (rtx mem1, rtx mem2)
30491 rtx reg1, reg2;
30492 HOST_WIDE_INT off1, size1, off2, size2;
30494 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30495 && get_memref_parts (mem2, &reg2, &off2, &size2))
30496 return ((REGNO (reg1) == REGNO (reg2))
30497 && ((off1 + size1 == off2)
30498 || (off2 + size2 == off1)));
30500 return false;
30503 /* This function returns true if it can be determined that the two MEM
30504 locations overlap by at least 1 byte based on base reg/offset/size. */
30506 static bool
30507 mem_locations_overlap (rtx mem1, rtx mem2)
30509 rtx reg1, reg2;
30510 HOST_WIDE_INT off1, size1, off2, size2;
30512 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30513 && get_memref_parts (mem2, &reg2, &off2, &size2))
30514 return ((REGNO (reg1) == REGNO (reg2))
30515 && (((off1 <= off2) && (off1 + size1 > off2))
30516 || ((off2 <= off1) && (off2 + size2 > off1))));
30518 return false;
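/* Worked example, not part of the build: with a common base register, an
   8-byte access at offset 16 and a 4-byte access at offset 20 satisfy
   off1 <= off2 && off1 + size1 > off2 (16 <= 20 && 24 > 20), so they are
   reported as overlapping; an access at offset 24 would not be.  The
   interval test in isolation:  */
#if 0
static bool
ranges_overlap (HOST_WIDE_INT off1, HOST_WIDE_INT size1,
		HOST_WIDE_INT off2, HOST_WIDE_INT size2)
{
  return ((off1 <= off2 && off1 + size1 > off2)
	  || (off2 <= off1 && off2 + size2 > off1));
}
#endif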
30521 /* Update the integer scheduling priority INSN_PRIORITY (INSN):
30522 increase the priority to execute INSN earlier, reduce the
30523 priority to execute it later. */
30527 static int
30528 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30530 rtx load_mem, str_mem;
30531 /* On machines (like the 750) which have asymmetric integer units,
30532 where one integer unit can do multiply and divides and the other
30533 can't, reduce the priority of multiply/divide so it is scheduled
30534 before other integer operations. */
30536 #if 0
30537 if (! INSN_P (insn))
30538 return priority;
30540 if (GET_CODE (PATTERN (insn)) == USE)
30541 return priority;
30543 switch (rs6000_cpu_attr) {
30544 case CPU_PPC750:
30545 switch (get_attr_type (insn))
30547 default:
30548 break;
30550 case TYPE_MUL:
30551 case TYPE_DIV:
30552 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30553 priority, priority);
30554 if (priority >= 0 && priority < 0x01000000)
30555 priority >>= 3;
30556 break;
30559 #endif
30561 if (insn_must_be_first_in_group (insn)
30562 && reload_completed
30563 && current_sched_info->sched_max_insns_priority
30564 && rs6000_sched_restricted_insns_priority)
30567 /* Prioritize insns that can be dispatched only in the first
30568 dispatch slot. */
30569 if (rs6000_sched_restricted_insns_priority == 1)
30570 /* Attach highest priority to insn. This means that in
30571 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30572 precede 'priority' (critical path) considerations. */
30573 return current_sched_info->sched_max_insns_priority;
30574 else if (rs6000_sched_restricted_insns_priority == 2)
30575 /* Increase priority of insn by a minimal amount. This means that in
30576 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30577 considerations precede dispatch-slot restriction considerations. */
30578 return (priority + 1);
30581 if (rs6000_cpu == PROCESSOR_POWER6
30582 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30583 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30584 /* Attach highest priority to insn if the scheduler has just issued two
30585 stores and this instruction is a load, or two loads and this instruction
30586 is a store. Power6 wants loads and stores scheduled alternately
30587 when possible. */
30588 return current_sched_info->sched_max_insns_priority;
30590 return priority;
30593 /* Return true if the instruction is nonpipelined on the Cell. */
30594 static bool
30595 is_nonpipeline_insn (rtx_insn *insn)
30597 enum attr_type type;
30598 if (!insn || !NONDEBUG_INSN_P (insn)
30599 || GET_CODE (PATTERN (insn)) == USE
30600 || GET_CODE (PATTERN (insn)) == CLOBBER)
30601 return false;
30603 type = get_attr_type (insn);
30604 if (type == TYPE_MUL
30605 || type == TYPE_DIV
30606 || type == TYPE_SDIV
30607 || type == TYPE_DDIV
30608 || type == TYPE_SSQRT
30609 || type == TYPE_DSQRT
30610 || type == TYPE_MFCR
30611 || type == TYPE_MFCRF
30612 || type == TYPE_MFJMPR)
30614 return true;
30616 return false;
30620 /* Return how many instructions the machine can issue per cycle. */
30622 static int
30623 rs6000_issue_rate (void)
30625 /* Unless scheduling for register pressure, use issue rate of 1 for
30626 first scheduling pass to decrease degradation. */
30627 if (!reload_completed && !flag_sched_pressure)
30628 return 1;
30630 switch (rs6000_cpu_attr) {
30631 case CPU_RS64A:
30632 case CPU_PPC601: /* ? */
30633 case CPU_PPC7450:
30634 return 3;
30635 case CPU_PPC440:
30636 case CPU_PPC603:
30637 case CPU_PPC750:
30638 case CPU_PPC7400:
30639 case CPU_PPC8540:
30640 case CPU_PPC8548:
30641 case CPU_CELL:
30642 case CPU_PPCE300C2:
30643 case CPU_PPCE300C3:
30644 case CPU_PPCE500MC:
30645 case CPU_PPCE500MC64:
30646 case CPU_PPCE5500:
30647 case CPU_PPCE6500:
30648 case CPU_TITAN:
30649 return 2;
30650 case CPU_PPC476:
30651 case CPU_PPC604:
30652 case CPU_PPC604E:
30653 case CPU_PPC620:
30654 case CPU_PPC630:
30655 return 4;
30656 case CPU_POWER4:
30657 case CPU_POWER5:
30658 case CPU_POWER6:
30659 case CPU_POWER7:
30660 return 5;
30661 case CPU_POWER8:
30662 return 7;
30663 case CPU_POWER9:
30664 return 6;
30665 default:
30666 return 1;
30670 /* Return how many instructions to look ahead for better insn
30671 scheduling. */
30673 static int
30674 rs6000_use_sched_lookahead (void)
30676 switch (rs6000_cpu_attr)
30678 case CPU_PPC8540:
30679 case CPU_PPC8548:
30680 return 4;
30682 case CPU_CELL:
30683 return (reload_completed ? 8 : 0);
30685 default:
30686 return 0;
30690 /* We are choosing insn from the ready queue. Return zero if INSN can be
30691 chosen. */
30692 static int
30693 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30695 if (ready_index == 0)
30696 return 0;
30698 if (rs6000_cpu_attr != CPU_CELL)
30699 return 0;
30701 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30703 if (!reload_completed
30704 || is_nonpipeline_insn (insn)
30705 || is_microcoded_insn (insn))
30706 return 1;
30708 return 0;
30711 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30712 and return true. */
30714 static bool
30715 find_mem_ref (rtx pat, rtx *mem_ref)
30717 const char * fmt;
30718 int i, j;
30720 /* stack_tie does not produce any real memory traffic. */
30721 if (tie_operand (pat, VOIDmode))
30722 return false;
30724 if (GET_CODE (pat) == MEM)
30726 *mem_ref = pat;
30727 return true;
30730 /* Recursively process the pattern. */
30731 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30733 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30735 if (fmt[i] == 'e')
30737 if (find_mem_ref (XEXP (pat, i), mem_ref))
30738 return true;
30740 else if (fmt[i] == 'E')
30741 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30743 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30744 return true;
30748 return false;
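/* Illustrative in-context sketch, not part of the build: find_mem_ref
   walks arbitrary RTL, so for a pattern like
   (set (reg:SI 3) (plus:SI (mem:SI (reg:SI 4)) (const_int 1)))
   it stores the inner MEM in *MEM_REF and returns true:  */
#if 0
rtx mem = NULL_RTX;
if (find_mem_ref (PATTERN (insn), &mem))
  gcc_assert (MEM_P (mem));
#endif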
30751 /* Determine if PAT is a PATTERN of a load insn. */
30753 static bool
30754 is_load_insn1 (rtx pat, rtx *load_mem)
30756 if (!pat)
30757 return false;
30759 if (GET_CODE (pat) == SET)
30760 return find_mem_ref (SET_SRC (pat), load_mem);
30762 if (GET_CODE (pat) == PARALLEL)
30764 int i;
30766 for (i = 0; i < XVECLEN (pat, 0); i++)
30767 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30768 return true;
30771 return false;
30774 /* Determine if INSN loads from memory. */
30776 static bool
30777 is_load_insn (rtx insn, rtx *load_mem)
30779 if (!insn || !INSN_P (insn))
30780 return false;
30782 if (CALL_P (insn))
30783 return false;
30785 return is_load_insn1 (PATTERN (insn), load_mem);
30788 /* Determine if PAT is a PATTERN of a store insn. */
30790 static bool
30791 is_store_insn1 (rtx pat, rtx *str_mem)
30793 if (!pat)
30794 return false;
30796 if (GET_CODE (pat) == SET)
30797 return find_mem_ref (SET_DEST (pat), str_mem);
30799 if (GET_CODE (pat) == PARALLEL)
30801 int i;
30803 for (i = 0; i < XVECLEN (pat, 0); i++)
30804 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30805 return true;
30808 return false;
30811 /* Determine if INSN stores to memory. */
30813 static bool
30814 is_store_insn (rtx insn, rtx *str_mem)
30816 if (!insn || !INSN_P (insn))
30817 return false;
30819 return is_store_insn1 (PATTERN (insn), str_mem);
30822 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30824 static bool
30825 is_power9_pairable_vec_type (enum attr_type type)
30827 switch (type)
30829 case TYPE_VECSIMPLE:
30830 case TYPE_VECCOMPLEX:
30831 case TYPE_VECDIV:
30832 case TYPE_VECCMP:
30833 case TYPE_VECPERM:
30834 case TYPE_VECFLOAT:
30835 case TYPE_VECFDIV:
30836 case TYPE_VECDOUBLE:
30837 return true;
30838 default:
30839 break;
30841 return false;
30844 /* Returns whether the dependence between INSN and NEXT is considered
30845 costly by the given target. */
30847 static bool
30848 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30850 rtx insn;
30851 rtx next;
30852 rtx load_mem, str_mem;
30854 /* If the flag is not enabled - no dependence is considered costly;
30855 allow all dependent insns in the same group.
30856 This is the most aggressive option. */
30857 if (rs6000_sched_costly_dep == no_dep_costly)
30858 return false;
30860 /* If the flag is set to 1 - a dependence is always considered costly;
30861 do not allow dependent instructions in the same group.
30862 This is the most conservative option. */
30863 if (rs6000_sched_costly_dep == all_deps_costly)
30864 return true;
30866 insn = DEP_PRO (dep);
30867 next = DEP_CON (dep);
30869 if (rs6000_sched_costly_dep == store_to_load_dep_costly
30870 && is_load_insn (next, &load_mem)
30871 && is_store_insn (insn, &str_mem))
30872 /* Prevent load after store in the same group. */
30873 return true;
30875 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
30876 && is_load_insn (next, &load_mem)
30877 && is_store_insn (insn, &str_mem)
30878 && DEP_TYPE (dep) == REG_DEP_TRUE
30879 && mem_locations_overlap (str_mem, load_mem))
30880 /* Prevent load after store in the same group if it is a true
30881 dependence. */
30882 return true;
30884 /* The flag is set to X; dependences with latency >= X are considered costly,
30885 and will not be scheduled in the same group. */
30886 if (rs6000_sched_costly_dep <= max_dep_latency
30887 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
30888 return true;
30890 return false;
30893 /* Return the next insn after INSN that is found before TAIL is reached,
30894 skipping any "non-active" insns - insns that will not actually occupy
30895 an issue slot. Return NULL_RTX if such an insn is not found. */
30897 static rtx_insn *
30898 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
30900 if (insn == NULL_RTX || insn == tail)
30901 return NULL;
30903 while (1)
30905 insn = NEXT_INSN (insn);
30906 if (insn == NULL_RTX || insn == tail)
30907 return NULL;
30909 if (CALL_P (insn)
30910 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
30911 || (NONJUMP_INSN_P (insn)
30912 && GET_CODE (PATTERN (insn)) != USE
30913 && GET_CODE (PATTERN (insn)) != CLOBBER
30914 && INSN_CODE (insn) != CODE_FOR_stack_tie))
30915 break;
30917 return insn;
30920 /* Do Power9-specific sched_reorder2 reordering of the ready list. */
30922 static int
30923 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
30925 int pos;
30926 int i;
30927 rtx_insn *tmp;
30928 enum attr_type type, type2;
30930 type = get_attr_type (last_scheduled_insn);
30932 /* Try to issue fixed point divides back-to-back in pairs so they will be
30933 routed to separate execution units and execute in parallel. */
30934 if (type == TYPE_DIV && divide_cnt == 0)
30936 /* First divide has been scheduled. */
30937 divide_cnt = 1;
30939 /* Scan the ready list looking for another divide; if found, move it
30940 to the end of the list so it is chosen next. */
30941 pos = lastpos;
30942 while (pos >= 0)
30944 if (recog_memoized (ready[pos]) >= 0
30945 && get_attr_type (ready[pos]) == TYPE_DIV)
30947 tmp = ready[pos];
30948 for (i = pos; i < lastpos; i++)
30949 ready[i] = ready[i + 1];
30950 ready[lastpos] = tmp;
30951 break;
30953 pos--;
30956 else
30958 /* Last insn was the 2nd divide or not a divide, reset the counter. */
30959 divide_cnt = 0;
30961 /* The best dispatch throughput for vector and vector load insns can be
30962 achieved by interleaving a vector and vector load such that they'll
30963 dispatch to the same superslice. If this pairing cannot be achieved
30964 then it is best to pair vector insns together and vector load insns
30965 together.
30967 To aid in this pairing, vec_pairing maintains the current state with
30968 the following values:
30970 0 : Initial state, no vecload/vector pairing has been started.
30972 1 : A vecload or vector insn has been issued and a candidate for
30973 pairing has been found and moved to the end of the ready
30974 list. */
30975 if (type == TYPE_VECLOAD)
30977 /* Issued a vecload. */
30978 if (vec_pairing == 0)
30980 int vecload_pos = -1;
30981 /* We issued a single vecload; look for a vector insn to pair it
30982 with. If one isn't found, try to pair another vecload. */
30983 pos = lastpos;
30984 while (pos >= 0)
30986 if (recog_memoized (ready[pos]) >= 0)
30988 type2 = get_attr_type (ready[pos]);
30989 if (is_power9_pairable_vec_type (type2))
30991 /* Found a vector insn to pair with, move it to the
30992 end of the ready list so it is scheduled next. */
30993 tmp = ready[pos];
30994 for (i = pos; i < lastpos; i++)
30995 ready[i] = ready[i + 1];
30996 ready[lastpos] = tmp;
30997 vec_pairing = 1;
30998 return cached_can_issue_more;
31000 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31001 /* Remember position of first vecload seen. */
31002 vecload_pos = pos;
31004 pos--;
31006 if (vecload_pos >= 0)
31008 /* Didn't find a vector to pair with but did find a vecload,
31009 move it to the end of the ready list. */
31010 tmp = ready[vecload_pos];
31011 for (i = vecload_pos; i < lastpos; i++)
31012 ready[i] = ready[i + 1];
31013 ready[lastpos] = tmp;
31014 vec_pairing = 1;
31015 return cached_can_issue_more;
31019 else if (is_power9_pairable_vec_type (type))
31021 /* Issued a vector operation. */
31022 if (vec_pairing == 0)
31024 int vec_pos = -1;
31025 /* We issued a single vector insn; look for a vecload to pair it
31026 with. If one isn't found, try to pair another vector. */
31027 pos = lastpos;
31028 while (pos >= 0)
31030 if (recog_memoized (ready[pos]) >= 0)
31032 type2 = get_attr_type (ready[pos]);
31033 if (type2 == TYPE_VECLOAD)
31035 /* Found a vecload insn to pair with, move it to the
31036 end of the ready list so it is scheduled next. */
31037 tmp = ready[pos];
31038 for (i = pos; i < lastpos; i++)
31039 ready[i] = ready[i + 1];
31040 ready[lastpos] = tmp;
31041 vec_pairing = 1;
31042 return cached_can_issue_more;
31044 else if (is_power9_pairable_vec_type (type2)
31045 && vec_pos == -1)
31046 /* Remember position of first vector insn seen. */
31047 vec_pos = pos;
31049 pos--;
31051 if (vec_pos >= 0)
31053 /* Didn't find a vecload to pair with but did find a vector
31054 insn, move it to the end of the ready list. */
31055 tmp = ready[vec_pos];
31056 for (i = vec_pos; i < lastpos; i++)
31057 ready[i] = ready[i + 1];
31058 ready[lastpos] = tmp;
31059 vec_pairing = 1;
31060 return cached_can_issue_more;
31065 /* We've either finished a vec/vecload pair, couldn't find an insn to
31066 continue the current pair, or the last insn had nothing to do
31067 with pairing. In any case, reset the state. */
31068 vec_pairing = 0;
31071 return cached_can_issue_more;
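/* Illustration (not part of the original source): the "move to the end
   of the ready list" idiom used repeatedly above, shown on a plain int
   array.  Element POS is rotated to index LASTPOS; the elements after
   it shift down one slot, preserving their relative order.  */
static void
demo_move_to_end (int *ready, int pos, int lastpos)
{
  int i, tmp = ready[pos];
  for (i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}
/* E.g., {10, 20, 30, 40} with POS == 1, LASTPOS == 3 becomes
   {10, 30, 40, 20}; since the scheduler issues from the end of the
   ready list, 20 is chosen next.  */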
31074 /* We are about to begin issuing insns for this clock cycle. */
31076 static int
31077 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31078 rtx_insn **ready ATTRIBUTE_UNUSED,
31079 int *pn_ready ATTRIBUTE_UNUSED,
31080 int clock_var ATTRIBUTE_UNUSED)
31082 int n_ready = *pn_ready;
31084 if (sched_verbose)
31085 fprintf (dump, "// rs6000_sched_reorder :\n");
31087 /* Reorder the ready list if the second to last ready insn
31088 is a nonpipeline insn. */
31089 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31091 if (is_nonpipeline_insn (ready[n_ready - 1])
31092 && (recog_memoized (ready[n_ready - 2]) > 0))
31093 /* Simply swap first two insns. */
31094 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31097 if (rs6000_cpu == PROCESSOR_POWER6)
31098 load_store_pendulum = 0;
31100 return rs6000_issue_rate ();
31103 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31105 static int
31106 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31107 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31109 if (sched_verbose)
31110 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31112 /* For Power6, we need to handle some special cases to try and keep the
31113 store queue from overflowing and triggering expensive flushes.
31115 This code monitors how load and store instructions are being issued
31116 and skews the ready list one way or the other to increase the likelihood
31117 that a desired instruction is issued at the proper time.
31119 A couple of things are done. First, we maintain a "load_store_pendulum"
31120 to track the current state of load/store issue.
31122 - If the pendulum is at zero, then no loads or stores have been
31123 issued in the current cycle so we do nothing.
31125 - If the pendulum is 1, then a single load has been issued in this
31126 cycle and we attempt to locate another load in the ready list to
31127 issue with it.
31129 - If the pendulum is -2, then two stores have already been
31130 issued in this cycle, so we increase the priority of the first load
31131 in the ready list to increase its likelihood of being chosen first
31132 in the next cycle.
31134 - If the pendulum is -1, then a single store has been issued in this
31135 cycle and we attempt to locate another store in the ready list to
31136 issue with it, preferring a store to an adjacent memory location to
31137 facilitate store pairing in the store queue.
31139 - If the pendulum is 2, then two loads have already been
31140 issued in this cycle, so we increase the priority of the first store
31141 in the ready list to increase its likelihood of being chosen first
31142 in the next cycle.
31144 - If the pendulum < -2 or > 2, then do nothing.
31146 Note: This code covers the most common scenarios. There exist
31147 non-load/store instructions which make use of the LSU and which
31148 would need to be accounted for to strictly model the behavior
31149 of the machine. Those instructions are currently unaccounted
31150 for to help minimize compile time overhead of this code. */
31152 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31154 int pos;
31155 int i;
31156 rtx_insn *tmp;
31157 rtx load_mem, str_mem;
31159 if (is_store_insn (last_scheduled_insn, &str_mem))
31160 /* Issuing a store, swing the load_store_pendulum to the left */
31161 load_store_pendulum--;
31162 else if (is_load_insn (last_scheduled_insn, &load_mem))
31163 /* Issuing a load, swing the load_store_pendulum to the right */
31164 load_store_pendulum++;
31165 else
31166 return cached_can_issue_more;
31168 /* If the pendulum is balanced, or there is only one instruction on
31169 the ready list, then all is well, so return. */
31170 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31171 return cached_can_issue_more;
31173 if (load_store_pendulum == 1)
31175 /* A load has been issued in this cycle. Scan the ready list
31176 for another load to issue with it */
31177 pos = *pn_ready-1;
31179 while (pos >= 0)
31181 if (is_load_insn (ready[pos], &load_mem))
31183 /* Found a load. Move it to the head of the ready list,
31184 and adjust its priority so that it is more likely to
31185 stay there */
31186 tmp = ready[pos];
31187 for (i=pos; i<*pn_ready-1; i++)
31188 ready[i] = ready[i + 1];
31189 ready[*pn_ready-1] = tmp;
31191 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31192 INSN_PRIORITY (tmp)++;
31193 break;
31195 pos--;
31198 else if (load_store_pendulum == -2)
31200 /* Two stores have been issued in this cycle. Increase the
31201 priority of the first load in the ready list to favor it for
31202 issuing in the next cycle. */
31203 pos = *pn_ready-1;
31205 while (pos >= 0)
31207 if (is_load_insn (ready[pos], &load_mem)
31208 && !sel_sched_p ()
31209 && INSN_PRIORITY_KNOWN (ready[pos]))
31211 INSN_PRIORITY (ready[pos])++;
31213 /* Adjust the pendulum to account for the fact that a load
31214 was found and increased in priority. This is to prevent
31215 increasing the priority of multiple loads */
31216 load_store_pendulum--;
31218 break;
31220 pos--;
31223 else if (load_store_pendulum == -1)
31225 /* A store has been issued in this cycle. Scan the ready list for
31226 another store to issue with it, preferring a store to an adjacent
31227 memory location */
31228 int first_store_pos = -1;
31230 pos = *pn_ready-1;
31232 while (pos >= 0)
31234 if (is_store_insn (ready[pos], &str_mem))
31236 rtx str_mem2;
31237 /* Maintain the index of the first store found on the
31238 list */
31239 if (first_store_pos == -1)
31240 first_store_pos = pos;
31242 if (is_store_insn (last_scheduled_insn, &str_mem2)
31243 && adjacent_mem_locations (str_mem, str_mem2))
31245 /* Found an adjacent store. Move it to the head of the
31246 ready list, and adjust its priority so that it is
31247 more likely to stay there */
31248 tmp = ready[pos];
31249 for (i=pos; i<*pn_ready-1; i++)
31250 ready[i] = ready[i + 1];
31251 ready[*pn_ready-1] = tmp;
31253 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31254 INSN_PRIORITY (tmp)++;
31256 first_store_pos = -1;
31258 break;
31261 pos--;
31264 if (first_store_pos >= 0)
31266 /* An adjacent store wasn't found, but a non-adjacent store was,
31267 so move the non-adjacent store to the front of the ready
31268 list, and adjust its priority so that it is more likely to
31269 stay there. */
31270 tmp = ready[first_store_pos];
31271 for (i=first_store_pos; i<*pn_ready-1; i++)
31272 ready[i] = ready[i + 1];
31273 ready[*pn_ready-1] = tmp;
31274 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31275 INSN_PRIORITY (tmp)++;
31278 else if (load_store_pendulum == 2)
31280 /* Two loads have been issued in this cycle. Increase the priority
31281 of the first store in the ready list to favor it for issuing in
31282 the next cycle. */
31283 pos = *pn_ready-1;
31285 while (pos >= 0)
31287 if (is_store_insn (ready[pos], &str_mem)
31288 && !sel_sched_p ()
31289 && INSN_PRIORITY_KNOWN (ready[pos]))
31291 INSN_PRIORITY (ready[pos])++;
31293 /* Adjust the pendulum to account for the fact that a store
31294 was found and increased in priority. This is to prevent
31295 increasing the priority of multiple stores */
31296 load_store_pendulum++;
31298 break;
31300 pos--;
31305 /* Do Power9 dependent reordering if necessary. */
31306 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31307 && recog_memoized (last_scheduled_insn) >= 0)
31308 return power9_sched_reorder2 (ready, *pn_ready - 1);
31310 return cached_can_issue_more;
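/* Worked example (not part of the original source): if two stores issue
   in one cycle the pendulum goes 0 -> -1 -> -2; at -2 the first load on
   the ready list gets a priority bump and the pendulum is nudged to -3,
   which falls into the "< -2, do nothing" case, so at most one load is
   bumped per imbalance.  The load side is symmetric: 0 -> 1 -> 2 -> 3.  */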
31313 /* Return whether the presence of INSN causes a dispatch group termination
31314 of group WHICH_GROUP.
31316 If WHICH_GROUP == current_group, this function will return true if INSN
31317 causes the termination of the current group (i.e., the dispatch group to
31318 which INSN belongs). This means that INSN will be the last insn in the
31319 group it belongs to.
31321 If WHICH_GROUP == previous_group, this function will return true if INSN
31322 causes the termination of the previous group (i.e., the dispatch group that
31323 precedes the group to which INSN belongs). This means that INSN will be
31324 the first insn in the group it belongs to. */
31326 static bool
31327 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31329 bool first, last;
31331 if (! insn)
31332 return false;
31334 first = insn_must_be_first_in_group (insn);
31335 last = insn_must_be_last_in_group (insn);
31337 if (first && last)
31338 return true;
31340 if (which_group == current_group)
31341 return last;
31342 else if (which_group == previous_group)
31343 return first;
31345 return false;
31349 static bool
31350 insn_must_be_first_in_group (rtx_insn *insn)
31352 enum attr_type type;
31354 if (!insn
31355 || NOTE_P (insn)
31356 || DEBUG_INSN_P (insn)
31357 || GET_CODE (PATTERN (insn)) == USE
31358 || GET_CODE (PATTERN (insn)) == CLOBBER)
31359 return false;
31361 switch (rs6000_cpu)
31363 case PROCESSOR_POWER5:
31364 if (is_cracked_insn (insn))
31365 return true;
31366 /* FALLTHRU */
31367 case PROCESSOR_POWER4:
31368 if (is_microcoded_insn (insn))
31369 return true;
31371 if (!rs6000_sched_groups)
31372 return false;
31374 type = get_attr_type (insn);
31376 switch (type)
31378 case TYPE_MFCR:
31379 case TYPE_MFCRF:
31380 case TYPE_MTCR:
31381 case TYPE_DELAYED_CR:
31382 case TYPE_CR_LOGICAL:
31383 case TYPE_MTJMPR:
31384 case TYPE_MFJMPR:
31385 case TYPE_DIV:
31386 case TYPE_LOAD_L:
31387 case TYPE_STORE_C:
31388 case TYPE_ISYNC:
31389 case TYPE_SYNC:
31390 return true;
31391 default:
31392 break;
31394 break;
31395 case PROCESSOR_POWER6:
31396 type = get_attr_type (insn);
31398 switch (type)
31400 case TYPE_EXTS:
31401 case TYPE_CNTLZ:
31402 case TYPE_TRAP:
31403 case TYPE_MUL:
31404 case TYPE_INSERT:
31405 case TYPE_FPCOMPARE:
31406 case TYPE_MFCR:
31407 case TYPE_MTCR:
31408 case TYPE_MFJMPR:
31409 case TYPE_MTJMPR:
31410 case TYPE_ISYNC:
31411 case TYPE_SYNC:
31412 case TYPE_LOAD_L:
31413 case TYPE_STORE_C:
31414 return true;
31415 case TYPE_SHIFT:
31416 if (get_attr_dot (insn) == DOT_NO
31417 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31418 return true;
31419 else
31420 break;
31421 case TYPE_DIV:
31422 if (get_attr_size (insn) == SIZE_32)
31423 return true;
31424 else
31425 break;
31426 case TYPE_LOAD:
31427 case TYPE_STORE:
31428 case TYPE_FPLOAD:
31429 case TYPE_FPSTORE:
31430 if (get_attr_update (insn) == UPDATE_YES)
31431 return true;
31432 else
31433 break;
31434 default:
31435 break;
31437 break;
31438 case PROCESSOR_POWER7:
31439 type = get_attr_type (insn);
31441 switch (type)
31443 case TYPE_CR_LOGICAL:
31444 case TYPE_MFCR:
31445 case TYPE_MFCRF:
31446 case TYPE_MTCR:
31447 case TYPE_DIV:
31448 case TYPE_ISYNC:
31449 case TYPE_LOAD_L:
31450 case TYPE_STORE_C:
31451 case TYPE_MFJMPR:
31452 case TYPE_MTJMPR:
31453 return true;
31454 case TYPE_MUL:
31455 case TYPE_SHIFT:
31456 case TYPE_EXTS:
31457 if (get_attr_dot (insn) == DOT_YES)
31458 return true;
31459 else
31460 break;
31461 case TYPE_LOAD:
31462 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31463 || get_attr_update (insn) == UPDATE_YES)
31464 return true;
31465 else
31466 break;
31467 case TYPE_STORE:
31468 case TYPE_FPLOAD:
31469 case TYPE_FPSTORE:
31470 if (get_attr_update (insn) == UPDATE_YES)
31471 return true;
31472 else
31473 break;
31474 default:
31475 break;
31477 break;
31478 case PROCESSOR_POWER8:
31479 type = get_attr_type (insn);
31481 switch (type)
31483 case TYPE_CR_LOGICAL:
31484 case TYPE_DELAYED_CR:
31485 case TYPE_MFCR:
31486 case TYPE_MFCRF:
31487 case TYPE_MTCR:
31488 case TYPE_SYNC:
31489 case TYPE_ISYNC:
31490 case TYPE_LOAD_L:
31491 case TYPE_STORE_C:
31492 case TYPE_VECSTORE:
31493 case TYPE_MFJMPR:
31494 case TYPE_MTJMPR:
31495 return true;
31496 case TYPE_SHIFT:
31497 case TYPE_EXTS:
31498 case TYPE_MUL:
31499 if (get_attr_dot (insn) == DOT_YES)
31500 return true;
31501 else
31502 break;
31503 case TYPE_LOAD:
31504 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31505 || get_attr_update (insn) == UPDATE_YES)
31506 return true;
31507 else
31508 break;
31509 case TYPE_STORE:
31510 if (get_attr_update (insn) == UPDATE_YES
31511 && get_attr_indexed (insn) == INDEXED_YES)
31512 return true;
31513 else
31514 break;
31515 default:
31516 break;
31518 break;
31519 default:
31520 break;
31523 return false;
31526 static bool
31527 insn_must_be_last_in_group (rtx_insn *insn)
31529 enum attr_type type;
31531 if (!insn
31532 || NOTE_P (insn)
31533 || DEBUG_INSN_P (insn)
31534 || GET_CODE (PATTERN (insn)) == USE
31535 || GET_CODE (PATTERN (insn)) == CLOBBER)
31536 return false;
31538 switch (rs6000_cpu) {
31539 case PROCESSOR_POWER4:
31540 case PROCESSOR_POWER5:
31541 if (is_microcoded_insn (insn))
31542 return true;
31544 if (is_branch_slot_insn (insn))
31545 return true;
31547 break;
31548 case PROCESSOR_POWER6:
31549 type = get_attr_type (insn);
31551 switch (type)
31553 case TYPE_EXTS:
31554 case TYPE_CNTLZ:
31555 case TYPE_TRAP:
31556 case TYPE_MUL:
31557 case TYPE_FPCOMPARE:
31558 case TYPE_MFCR:
31559 case TYPE_MTCR:
31560 case TYPE_MFJMPR:
31561 case TYPE_MTJMPR:
31562 case TYPE_ISYNC:
31563 case TYPE_SYNC:
31564 case TYPE_LOAD_L:
31565 case TYPE_STORE_C:
31566 return true;
31567 case TYPE_SHIFT:
31568 if (get_attr_dot (insn) == DOT_NO
31569 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31570 return true;
31571 else
31572 break;
31573 case TYPE_DIV:
31574 if (get_attr_size (insn) == SIZE_32)
31575 return true;
31576 else
31577 break;
31578 default:
31579 break;
31581 break;
31582 case PROCESSOR_POWER7:
31583 type = get_attr_type (insn);
31585 switch (type)
31587 case TYPE_ISYNC:
31588 case TYPE_SYNC:
31589 case TYPE_LOAD_L:
31590 case TYPE_STORE_C:
31591 return true;
31592 case TYPE_LOAD:
31593 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31594 && get_attr_update (insn) == UPDATE_YES)
31595 return true;
31596 else
31597 break;
31598 case TYPE_STORE:
31599 if (get_attr_update (insn) == UPDATE_YES
31600 && get_attr_indexed (insn) == INDEXED_YES)
31601 return true;
31602 else
31603 break;
31604 default:
31605 break;
31607 break;
31608 case PROCESSOR_POWER8:
31609 type = get_attr_type (insn);
31611 switch (type)
31613 case TYPE_MFCR:
31614 case TYPE_MTCR:
31615 case TYPE_ISYNC:
31616 case TYPE_SYNC:
31617 case TYPE_LOAD_L:
31618 case TYPE_STORE_C:
31619 return true;
31620 case TYPE_LOAD:
31621 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31622 && get_attr_update (insn) == UPDATE_YES)
31623 return true;
31624 else
31625 break;
31626 case TYPE_STORE:
31627 if (get_attr_update (insn) == UPDATE_YES
31628 && get_attr_indexed (insn) == INDEXED_YES)
31629 return true;
31630 else
31631 break;
31632 default:
31633 break;
31635 break;
31636 default:
31637 break;
31640 return false;
31643 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31644 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31646 static bool
31647 is_costly_group (rtx *group_insns, rtx next_insn)
31649 int i;
31650 int issue_rate = rs6000_issue_rate ();
31652 for (i = 0; i < issue_rate; i++)
31654 sd_iterator_def sd_it;
31655 dep_t dep;
31656 rtx insn = group_insns[i];
31658 if (!insn)
31659 continue;
31661 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31663 rtx next = DEP_CON (dep);
31665 if (next == next_insn
31666 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31667 return true;
31671 return false;
31674 /* Utility function used by redefine_groups.
31675 Check whether it is too costly to schedule NEXT_INSN together with
31676 GROUP_INSNS in the same dispatch group. If so, insert nops before
31677 NEXT_INSN in order to keep it "far" (in a separate group) from
31678 GROUP_INSNS, following one of these schemes, depending on the value
31679 of the flag -minsert-sched-nops=X:
31680 (1) X == sched_finish_regroup_exact: insert exactly as many nops as
31681 needed in order to force NEXT_INSN into a separate group.
31682 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31683 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31684 insertion (whether a group has just ended, how many vacant issue
31685 slots remain in the last group, and how many groups were seen so far). */
31687 static int
31688 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31689 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31690 int *group_count)
31692 rtx nop;
31693 bool force;
31694 int issue_rate = rs6000_issue_rate ();
31695 bool end = *group_end;
31696 int i;
31698 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31699 return can_issue_more;
31701 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31702 return can_issue_more;
31704 force = is_costly_group (group_insns, next_insn);
31705 if (!force)
31706 return can_issue_more;
31708 if (sched_verbose > 6)
31709 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
31710 *group_count, can_issue_more);
31712 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31714 if (*group_end)
31715 can_issue_more = 0;
31717 /* Since only a branch can be issued in the last issue_slot, it is
31718 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31719 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31720 in this case the last nop will start a new group and the branch
31721 will be forced to the new group. */
31722 if (can_issue_more && !is_branch_slot_insn (next_insn))
31723 can_issue_more--;
31725 /* Do we have a special group ending nop? */
31726 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
31727 || rs6000_cpu_attr == CPU_POWER8)
31729 nop = gen_group_ending_nop ();
31730 emit_insn_before (nop, next_insn);
31731 can_issue_more = 0;
31733 else
31734 while (can_issue_more > 0)
31736 nop = gen_nop ();
31737 emit_insn_before (nop, next_insn);
31738 can_issue_more--;
31741 *group_end = true;
31742 return 0;
31745 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31747 int n_nops = rs6000_sched_insert_nops;
31749 /* Nops can't be issued from the branch slot, so the effective
31750 issue_rate for nops is 'issue_rate - 1'. */
31751 if (can_issue_more == 0)
31752 can_issue_more = issue_rate;
31753 can_issue_more--;
31754 if (can_issue_more == 0)
31756 can_issue_more = issue_rate - 1;
31757 (*group_count)++;
31758 end = true;
31759 for (i = 0; i < issue_rate; i++)
31761 group_insns[i] = 0;
31765 while (n_nops > 0)
31767 nop = gen_nop ();
31768 emit_insn_before (nop, next_insn);
31769 if (can_issue_more == issue_rate - 1) /* new group begins */
31770 end = false;
31771 can_issue_more--;
31772 if (can_issue_more == 0)
31774 can_issue_more = issue_rate - 1;
31775 (*group_count)++;
31776 end = true;
31777 for (i = 0; i < issue_rate; i++)
31779 group_insns[i] = 0;
31782 n_nops--;
31785 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31786 can_issue_more++;
31788 /* Is next_insn going to start a new group? */
31789 *group_end
31790 = (end
31791 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31792 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31793 || (can_issue_more < issue_rate &&
31794 insn_terminates_group_p (next_insn, previous_group)));
31795 if (*group_end && end)
31796 (*group_count)--;
31798 if (sched_verbose > 6)
31799 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31800 *group_count, can_issue_more);
31801 return can_issue_more;
31804 return can_issue_more;
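/* Worked example (not part of the original source): with issue_rate == 4,
   -minsert-sched-nops=sched_finish_regroup_exact, can_issue_more == 3 and
   a costly non-branch NEXT_INSN, the exact scheme above emits
   can_issue_more - 1 == 2 nops (on CPUs without a group-ending nop),
   leaving only the branch slot vacant so that NEXT_INSN starts a new
   group.  On POWER6/7/8 a single group-ending nop is emitted instead.  */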
31807 /* This function tries to synch the dispatch groups that the compiler "sees"
31808 with the dispatch groups that the processor dispatcher is expected to
31809 form in practice. It tries to achieve this synchronization by forcing the
31810 estimated processor grouping on the compiler (as opposed to the function
31811 'pad_groups' which tries to force the scheduler's grouping on the processor).
31813 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31814 examines the (estimated) dispatch groups that will be formed by the processor
31815 dispatcher. It marks these group boundaries to reflect the estimated
31816 processor grouping, overriding the grouping that the scheduler had marked.
31817 Depending on the value of the flag '-minsert-sched-nops' this function can
31818 force certain insns into separate groups or force a certain distance between
31819 them by inserting nops, for example, if there exists a "costly dependence"
31820 between the insns.
31822 The function estimates the group boundaries that the processor will form as
31823 follows: It keeps track of how many vacant issue slots are available after
31824 each insn. A subsequent insn will start a new group if one of the following
31825 4 cases applies:
31826 - no more vacant issue slots remain in the current dispatch group.
31827 - only the last issue slot, which is the branch slot, is vacant, but the next
31828 insn is not a branch.
31829 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31830 which means that a cracked insn (which occupies two issue slots) can't be
31831 issued in this group.
31832 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31833 start a new group. */
31835 static int
31836 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31837 rtx_insn *tail)
31839 rtx_insn *insn, *next_insn;
31840 int issue_rate;
31841 int can_issue_more;
31842 int slot, i;
31843 bool group_end;
31844 int group_count = 0;
31845 rtx *group_insns;
31847 /* Initialize. */
31848 issue_rate = rs6000_issue_rate ();
31849 group_insns = XALLOCAVEC (rtx, issue_rate);
31850 for (i = 0; i < issue_rate; i++)
31852 group_insns[i] = 0;
31854 can_issue_more = issue_rate;
31855 slot = 0;
31856 insn = get_next_active_insn (prev_head_insn, tail);
31857 group_end = false;
31859 while (insn != NULL_RTX)
31861 slot = (issue_rate - can_issue_more);
31862 group_insns[slot] = insn;
31863 can_issue_more =
31864 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31865 if (insn_terminates_group_p (insn, current_group))
31866 can_issue_more = 0;
31868 next_insn = get_next_active_insn (insn, tail);
31869 if (next_insn == NULL_RTX)
31870 return group_count + 1;
31872 /* Is next_insn going to start a new group? */
31873 group_end
31874 = (can_issue_more == 0
31875 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31876 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31877 || (can_issue_more < issue_rate &&
31878 insn_terminates_group_p (next_insn, previous_group)));
31880 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
31881 next_insn, &group_end, can_issue_more,
31882 &group_count);
31884 if (group_end)
31886 group_count++;
31887 can_issue_more = 0;
31888 for (i = 0; i < issue_rate; i++)
31890 group_insns[i] = 0;
31894 if (GET_MODE (next_insn) == TImode && can_issue_more)
31895 PUT_MODE (next_insn, VOIDmode);
31896 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
31897 PUT_MODE (next_insn, TImode);
31899 insn = next_insn;
31900 if (can_issue_more == 0)
31901 can_issue_more = issue_rate;
31902 } /* while */
31904 return group_count;
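/* Illustration (not part of the original source): the four "next insn
   starts a new group" tests used above, restated as a standalone
   predicate over plain flags.  VACANT is the number of vacant issue
   slots left in the current group.  */
static int
demo_starts_new_group (int vacant, int issue_rate, int next_is_branch,
                       int next_is_cracked, int next_must_be_first)
{
  return (vacant == 0                          /* group is full */
          || (vacant == 1 && !next_is_branch)  /* only the branch slot left */
          || (vacant <= 2 && next_is_cracked)  /* cracked insn needs 2 slots */
          || (vacant < issue_rate && next_must_be_first));
}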
31907 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
31908 dispatch group boundaries that the scheduler had marked. Pad with nops
31909 any dispatch groups which have vacant issue slots, in order to force the
31910 scheduler's grouping on the processor dispatcher. The function
31911 returns the number of dispatch groups found. */
31913 static int
31914 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31915 rtx_insn *tail)
31917 rtx_insn *insn, *next_insn;
31918 rtx nop;
31919 int issue_rate;
31920 int can_issue_more;
31921 int group_end;
31922 int group_count = 0;
31924 /* Initialize issue_rate. */
31925 issue_rate = rs6000_issue_rate ();
31926 can_issue_more = issue_rate;
31928 insn = get_next_active_insn (prev_head_insn, tail);
31929 next_insn = get_next_active_insn (insn, tail);
31931 while (insn != NULL_RTX)
31933 can_issue_more =
31934 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31936 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
31938 if (next_insn == NULL_RTX)
31939 break;
31941 if (group_end)
31943 /* If the scheduler had marked group termination at this location
31944 (between insn and next_insn), and neither insn nor next_insn will
31945 force group termination, pad the group with nops to force group
31946 termination. */
31947 if (can_issue_more
31948 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
31949 && !insn_terminates_group_p (insn, current_group)
31950 && !insn_terminates_group_p (next_insn, previous_group))
31952 if (!is_branch_slot_insn (next_insn))
31953 can_issue_more--;
31955 while (can_issue_more)
31957 nop = gen_nop ();
31958 emit_insn_before (nop, next_insn);
31959 can_issue_more--;
31963 can_issue_more = issue_rate;
31964 group_count++;
31967 insn = next_insn;
31968 next_insn = get_next_active_insn (insn, tail);
31971 return group_count;
31974 /* We're beginning a new block. Initialize data structures as necessary. */
31976 static void
31977 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
31978 int sched_verbose ATTRIBUTE_UNUSED,
31979 int max_ready ATTRIBUTE_UNUSED)
31981 last_scheduled_insn = NULL;
31982 load_store_pendulum = 0;
31983 divide_cnt = 0;
31984 vec_pairing = 0;
31987 /* The following function is called at the end of scheduling BB.
31988 After reload, it inserts nops to enforce insn group bundling. */
31990 static void
31991 rs6000_sched_finish (FILE *dump, int sched_verbose)
31993 int n_groups;
31995 if (sched_verbose)
31996 fprintf (dump, "=== Finishing schedule.\n");
31998 if (reload_completed && rs6000_sched_groups)
32000 /* Do not run sched_finish hook when selective scheduling enabled. */
32001 if (sel_sched_p ())
32002 return;
32004 if (rs6000_sched_insert_nops == sched_finish_none)
32005 return;
32007 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32008 n_groups = pad_groups (dump, sched_verbose,
32009 current_sched_info->prev_head,
32010 current_sched_info->next_tail);
32011 else
32012 n_groups = redefine_groups (dump, sched_verbose,
32013 current_sched_info->prev_head,
32014 current_sched_info->next_tail);
32016 if (sched_verbose >= 6)
32018 fprintf (dump, "ngroups = %d\n", n_groups);
32019 print_rtl (dump, current_sched_info->prev_head);
32020 fprintf (dump, "Done finish_sched\n");
32025 struct rs6000_sched_context
32027 short cached_can_issue_more;
32028 rtx_insn *last_scheduled_insn;
32029 int load_store_pendulum;
32030 int divide_cnt;
32031 int vec_pairing;
32034 typedef struct rs6000_sched_context rs6000_sched_context_def;
32035 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32037 /* Allocate store for new scheduling context. */
32038 static void *
32039 rs6000_alloc_sched_context (void)
32041 return xmalloc (sizeof (rs6000_sched_context_def));
32044 /* If CLEAN_P is true, initialize _SC with clean data;
32045 otherwise initialize it from the global context. */
32046 static void
32047 rs6000_init_sched_context (void *_sc, bool clean_p)
32049 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32051 if (clean_p)
32053 sc->cached_can_issue_more = 0;
32054 sc->last_scheduled_insn = NULL;
32055 sc->load_store_pendulum = 0;
32056 sc->divide_cnt = 0;
32057 sc->vec_pairing = 0;
32059 else
32061 sc->cached_can_issue_more = cached_can_issue_more;
32062 sc->last_scheduled_insn = last_scheduled_insn;
32063 sc->load_store_pendulum = load_store_pendulum;
32064 sc->divide_cnt = divide_cnt;
32065 sc->vec_pairing = vec_pairing;
32069 /* Sets the global scheduling context to the one pointed to by _SC. */
32070 static void
32071 rs6000_set_sched_context (void *_sc)
32073 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32075 gcc_assert (sc != NULL);
32077 cached_can_issue_more = sc->cached_can_issue_more;
32078 last_scheduled_insn = sc->last_scheduled_insn;
32079 load_store_pendulum = sc->load_store_pendulum;
32080 divide_cnt = sc->divide_cnt;
32081 vec_pairing = sc->vec_pairing;
32084 /* Free _SC. */
32085 static void
32086 rs6000_free_sched_context (void *_sc)
32088 gcc_assert (_sc != NULL);
32090 free (_sc);
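/* Illustration (not part of the original source): the calling sequence
   the selective scheduler is expected to use for the four context hooks
   above; this hypothetical driver exists only to show the protocol.  */
static void
demo_sched_context_roundtrip (void)
{
  void *sc = rs6000_alloc_sched_context ();
  rs6000_init_sched_context (sc, true);  /* or false: snapshot the globals */
  /* ... scheduling mutates the global state here ... */
  rs6000_set_sched_context (sc);         /* copy the saved state back */
  rs6000_free_sched_context (sc);
}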
32093 static bool
32094 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32096 switch (get_attr_type (insn))
32098 case TYPE_DIV:
32099 case TYPE_SDIV:
32100 case TYPE_DDIV:
32101 case TYPE_VECDIV:
32102 case TYPE_SSQRT:
32103 case TYPE_DSQRT:
32104 return false;
32106 default:
32107 return true;
32111 /* Length in units of the trampoline for entering a nested function. */
32114 rs6000_trampoline_size (void)
32116 int ret = 0;
32118 switch (DEFAULT_ABI)
32120 default:
32121 gcc_unreachable ();
32123 case ABI_AIX:
32124 ret = (TARGET_32BIT) ? 12 : 24;
32125 break;
32127 case ABI_ELFv2:
32128 gcc_assert (!TARGET_32BIT);
32129 ret = 32;
32130 break;
32132 case ABI_DARWIN:
32133 case ABI_V4:
32134 ret = (TARGET_32BIT) ? 40 : 48;
32135 break;
32138 return ret;
32141 /* Emit RTL insns to initialize the variable parts of a trampoline.
32142 FNADDR is an RTX for the address of the function's pure code.
32143 CXT is an RTX for the static chain value for the function. */
32145 static void
32146 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32148 int regsize = (TARGET_32BIT) ? 4 : 8;
32149 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32150 rtx ctx_reg = force_reg (Pmode, cxt);
32151 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32153 switch (DEFAULT_ABI)
32155 default:
32156 gcc_unreachable ();
32158 /* Under AIX, just build the 3-word function descriptor. */
32159 case ABI_AIX:
32161 rtx fnmem, fn_reg, toc_reg;
32163 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32164 error ("You cannot take the address of a nested function if you use "
32165 "the -mno-pointers-to-nested-functions option.");
32167 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32168 fn_reg = gen_reg_rtx (Pmode);
32169 toc_reg = gen_reg_rtx (Pmode);
32171 /* Macro to shorten the code expansions below. */
32172 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32174 m_tramp = replace_equiv_address (m_tramp, addr);
32176 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32177 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32178 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32179 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32180 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32182 # undef MEM_PLUS
32184 break;
32186 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32187 case ABI_ELFv2:
32188 case ABI_DARWIN:
32189 case ABI_V4:
32190 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32191 LCT_NORMAL, VOIDmode, 4,
32192 addr, Pmode,
32193 GEN_INT (rs6000_trampoline_size ()), SImode,
32194 fnaddr, Pmode,
32195 ctx_reg, Pmode);
32196 break;
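/* Illustration (not part of the original source): a hypothetical struct
   view of the 3-word descriptor the AIX case above writes into the
   trampoline (each word is regsize bytes, i.e. 4 or 8).  */
struct demo_aix_trampoline
{
  void *code_addr;     /* word 0: copied from FNADDR's descriptor */
  void *toc;           /* word 1: copied from FNADDR's descriptor */
  void *static_chain;  /* word 2: CXT for the nested function */
};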
32201 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32202 identifier as an argument, so the front end shouldn't look it up. */
32204 static bool
32205 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32207 return is_attribute_p ("altivec", attr_id);
32210 /* Handle the "altivec" attribute. The attribute may have
32211 arguments as follows:
32213 __attribute__((altivec(vector__)))
32214 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32215 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32217 and may appear more than once (e.g., 'vector bool char') in a
32218 given declaration. */
32220 static tree
32221 rs6000_handle_altivec_attribute (tree *node,
32222 tree name ATTRIBUTE_UNUSED,
32223 tree args,
32224 int flags ATTRIBUTE_UNUSED,
32225 bool *no_add_attrs)
32227 tree type = *node, result = NULL_TREE;
32228 machine_mode mode;
32229 int unsigned_p;
32230 char altivec_type
32231 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32232 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32233 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32234 : '?');
32236 while (POINTER_TYPE_P (type)
32237 || TREE_CODE (type) == FUNCTION_TYPE
32238 || TREE_CODE (type) == METHOD_TYPE
32239 || TREE_CODE (type) == ARRAY_TYPE)
32240 type = TREE_TYPE (type);
32242 mode = TYPE_MODE (type);
32244 /* Check for invalid AltiVec type qualifiers. */
32245 if (type == long_double_type_node)
32246 error ("use of %<long double%> in AltiVec types is invalid");
32247 else if (type == boolean_type_node)
32248 error ("use of boolean types in AltiVec types is invalid");
32249 else if (TREE_CODE (type) == COMPLEX_TYPE)
32250 error ("use of %<complex%> in AltiVec types is invalid");
32251 else if (DECIMAL_FLOAT_MODE_P (mode))
32252 error ("use of decimal floating point types in AltiVec types is invalid");
32253 else if (!TARGET_VSX)
32255 if (type == long_unsigned_type_node || type == long_integer_type_node)
32257 if (TARGET_64BIT)
32258 error ("use of %<long%> in AltiVec types is invalid for "
32259 "64-bit code without -mvsx");
32260 else if (rs6000_warn_altivec_long)
32261 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32262 "use %<int%>");
32264 else if (type == long_long_unsigned_type_node
32265 || type == long_long_integer_type_node)
32266 error ("use of %<long long%> in AltiVec types is invalid without "
32267 "-mvsx");
32268 else if (type == double_type_node)
32269 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
32272 switch (altivec_type)
32274 case 'v':
32275 unsigned_p = TYPE_UNSIGNED (type);
32276 switch (mode)
32278 case TImode:
32279 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32280 break;
32281 case DImode:
32282 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32283 break;
32284 case SImode:
32285 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32286 break;
32287 case HImode:
32288 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32289 break;
32290 case QImode:
32291 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32292 break;
32293 case SFmode: result = V4SF_type_node; break;
32294 case DFmode: result = V2DF_type_node; break;
32295 /* If the user says 'vector int bool', we may be handed the 'bool'
32296 attribute _before_ the 'vector' attribute, and so select the
32297 proper type in the 'b' case below. */
32298 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
32299 case V2DImode: case V2DFmode:
32300 result = type;
32301 default: break;
32303 break;
32304 case 'b':
32305 switch (mode)
32307 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
32308 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
32309 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
32310 case QImode: case V16QImode: result = bool_V16QI_type_node;
32311 default: break;
32313 break;
32314 case 'p':
32315 switch (mode)
32317 case V8HImode: result = pixel_V8HI_type_node;
32318 default: break;
32320 default: break;
32323 /* Propagate qualifiers attached to the element type
32324 onto the vector type. */
32325 if (result && result != type && TYPE_QUALS (type))
32326 result = build_qualified_type (result, TYPE_QUALS (type));
32328 *no_add_attrs = true; /* No need to hang on to the attribute. */
32330 if (result)
32331 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32333 return NULL_TREE;
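/* Usage illustration (not part of the original source): roughly how the
   forms accepted above combine (the exact keyword expansion is the front
   end's business; this is only a sketch).  For 'vector unsigned int',
   the 'v' case sees SImode and yields unsigned_V4SI_type_node; for
   'vector bool int', the 'b' case sees SImode or V4SImode (depending on
   attribute order) and yields bool_V4SI_type_node; for 'vector pixel',
   the 'p' case sees V8HImode and yields pixel_V8HI_type_node.  */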
32336 /* AltiVec defines four built-in scalar types that serve as vector
32337 elements; we must teach the compiler how to mangle them. */
32339 static const char *
32340 rs6000_mangle_type (const_tree type)
32342 type = TYPE_MAIN_VARIANT (type);
32344 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32345 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32346 return NULL;
32348 if (type == bool_char_type_node) return "U6__boolc";
32349 if (type == bool_short_type_node) return "U6__bools";
32350 if (type == pixel_type_node) return "u7__pixel";
32351 if (type == bool_int_type_node) return "U6__booli";
32352 if (type == bool_long_type_node) return "U6__booll";
32354 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32355 "g" for IBM extended double, no matter whether it is long double (using
32356 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32357 if (TARGET_FLOAT128_TYPE)
32359 if (type == ieee128_float_type_node)
32360 return "U10__float128";
32362 if (type == ibm128_float_type_node)
32363 return "g";
32365 if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
32366 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32369 /* Mangle IBM extended float long double as `g' (__float128) on
32370 powerpc*-linux where long-double-64 previously was the default. */
32371 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32372 && TARGET_ELF
32373 && TARGET_LONG_DOUBLE_128
32374 && !TARGET_IEEEQUAD)
32375 return "g";
32377 /* For all other types, use normal C++ mangling. */
32378 return NULL;
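/* Worked examples (not part of the original source), reading off the
   returns above:
     vector bool int element type (bool_int_type_node)  -> "U6__booli"
     vector pixel element type (pixel_type_node)        -> "u7__pixel"
     __float128 (ieee128_float_type_node)               -> "U10__float128"
     __ibm128 (ibm128_float_type_node)                  -> "g"
   Anything else falls through to the default C++ mangling (NULL).  */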
32381 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32382 struct attribute_spec.handler. */
32384 static tree
32385 rs6000_handle_longcall_attribute (tree *node, tree name,
32386 tree args ATTRIBUTE_UNUSED,
32387 int flags ATTRIBUTE_UNUSED,
32388 bool *no_add_attrs)
32390 if (TREE_CODE (*node) != FUNCTION_TYPE
32391 && TREE_CODE (*node) != FIELD_DECL
32392 && TREE_CODE (*node) != TYPE_DECL)
32394 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32395 name);
32396 *no_add_attrs = true;
32399 return NULL_TREE;
32402 /* Set longcall attributes on all functions declared when
32403 rs6000_default_long_calls is true. */
32404 static void
32405 rs6000_set_default_type_attributes (tree type)
32407 if (rs6000_default_long_calls
32408 && (TREE_CODE (type) == FUNCTION_TYPE
32409 || TREE_CODE (type) == METHOD_TYPE))
32410 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32411 NULL_TREE,
32412 TYPE_ATTRIBUTES (type));
32414 #if TARGET_MACHO
32415 darwin_set_default_type_attributes (type);
32416 #endif
32419 /* Return a reference suitable for calling a function with the
32420 longcall attribute. */
32423 rs6000_longcall_ref (rtx call_ref)
32425 const char *call_name;
32426 tree node;
32428 if (GET_CODE (call_ref) != SYMBOL_REF)
32429 return call_ref;
32431 /* System V adds '.' to the internal name, so skip any leading dots. */
32432 call_name = XSTR (call_ref, 0);
32433 if (*call_name == '.')
32435 while (*call_name == '.')
32436 call_name++;
32438 node = get_identifier (call_name);
32439 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32442 return force_reg (Pmode, call_ref);
32445 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32446 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32447 #endif
32449 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32450 struct attribute_spec.handler. */
32451 static tree
32452 rs6000_handle_struct_attribute (tree *node, tree name,
32453 tree args ATTRIBUTE_UNUSED,
32454 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32456 tree *type = NULL;
32457 if (DECL_P (*node))
32459 if (TREE_CODE (*node) == TYPE_DECL)
32460 type = &TREE_TYPE (*node);
32462 else
32463 type = node;
32465 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32466 || TREE_CODE (*type) == UNION_TYPE)))
32468 warning (OPT_Wattributes, "%qE attribute ignored", name);
32469 *no_add_attrs = true;
32472 else if ((is_attribute_p ("ms_struct", name)
32473 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32474 || ((is_attribute_p ("gcc_struct", name)
32475 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32477 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32478 name);
32479 *no_add_attrs = true;
32482 return NULL_TREE;
32485 static bool
32486 rs6000_ms_bitfield_layout_p (const_tree record_type)
32488 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32489 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32490 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
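/* Usage illustration (not part of the original source): with
   TARGET_USE_MS_BITFIELD_LAYOUT enabled, records get MS bit-field layout
   unless tagged gcc_struct, and ms_struct forces MS layout regardless of
   the target default:

     struct __attribute__ ((ms_struct)) A { int a : 3; char b : 2; };
     struct __attribute__ ((gcc_struct)) B { int a : 3; char b : 2; };  */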
32493 #ifdef USING_ELFOS_H
32495 /* A get_unnamed_section callback, used for switching to toc_section. */
32497 static void
32498 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32500 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32501 && TARGET_MINIMAL_TOC)
32503 if (!toc_initialized)
32505 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32506 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32507 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32508 fprintf (asm_out_file, "\t.tc ");
32509 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32510 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32511 fprintf (asm_out_file, "\n");
32513 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32514 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32515 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32516 fprintf (asm_out_file, " = .+32768\n");
32517 toc_initialized = 1;
32519 else
32520 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32522 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32524 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32525 if (!toc_initialized)
32527 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32528 toc_initialized = 1;
32531 else
32533 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32534 if (!toc_initialized)
32536 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32537 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32538 fprintf (asm_out_file, " = .+32768\n");
32539 toc_initialized = 1;
32544 /* Implement TARGET_ASM_INIT_SECTIONS. */
32546 static void
32547 rs6000_elf_asm_init_sections (void)
32549 toc_section
32550 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32552 sdata2_section
32553 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32554 SDATA2_SECTION_ASM_OP);
32557 /* Implement TARGET_SELECT_RTX_SECTION. */
32559 static section *
32560 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32561 unsigned HOST_WIDE_INT align)
32563 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32564 return toc_section;
32565 else
32566 return default_elf_select_rtx_section (mode, x, align);
32569 /* For a SYMBOL_REF, set generic flags and then perform some
32570 target-specific processing.
32572 When the AIX ABI is requested on a non-AIX system, replace the
32573 function name with the real name (with a leading .) rather than the
32574 function descriptor name. This saves a lot of overriding code to
32575 read the prefixes. */
32577 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32578 static void
32579 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32581 default_encode_section_info (decl, rtl, first);
32583 if (first
32584 && TREE_CODE (decl) == FUNCTION_DECL
32585 && !TARGET_AIX
32586 && DEFAULT_ABI == ABI_AIX)
32588 rtx sym_ref = XEXP (rtl, 0);
32589 size_t len = strlen (XSTR (sym_ref, 0));
32590 char *str = XALLOCAVEC (char, len + 2);
32591 str[0] = '.';
32592 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32593 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32597 static inline bool
32598 compare_section_name (const char *section, const char *templ)
32600 int len;
32602 len = strlen (templ);
32603 return (strncmp (section, templ, len) == 0
32604 && (section[len] == 0 || section[len] == '.'));
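/* Examples (not part of the original source): TEMPL must match SECTION
   exactly or as a dot-separated prefix:
     compare_section_name (".sdata", ".sdata")      -> true
     compare_section_name (".sdata.foo", ".sdata")  -> true
     compare_section_name (".sdata2", ".sdata")     -> false
   (the character after the prefix is '2', not '\0' or '.'), which is
   why ".sdata2" is checked separately by the caller below.  */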
32607 bool
32608 rs6000_elf_in_small_data_p (const_tree decl)
32610 if (rs6000_sdata == SDATA_NONE)
32611 return false;
32613 /* We want to merge strings, so we never consider them small data. */
32614 if (TREE_CODE (decl) == STRING_CST)
32615 return false;
32617 /* Functions are never in the small data area. */
32618 if (TREE_CODE (decl) == FUNCTION_DECL)
32619 return false;
32621 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32623 const char *section = DECL_SECTION_NAME (decl);
32624 if (compare_section_name (section, ".sdata")
32625 || compare_section_name (section, ".sdata2")
32626 || compare_section_name (section, ".gnu.linkonce.s")
32627 || compare_section_name (section, ".sbss")
32628 || compare_section_name (section, ".sbss2")
32629 || compare_section_name (section, ".gnu.linkonce.sb")
32630 || strcmp (section, ".PPC.EMB.sdata0") == 0
32631 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32632 return true;
32634 else
32636 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32638 if (size > 0
32639 && size <= g_switch_value
32640 /* If it's not public, and we're not going to reference it there,
32641 there's no need to put it in the small data section. */
32642 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32643 return true;
32646 return false;
32649 #endif /* USING_ELFOS_H */
32651 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32653 static bool
32654 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32656 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32659 /* Do not place thread-local symbols refs in the object blocks. */
32661 static bool
32662 rs6000_use_blocks_for_decl_p (const_tree decl)
32664 return !DECL_THREAD_LOCAL_P (decl);
32667 /* Return a REG that occurs in ADDR with coefficient 1.
32668 ADDR can be effectively incremented by incrementing REG.
32670 r0 is special and we must not select it as an address
32671 register by this routine since our caller will try to
32672 increment the returned register via an "la" instruction. */
32675 find_addr_reg (rtx addr)
32677 while (GET_CODE (addr) == PLUS)
32679 if (GET_CODE (XEXP (addr, 0)) == REG
32680 && REGNO (XEXP (addr, 0)) != 0)
32681 addr = XEXP (addr, 0);
32682 else if (GET_CODE (XEXP (addr, 1)) == REG
32683 && REGNO (XEXP (addr, 1)) != 0)
32684 addr = XEXP (addr, 1);
32685 else if (CONSTANT_P (XEXP (addr, 0)))
32686 addr = XEXP (addr, 1);
32687 else if (CONSTANT_P (XEXP (addr, 1)))
32688 addr = XEXP (addr, 0);
32689 else
32690 gcc_unreachable ();
32692 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32693 return addr;
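/* Worked example (not part of the original source): for ADDR of the form
   (plus (plus (reg 9) (reg 10)) (const_int 4)), the constant operand is
   stripped first, then the walk descends into the inner PLUS and returns
   (reg 9), the first non-r0 REG found with coefficient 1.  */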
32696 void
32697 rs6000_fatal_bad_address (rtx op)
32699 fatal_insn ("bad address", op);
32702 #if TARGET_MACHO
32704 typedef struct branch_island_d {
32705 tree function_name;
32706 tree label_name;
32707 int line_number;
32708 } branch_island;
32711 static vec<branch_island, va_gc> *branch_islands;
32713 /* Remember to generate a branch island for far calls to the given
32714 function. */
32716 static void
32717 add_compiler_branch_island (tree label_name, tree function_name,
32718 int line_number)
32720 branch_island bi = {function_name, label_name, line_number};
32721 vec_safe_push (branch_islands, bi);
32724 /* Generate far-jump branch islands for everything recorded in
32725 branch_islands. Invoked immediately after the last instruction of
32726 the epilogue has been emitted; the branch islands must be appended
32727 to, and contiguous with, the function body. Mach-O stubs are
32728 generated in machopic_output_stub(). */
32730 static void
32731 macho_branch_islands (void)
32733 char tmp_buf[512];
32735 while (!vec_safe_is_empty (branch_islands))
32737 branch_island *bi = &branch_islands->last ();
32738 const char *label = IDENTIFIER_POINTER (bi->label_name);
32739 const char *name = IDENTIFIER_POINTER (bi->function_name);
32740 char name_buf[512];
32741 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32742 if (name[0] == '*' || name[0] == '&')
32743 strcpy (name_buf, name+1);
32744 else
32746 name_buf[0] = '_';
32747 strcpy (name_buf+1, name);
32749 strcpy (tmp_buf, "\n");
32750 strcat (tmp_buf, label);
32751 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32752 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32753 dbxout_stabd (N_SLINE, bi->line_number);
32754 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32755 if (flag_pic)
32757 if (TARGET_LINK_STACK)
32759 char name[32];
32760 get_ppc476_thunk_name (name);
32761 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32762 strcat (tmp_buf, name);
32763 strcat (tmp_buf, "\n");
32764 strcat (tmp_buf, label);
32765 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32767 else
32769 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32770 strcat (tmp_buf, label);
32771 strcat (tmp_buf, "_pic\n");
32772 strcat (tmp_buf, label);
32773 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32776 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32777 strcat (tmp_buf, name_buf);
32778 strcat (tmp_buf, " - ");
32779 strcat (tmp_buf, label);
32780 strcat (tmp_buf, "_pic)\n");
32782 strcat (tmp_buf, "\tmtlr r0\n");
32784 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32785 strcat (tmp_buf, name_buf);
32786 strcat (tmp_buf, " - ");
32787 strcat (tmp_buf, label);
32788 strcat (tmp_buf, "_pic)\n");
32790 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32792 else
32794 strcat (tmp_buf, ":\nlis r12,hi16(");
32795 strcat (tmp_buf, name_buf);
32796 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32797 strcat (tmp_buf, name_buf);
32798 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32800 output_asm_insn (tmp_buf, 0);
32801 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32802 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32803 dbxout_stabd (N_SLINE, bi->line_number);
32804 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32805 branch_islands->pop ();
32809 /* NO_PREVIOUS_DEF checks the list of branch islands for whether the
32810 given function name has already been recorded. */
32812 static int
32813 no_previous_def (tree function_name)
32815 branch_island *bi;
32816 unsigned ix;
32818 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32819 if (function_name == bi->function_name)
32820 return 0;
32821 return 1;
32824 /* GET_PREV_LABEL gets the label name from the previous definition of
32825 the function. */
32827 static tree
32828 get_prev_label (tree function_name)
32830 branch_island *bi;
32831 unsigned ix;
32833 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32834 if (function_name == bi->function_name)
32835 return bi->label_name;
32836 return NULL_TREE;
32839 /* INSN is either a function call or a millicode call. It may have an
32840 unconditional jump in its delay slot.
32842 CALL_DEST is the routine we are calling. */
32844 char *
32845 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
32846 int cookie_operand_number)
32848 static char buf[256];
32849 if (darwin_emit_branch_islands
32850 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
32851 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
32853 tree labelname;
32854 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
32856 if (no_previous_def (funname))
32858 rtx label_rtx = gen_label_rtx ();
32859 char *label_buf, temp_buf[256];
32860 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32861 CODE_LABEL_NUMBER (label_rtx));
32862 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32863 labelname = get_identifier (label_buf);
32864 add_compiler_branch_island (labelname, funname, insn_line (insn));
32866 else
32867 labelname = get_prev_label (funname);
32869 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32870 instruction will reach 'foo', otherwise link as 'bl L42'".
32871 "L42" should be a 'branch island', that will do a far jump to
32872 'foo'. Branch islands are generated in
32873 macho_branch_islands(). */
32874 sprintf (buf, "jbsr %%z%d,%.246s",
32875 dest_operand_number, IDENTIFIER_POINTER (labelname));
32877 else
32878 sprintf (buf, "bl %%z%d", dest_operand_number);
32879 return buf;
32882 /* Generate PIC and indirect symbol stubs. */
32884 void
32885 machopic_output_stub (FILE *file, const char *symb, const char *stub)
32887 unsigned int length;
32888 char *symbol_name, *lazy_ptr_name;
32889 char *local_label_0;
32890 static int label = 0;
32892 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
32893 symb = (*targetm.strip_name_encoding) (symb);
32896 length = strlen (symb);
32897 symbol_name = XALLOCAVEC (char, length + 32);
32898 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
32900 lazy_ptr_name = XALLOCAVEC (char, length + 32);
32901 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
32903 if (flag_pic == 2)
32904 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
32905 else
32906 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
32908 if (flag_pic == 2)
32910 fprintf (file, "\t.align 5\n");
32912 fprintf (file, "%s:\n", stub);
32913 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32915 label++;
32916 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
32917 sprintf (local_label_0, "\"L%011d$spb\"", label);
32919 fprintf (file, "\tmflr r0\n");
32920 if (TARGET_LINK_STACK)
32922 char name[32];
32923 get_ppc476_thunk_name (name);
32924 fprintf (file, "\tbl %s\n", name);
32925 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32927 else
32929 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
32930 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32932 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
32933 lazy_ptr_name, local_label_0);
32934 fprintf (file, "\tmtlr r0\n");
32935 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
32936 (TARGET_64BIT ? "ldu" : "lwzu"),
32937 lazy_ptr_name, local_label_0);
32938 fprintf (file, "\tmtctr r12\n");
32939 fprintf (file, "\tbctr\n");
32941 else
32943 fprintf (file, "\t.align 4\n");
32945 fprintf (file, "%s:\n", stub);
32946 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32948 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
32949 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
32950 (TARGET_64BIT ? "ldu" : "lwzu"),
32951 lazy_ptr_name);
32952 fprintf (file, "\tmtctr r12\n");
32953 fprintf (file, "\tbctr\n");
32956 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
32957 fprintf (file, "%s:\n", lazy_ptr_name);
32958 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32959 fprintf (file, "%sdyld_stub_binding_helper\n",
32960 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
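/* [Editor's note]  For orientation, the -fPIC (flag_pic == 2) branch of
   the routine above emits a stub of roughly this shape on a 32-bit
   target without TARGET_LINK_STACK (the stub and lazy-pointer label
   names here illustrate Darwin's conventions, not literal output):

	L_foo$stub:
		.indirect_symbol _foo
		mflr r0
		bcl 20,31,"L00000000001$spb"
	"L00000000001$spb":
		mflr r11
		addis r11,r11,ha16(L_foo$lazy_ptr-"L00000000001$spb")
		mtlr r0
		lwzu r12,lo16(L_foo$lazy_ptr-"L00000000001$spb")(r11)
		mtctr r12
		bctr
	L_foo$lazy_ptr:
		.indirect_symbol _foo
		.long dyld_stub_binding_helper
   */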
32963 /* Legitimize PIC addresses. If the address is already
32964 position-independent, we return ORIG. Newly generated
32965 position-independent addresses go into a reg. This is REG if non
32966 zero, otherwise we allocate register(s) as necessary. */
32968 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
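/* [Editor's sketch]  SMALL_INT folds the two-sided range test
   -0x8000 <= x < 0x8000 into one unsigned compare: adding 0x8000 maps
   exactly the signed 16-bit values onto [0, 0x10000).  Standalone check
   of the trick (small_int_p is an illustrative name):  */
#include <stdint.h>

static int
small_int_p (uint64_t x)   /* x holds a sign-extended value, as UINTVAL does */
{
  return (x + 0x8000) < 0x10000;
}
/* small_int_p ((uint64_t) (int64_t) -0x8000) == 1, small_int_p (0x8000) == 0.  */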
32970 rtx
32971 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
32972 rtx reg)
32974 rtx base, offset;
32976 if (reg == NULL && !reload_completed)
32977 reg = gen_reg_rtx (Pmode);
32979 if (GET_CODE (orig) == CONST)
32981 rtx reg_temp;
32983 if (GET_CODE (XEXP (orig, 0)) == PLUS
32984 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
32985 return orig;
32987 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
32989 /* Use a different reg for the intermediate value, as
32990 it will be marked UNCHANGING. */
32991 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
32992 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
32993 Pmode, reg_temp);
32994 offset =
32995 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
32996 Pmode, reg);
32998 if (GET_CODE (offset) == CONST_INT)
33000 if (SMALL_INT (offset))
33001 return plus_constant (Pmode, base, INTVAL (offset));
33002 else if (!reload_completed)
33003 offset = force_reg (Pmode, offset);
33004 else
33006 rtx mem = force_const_mem (Pmode, orig);
33007 return machopic_legitimize_pic_address (mem, Pmode, reg);
33010 return gen_rtx_PLUS (Pmode, base, offset);
33013 /* Fall back on generic machopic code. */
33014 return machopic_legitimize_pic_address (orig, mode, reg);
33017 /* Output a .machine directive for the Darwin assembler, and call
33018 the generic start_file routine. */
33020 static void
33021 rs6000_darwin_file_start (void)
33023 static const struct
33025 const char *arg;
33026 const char *name;
33027 HOST_WIDE_INT if_set;
33028 } mapping[] = {
33029 { "ppc64", "ppc64", MASK_64BIT },
33030 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33031 { "power4", "ppc970", 0 },
33032 { "G5", "ppc970", 0 },
33033 { "7450", "ppc7450", 0 },
33034 { "7400", "ppc7400", MASK_ALTIVEC },
33035 { "G4", "ppc7400", 0 },
33036 { "750", "ppc750", 0 },
33037 { "740", "ppc750", 0 },
33038 { "G3", "ppc750", 0 },
33039 { "604e", "ppc604e", 0 },
33040 { "604", "ppc604", 0 },
33041 { "603e", "ppc603", 0 },
33042 { "603", "ppc603", 0 },
33043 { "601", "ppc601", 0 },
33044 { NULL, "ppc", 0 } };
33045 const char *cpu_id = "";
33046 size_t i;
33048 rs6000_file_start ();
33049 darwin_file_start ();
33051 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33053 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33054 cpu_id = rs6000_default_cpu;
33056 if (global_options_set.x_rs6000_cpu_index)
33057 cpu_id = processor_target_table[rs6000_cpu_index].name;
33059 /* Look through the mapping array. Pick the first name that either
33060 matches the argument, has a bit set in IF_SET that is also set
33061 in the target flags, or has a NULL name. */
33063 i = 0;
33064 while (mapping[i].arg != NULL
33065 && strcmp (mapping[i].arg, cpu_id) != 0
33066 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33067 i++;
33069 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
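/* [Editor's note]  Example resolutions of the first-match scan above:
   "-mcpu=G5" matches the "G5" row and emits ".machine ppc970"; with no
   -mcpu but -m64, MASK_64BIT selects the "ppc64" row; with nothing set,
   the NULL sentinel row yields ".machine ppc".  */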
33072 #endif /* TARGET_MACHO */
33074 #if TARGET_ELF
33075 static int
33076 rs6000_elf_reloc_rw_mask (void)
33078 if (flag_pic)
33079 return 3;
33080 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33081 return 2;
33082 else
33083 return 0;
33086 /* Record an element in the table of global constructors. SYMBOL is
33087 a SYMBOL_REF of the function to be called; PRIORITY is a number
33088 between 0 and MAX_INIT_PRIORITY.
33090 This differs from default_named_section_asm_out_constructor in
33091 that we have special handling for -mrelocatable. */
33093 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33094 static void
33095 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33097 const char *section = ".ctors";
33098 char buf[18];
33100 if (priority != DEFAULT_INIT_PRIORITY)
33102 sprintf (buf, ".ctors.%.5u",
33103 /* Invert the numbering so the linker puts us in the proper
33104 order; constructors are run from right to left, and the
33105 linker sorts in increasing order. */
33106 MAX_INIT_PRIORITY - priority);
33107 section = buf;
33110 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33111 assemble_align (POINTER_SIZE);
33113 if (DEFAULT_ABI == ABI_V4
33114 && (TARGET_RELOCATABLE || flag_pic > 1))
33116 fputs ("\t.long (", asm_out_file);
33117 output_addr_const (asm_out_file, symbol);
33118 fputs (")@fixup\n", asm_out_file);
33120 else
33121 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
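/* [Editor's sketch]  The inversion above exploits the linker's ascending
   sort plus the right-to-left execution of .ctors: a numerically low
   priority (which must run first) gets a numerically high section-name
   suffix, sorts to the end of .ctors, and is therefore reached first.
   Standalone illustration, assuming GCC's MAX_INIT_PRIORITY of 65535:  */
#include <stdio.h>

int
main (void)
{
  char buf[18];
  sprintf (buf, ".ctors.%.5u", 65535 - 1);      /* priority 1     */
  printf ("%s\n", buf);                         /* .ctors.65534   */
  sprintf (buf, ".ctors.%.5u", 65535 - 65535);  /* priority 65535 */
  printf ("%s\n", buf);                         /* .ctors.00000   */
  return 0;
}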
33124 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33125 static void
33126 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33128 const char *section = ".dtors";
33129 char buf[18];
33131 if (priority != DEFAULT_INIT_PRIORITY)
33133 sprintf (buf, ".dtors.%.5u",
33134 /* Invert the numbering so the linker puts us in the proper
33135 order; constructors are run from right to left, and the
33136 linker sorts in increasing order. */
33137 MAX_INIT_PRIORITY - priority);
33138 section = buf;
33141 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33142 assemble_align (POINTER_SIZE);
33144 if (DEFAULT_ABI == ABI_V4
33145 && (TARGET_RELOCATABLE || flag_pic > 1))
33147 fputs ("\t.long (", asm_out_file);
33148 output_addr_const (asm_out_file, symbol);
33149 fputs (")@fixup\n", asm_out_file);
33151 else
33152 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33155 void
33156 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33158 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33160 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33161 ASM_OUTPUT_LABEL (file, name);
33162 fputs (DOUBLE_INT_ASM_OP, file);
33163 rs6000_output_function_entry (file, name);
33164 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33165 if (DOT_SYMBOLS)
33167 fputs ("\t.size\t", file);
33168 assemble_name (file, name);
33169 fputs (",24\n\t.type\t.", file);
33170 assemble_name (file, name);
33171 fputs (",@function\n", file);
33172 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33174 fputs ("\t.globl\t.", file);
33175 assemble_name (file, name);
33176 putc ('\n', file);
33179 else
33180 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33181 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33182 rs6000_output_function_entry (file, name);
33183 fputs (":\n", file);
33184 return;
33187 if (DEFAULT_ABI == ABI_V4
33188 && (TARGET_RELOCATABLE || flag_pic > 1)
33189 && !TARGET_SECURE_PLT
33190 && (!constant_pool_empty_p () || crtl->profile)
33191 && uses_TOC ())
33193 char buf[256];
33195 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33197 fprintf (file, "\t.long ");
33198 assemble_name (file, toc_label_name);
33199 need_toc_init = 1;
33200 putc ('-', file);
33201 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33202 assemble_name (file, buf);
33203 putc ('\n', file);
33206 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33207 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33209 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33211 char buf[256];
33213 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33215 fprintf (file, "\t.quad .TOC.-");
33216 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33217 assemble_name (file, buf);
33218 putc ('\n', file);
33221 if (DEFAULT_ABI == ABI_AIX)
33223 const char *desc_name, *orig_name;
33225 orig_name = (*targetm.strip_name_encoding) (name);
33226 desc_name = orig_name;
33227 while (*desc_name == '.')
33228 desc_name++;
33230 if (TREE_PUBLIC (decl))
33231 fprintf (file, "\t.globl %s\n", desc_name);
33233 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33234 fprintf (file, "%s:\n", desc_name);
33235 fprintf (file, "\t.long %s\n", orig_name);
33236 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33237 fputs ("\t.long 0\n", file);
33238 fprintf (file, "\t.previous\n");
33240 ASM_OUTPUT_LABEL (file, name);
33243 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33244 static void
33245 rs6000_elf_file_end (void)
33247 #ifdef HAVE_AS_GNU_ATTRIBUTE
33248 /* ??? The value emitted depends on options active at file end.
33249 Assume anyone using #pragma or attributes that might change
33250 options knows what they are doing. */
33251 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33252 && rs6000_passes_float)
33254 int fp;
33256 if (TARGET_DF_FPR)
33257 fp = 1;
33258 else if (TARGET_SF_FPR)
33259 fp = 3;
33260 else
33261 fp = 2;
33262 if (rs6000_passes_long_double)
33264 if (!TARGET_LONG_DOUBLE_128)
33265 fp |= 2 * 4;
33266 else if (TARGET_IEEEQUAD)
33267 fp |= 3 * 4;
33268 else
33269 fp |= 1 * 4;
33271 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33273 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33275 if (rs6000_passes_vector)
33276 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33277 (TARGET_ALTIVEC_ABI ? 2 : 1));
33278 if (rs6000_returns_struct)
33279 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33280 aix_struct_return ? 2 : 1);
33282 #endif
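/* [Editor's note]  The fp value assembled above packs two 2-bit fields of
   the Tag_GNU_Power_ABI_FP attribute: bits 0-1 describe scalar float
   passing (1 = double in FPRs, 2 = soft-float, 3 = single-precision
   only) and bits 2-3 the long double layout (1 = 128-bit IBM pair,
   2 = 64-bit, 3 = IEEE binary128).  A hard-float build using the IBM
   128-bit long double therefore emits ".gnu_attribute 4, 5" (1 | 1*4).  */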
33283 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33284 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33285 file_end_indicate_exec_stack ();
33286 #endif
33288 if (flag_split_stack)
33289 file_end_indicate_split_stack ();
33291 if (cpu_builtin_p)
33293 /* We have expanded a CPU builtin, so we need to emit a reference to
33294 the special symbol that LIBC uses to declare it supports the
33295 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33296 switch_to_section (data_section);
33297 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33298 fprintf (asm_out_file, "\t%s %s\n",
33299 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33302 #endif
33304 #if TARGET_XCOFF
33306 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33307 #define HAVE_XCOFF_DWARF_EXTRAS 0
33308 #endif
33310 static enum unwind_info_type
33311 rs6000_xcoff_debug_unwind_info (void)
33313 return UI_NONE;
33316 static void
33317 rs6000_xcoff_asm_output_anchor (rtx symbol)
33319 char buffer[100];
33321 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33322 SYMBOL_REF_BLOCK_OFFSET (symbol));
33323 fprintf (asm_out_file, "%s", SET_ASM_OP);
33324 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33325 fprintf (asm_out_file, ",");
33326 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33327 fprintf (asm_out_file, "\n");
33330 static void
33331 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33333 fputs (GLOBAL_ASM_OP, stream);
33334 RS6000_OUTPUT_BASENAME (stream, name);
33335 putc ('\n', stream);
33338 /* A get_unnamed_decl callback, used for read-only sections. PTR
33339 points to the section string variable. */
33341 static void
33342 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33344 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33345 *(const char *const *) directive,
33346 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33349 /* Likewise for read-write sections. */
33351 static void
33352 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33354 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33355 *(const char *const *) directive,
33356 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33359 static void
33360 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33362 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33363 *(const char *const *) directive,
33364 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33367 /* A get_unnamed_section callback, used for switching to toc_section. */
33369 static void
33370 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33372 if (TARGET_MINIMAL_TOC)
33374 /* toc_section is always selected at least once from
33375 rs6000_xcoff_file_start, so this is guaranteed to
33376 always be defined once and only once in each file. */
33377 if (!toc_initialized)
33379 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33380 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33381 toc_initialized = 1;
33383 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33384 (TARGET_32BIT ? "" : ",3"));
33386 else
33387 fputs ("\t.toc\n", asm_out_file);
33390 /* Implement TARGET_ASM_INIT_SECTIONS. */
33392 static void
33393 rs6000_xcoff_asm_init_sections (void)
33395 read_only_data_section
33396 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33397 &xcoff_read_only_section_name);
33399 private_data_section
33400 = get_unnamed_section (SECTION_WRITE,
33401 rs6000_xcoff_output_readwrite_section_asm_op,
33402 &xcoff_private_data_section_name);
33404 tls_data_section
33405 = get_unnamed_section (SECTION_TLS,
33406 rs6000_xcoff_output_tls_section_asm_op,
33407 &xcoff_tls_data_section_name);
33409 tls_private_data_section
33410 = get_unnamed_section (SECTION_TLS,
33411 rs6000_xcoff_output_tls_section_asm_op,
33412 &xcoff_private_data_section_name);
33414 read_only_private_data_section
33415 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33416 &xcoff_private_data_section_name);
33418 toc_section
33419 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33421 readonly_data_section = read_only_data_section;
33424 static int
33425 rs6000_xcoff_reloc_rw_mask (void)
33427 return 3;
33430 static void
33431 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33432 tree decl ATTRIBUTE_UNUSED)
33434 int smclass;
33435 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33437 if (flags & SECTION_EXCLUDE)
33438 smclass = 4;
33439 else if (flags & SECTION_DEBUG)
33441 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33442 return;
33444 else if (flags & SECTION_CODE)
33445 smclass = 0;
33446 else if (flags & SECTION_TLS)
33447 smclass = 3;
33448 else if (flags & SECTION_WRITE)
33449 smclass = 2;
33450 else
33451 smclass = 1;
33453 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33454 (flags & SECTION_CODE) ? "." : "",
33455 name, suffix[smclass], flags & SECTION_ENTSIZE);
33458 #define IN_NAMED_SECTION(DECL) \
33459 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33460 && DECL_SECTION_NAME (DECL) != NULL)
33462 static section *
33463 rs6000_xcoff_select_section (tree decl, int reloc,
33464 unsigned HOST_WIDE_INT align)
33466 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33467 named section. */
33468 if (align > BIGGEST_ALIGNMENT)
33470 resolve_unique_section (decl, reloc, true);
33471 if (IN_NAMED_SECTION (decl))
33472 return get_named_section (decl, NULL, reloc);
33475 if (decl_readonly_section (decl, reloc))
33477 if (TREE_PUBLIC (decl))
33478 return read_only_data_section;
33479 else
33480 return read_only_private_data_section;
33482 else
33484 #if HAVE_AS_TLS
33485 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33487 if (TREE_PUBLIC (decl))
33488 return tls_data_section;
33489 else if (bss_initializer_p (decl))
33491 /* Convert to COMMON to emit in BSS. */
33492 DECL_COMMON (decl) = 1;
33493 return tls_comm_section;
33495 else
33496 return tls_private_data_section;
33498 else
33499 #endif
33500 if (TREE_PUBLIC (decl))
33501 return data_section;
33502 else
33503 return private_data_section;
33507 static void
33508 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33510 const char *name;
33512 /* Use select_section for private data and uninitialized data with
33513 alignment <= BIGGEST_ALIGNMENT. */
33514 if (!TREE_PUBLIC (decl)
33515 || DECL_COMMON (decl)
33516 || (DECL_INITIAL (decl) == NULL_TREE
33517 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33518 || DECL_INITIAL (decl) == error_mark_node
33519 || (flag_zero_initialized_in_bss
33520 && initializer_zerop (DECL_INITIAL (decl))))
33521 return;
33523 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33524 name = (*targetm.strip_name_encoding) (name);
33525 set_decl_section_name (decl, name);
33528 /* Select section for constant in constant pool.
33530 On RS/6000, all constants are in the private read-only data area.
33531 However, if this is being placed in the TOC it must be output as a
33532 toc entry. */
33534 static section *
33535 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33536 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33538 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33539 return toc_section;
33540 else
33541 return read_only_private_data_section;
33544 /* Remove any trailing [DS] or the like from the symbol name. */
33546 static const char *
33547 rs6000_xcoff_strip_name_encoding (const char *name)
33549 size_t len;
33550 if (*name == '*')
33551 name++;
33552 len = strlen (name);
33553 if (name[len - 1] == ']')
33554 return ggc_alloc_string (name, len - 4);
33555 else
33556 return name;
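/* [Editor's sketch]  The len - 4 above works because every bracketed
   XCOFF mapping-class suffix this hits is exactly four characters
   ("[DS]", "[RO]", "[RW]", ...).  A standalone equivalent, substituting
   malloc for GCC's GC string allocator:  */
#include <stdlib.h>
#include <string.h>

static char *
strip_mapping_class (const char *name)
{
  if (*name == '*')
    name++;
  size_t len = strlen (name);
  if (len >= 4 && name[len - 1] == ']')
    len -= 4;                        /* drop "[DS]" and the like */
  char *out = malloc (len + 1);
  if (out)
    {
      memcpy (out, name, len);
      out[len] = '\0';
    }
  return out;
}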
33559 /* Section attributes. AIX is always PIC. */
33561 static unsigned int
33562 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33564 unsigned int align;
33565 unsigned int flags = default_section_type_flags (decl, name, reloc);
33567 /* Align to at least UNIT size. */
33568 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33569 align = MIN_UNITS_PER_WORD;
33570 else
33571 /* Increase alignment of large objects if not already stricter. */
33572 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33573 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33574 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33576 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
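/* [Editor's note]  The return value above reuses the SECTION_ENTSIZE
   bit-field to carry log2 of the csect alignment in bytes;
   rs6000_xcoff_asm_named_section later prints it back as the ",%u"
   operand of .csect.  E.g. a 16-byte-aligned object gives
   exact_log2 (16) == 4 and ".csect foo[RW],4".  */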
33579 /* Output at beginning of assembler file.
33581 Initialize the section names for the RS/6000 at this point.
33583 Specify filename, including full path, to assembler.
33585 We want to go into the TOC section so at least one .toc will be emitted.
33586 Also, in order to output proper .bs/.es pairs, we need at least one static
33587 [RW] section emitted.
33589 Finally, declare mcount when profiling to make the assembler happy. */
33591 static void
33592 rs6000_xcoff_file_start (void)
33594 rs6000_gen_section_name (&xcoff_bss_section_name,
33595 main_input_filename, ".bss_");
33596 rs6000_gen_section_name (&xcoff_private_data_section_name,
33597 main_input_filename, ".rw_");
33598 rs6000_gen_section_name (&xcoff_read_only_section_name,
33599 main_input_filename, ".ro_");
33600 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33601 main_input_filename, ".tls_");
33602 rs6000_gen_section_name (&xcoff_tbss_section_name,
33603 main_input_filename, ".tbss_[UL]");
33605 fputs ("\t.file\t", asm_out_file);
33606 output_quoted_string (asm_out_file, main_input_filename);
33607 fputc ('\n', asm_out_file);
33608 if (write_symbols != NO_DEBUG)
33609 switch_to_section (private_data_section);
33610 switch_to_section (toc_section);
33611 switch_to_section (text_section);
33612 if (profile_flag)
33613 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33614 rs6000_file_start ();
33617 /* Output at end of assembler file.
33618 On the RS/6000, referencing data should automatically pull in text. */
33620 static void
33621 rs6000_xcoff_file_end (void)
33623 switch_to_section (text_section);
33624 fputs ("_section_.text:\n", asm_out_file);
33625 switch_to_section (data_section);
33626 fputs (TARGET_32BIT
33627 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33628 asm_out_file);
33631 struct declare_alias_data
33633 FILE *file;
33634 bool function_descriptor;
33637 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33639 static bool
33640 rs6000_declare_alias (struct symtab_node *n, void *d)
33642 struct declare_alias_data *data = (struct declare_alias_data *)d;
33643 /* Main symbol is output specially, because varasm machinery does part of
33644 the job for us - we do not need to declare .globl/lglobs and such. */
33645 if (!n->alias || n->weakref)
33646 return false;
33648 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33649 return false;
33651 /* Prevent assemble_alias from trying to use .set pseudo operation
33652 that does not behave as expected by the middle-end. */
33653 TREE_ASM_WRITTEN (n->decl) = true;
33655 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33656 char *buffer = (char *) alloca (strlen (name) + 2);
33657 char *p;
33658 int dollar_inside = 0;
33660 strcpy (buffer, name);
33661 p = strchr (buffer, '$');
33662 while (p) {
33663 *p = '_';
33664 dollar_inside++;
33665 p = strchr (p + 1, '$');
33667 if (TREE_PUBLIC (n->decl))
33669 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33671 if (dollar_inside) {
33672 if (data->function_descriptor)
33673 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33674 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33676 if (data->function_descriptor)
33678 fputs ("\t.globl .", data->file);
33679 RS6000_OUTPUT_BASENAME (data->file, buffer);
33680 putc ('\n', data->file);
33682 fputs ("\t.globl ", data->file);
33683 RS6000_OUTPUT_BASENAME (data->file, buffer);
33684 putc ('\n', data->file);
33686 #ifdef ASM_WEAKEN_DECL
33687 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33688 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33689 #endif
33691 else
33693 if (dollar_inside)
33695 if (data->function_descriptor)
33696 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33697 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33699 if (data->function_descriptor)
33701 fputs ("\t.lglobl .", data->file);
33702 RS6000_OUTPUT_BASENAME (data->file, buffer);
33703 putc ('\n', data->file);
33705 fputs ("\t.lglobl ", data->file);
33706 RS6000_OUTPUT_BASENAME (data->file, buffer);
33707 putc ('\n', data->file);
33709 if (data->function_descriptor)
33710 fputs (".", data->file);
33711 RS6000_OUTPUT_BASENAME (data->file, buffer);
33712 fputs (":\n", data->file);
33713 return false;
33717 #ifdef HAVE_GAS_HIDDEN
33718 /* Helper function to calculate visibility of a DECL
33719 and return the value as a const string. */
33721 static const char *
33722 rs6000_xcoff_visibility (tree decl)
33724 static const char * const visibility_types[] = {
33725 "", ",protected", ",hidden", ",internal"
33728 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33730 if (TREE_CODE (decl) == FUNCTION_DECL
33731 && cgraph_node::get (decl)
33732 && cgraph_node::get (decl)->instrumentation_clone
33733 && cgraph_node::get (decl)->instrumented_version)
33734 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
33736 return visibility_types[vis];
33738 #endif
33741 /* This macro produces the initial definition of a function name.
33742 On the RS/6000, we need to place an extra '.' in the function name and
33743 output the function descriptor.
33744 Dollar signs are converted to underscores.
33746 The csect for the function will have already been created when
33747 text_section was selected. We do have to go back to that csect, however.
33749 The third and fourth parameters to the .function pseudo-op (16 and 044)
33750 are placeholders which no longer have any use.
33752 Because AIX assembler's .set command has unexpected semantics, we output
33753 all aliases as alternative labels in front of the definition. */
33755 void
33756 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33758 char *buffer = (char *) alloca (strlen (name) + 1);
33759 char *p;
33760 int dollar_inside = 0;
33761 struct declare_alias_data data = {file, false};
33763 strcpy (buffer, name);
33764 p = strchr (buffer, '$');
33765 while (p) {
33766 *p = '_';
33767 dollar_inside++;
33768 p = strchr (p + 1, '$');
33770 if (TREE_PUBLIC (decl))
33772 if (!RS6000_WEAK || !DECL_WEAK (decl))
33774 if (dollar_inside) {
33775 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33776 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33778 fputs ("\t.globl .", file);
33779 RS6000_OUTPUT_BASENAME (file, buffer);
33780 #ifdef HAVE_GAS_HIDDEN
33781 fputs (rs6000_xcoff_visibility (decl), file);
33782 #endif
33783 putc ('\n', file);
33786 else
33788 if (dollar_inside) {
33789 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33790 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33792 fputs ("\t.lglobl .", file);
33793 RS6000_OUTPUT_BASENAME (file, buffer);
33794 putc ('\n', file);
33796 fputs ("\t.csect ", file);
33797 RS6000_OUTPUT_BASENAME (file, buffer);
33798 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33799 RS6000_OUTPUT_BASENAME (file, buffer);
33800 fputs (":\n", file);
33801 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33802 &data, true);
33803 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33804 RS6000_OUTPUT_BASENAME (file, buffer);
33805 fputs (", TOC[tc0], 0\n", file);
33806 in_section = NULL;
33807 switch_to_section (function_section (decl));
33808 putc ('.', file);
33809 RS6000_OUTPUT_BASENAME (file, buffer);
33810 fputs (":\n", file);
33811 data.function_descriptor = true;
33812 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33813 &data, true);
33814 if (!DECL_IGNORED_P (decl))
33816 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33817 xcoffout_declare_function (file, decl, buffer);
33818 else if (write_symbols == DWARF2_DEBUG)
33820 name = (*targetm.strip_name_encoding) (name);
33821 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33824 return;
33828 /* Output assembly language to globalize a symbol from a DECL,
33829 possibly with visibility. */
33831 void
33832 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33834 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33835 fputs (GLOBAL_ASM_OP, stream);
33836 RS6000_OUTPUT_BASENAME (stream, name);
33837 #ifdef HAVE_GAS_HIDDEN
33838 fputs (rs6000_xcoff_visibility (decl), stream);
33839 #endif
33840 putc ('\n', stream);
33843 /* Output assembly language to define a symbol as COMMON from a DECL,
33844 possibly with visibility. */
33846 void
33847 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33848 tree decl ATTRIBUTE_UNUSED,
33849 const char *name,
33850 unsigned HOST_WIDE_INT size,
33851 unsigned HOST_WIDE_INT align)
33853 unsigned HOST_WIDE_INT align2 = 2;
33855 if (align > 32)
33856 align2 = floor_log2 (align / BITS_PER_UNIT);
33857 else if (size > 4)
33858 align2 = 3;
33860 fputs (COMMON_ASM_OP, stream);
33861 RS6000_OUTPUT_BASENAME (stream, name);
33863 fprintf (stream,
33864 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
33865 size, align2);
33867 #ifdef HAVE_GAS_HIDDEN
33868 fputs (rs6000_xcoff_visibility (decl), stream);
33869 #endif
33870 putc ('\n', stream);
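/* [Editor's note]  Worked values for the alignment operand above (.comm
   takes log2 of the alignment in bytes): a 4-byte int keeps the default
   align2 = 2; an 8-byte double (size > 4) gets 3; a 128-bit-aligned
   object (align = 128 bits > 32) gets floor_log2 (128 / 8) = 4.  */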
33873 /* This macro produces the initial definition of an object (variable) name.
33874 Because AIX assembler's .set command has unexpected semantics, we output
33875 all aliases as alternative labels in front of the definition. */
33877 void
33878 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
33880 struct declare_alias_data data = {file, false};
33881 RS6000_OUTPUT_BASENAME (file, name);
33882 fputs (":\n", file);
33883 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33884 &data, true);
33887 /* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'. */
33889 void
33890 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
33892 fputs (integer_asm_op (size, FALSE), file);
33893 assemble_name (file, label);
33894 fputs ("-$", file);
33897 /* Output a symbol offset relative to the dbase for the current object.
33898 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
33899 signed offsets.
33901 __gcc_unwind_dbase is embedded in all executables/libraries through
33902 libgcc/config/rs6000/crtdbase.S. */
33904 void
33905 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
33907 fputs (integer_asm_op (size, FALSE), file);
33908 assemble_name (file, label);
33909 fputs("-__gcc_unwind_dbase", file);
33912 #ifdef HAVE_AS_TLS
33913 static void
33914 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
33916 rtx symbol;
33917 int flags;
33918 const char *symname;
33920 default_encode_section_info (decl, rtl, first);
33922 /* Careful not to prod global register variables. */
33923 if (!MEM_P (rtl))
33924 return;
33925 symbol = XEXP (rtl, 0);
33926 if (GET_CODE (symbol) != SYMBOL_REF)
33927 return;
33929 flags = SYMBOL_REF_FLAGS (symbol);
33931 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33932 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
33934 SYMBOL_REF_FLAGS (symbol) = flags;
33936 /* Append mapping class to extern decls. */
33937 symname = XSTR (symbol, 0);
33938 if (decl /* sync condition with assemble_external () */
33939 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
33940 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
33941 || TREE_CODE (decl) == FUNCTION_DECL)
33942 && symname[strlen (symname) - 1] != ']')
33944 char *newname = (char *) alloca (strlen (symname) + 5);
33945 strcpy (newname, symname);
33946 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
33947 ? "[DS]" : "[UA]"));
33948 XSTR (symbol, 0) = ggc_strdup (newname);
33951 #endif /* HAVE_AS_TLS */
33952 #endif /* TARGET_XCOFF */
33954 void
33955 rs6000_asm_weaken_decl (FILE *stream, tree decl,
33956 const char *name, const char *val)
33958 fputs ("\t.weak\t", stream);
33959 RS6000_OUTPUT_BASENAME (stream, name);
33960 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33961 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33963 if (TARGET_XCOFF)
33964 fputs ("[DS]", stream);
33965 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33966 if (TARGET_XCOFF)
33967 fputs (rs6000_xcoff_visibility (decl), stream);
33968 #endif
33969 fputs ("\n\t.weak\t.", stream);
33970 RS6000_OUTPUT_BASENAME (stream, name);
33972 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33973 if (TARGET_XCOFF)
33974 fputs (rs6000_xcoff_visibility (decl), stream);
33975 #endif
33976 fputc ('\n', stream);
33977 if (val)
33979 #ifdef ASM_OUTPUT_DEF
33980 ASM_OUTPUT_DEF (stream, name, val);
33981 #endif
33982 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33983 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33985 fputs ("\t.set\t.", stream);
33986 RS6000_OUTPUT_BASENAME (stream, name);
33987 fputs (",.", stream);
33988 RS6000_OUTPUT_BASENAME (stream, val);
33989 fputc ('\n', stream);
33995 /* Return true if INSN should not be copied. */
33997 static bool
33998 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34000 return recog_memoized (insn) >= 0
34001 && get_attr_cannot_copy (insn);
34004 /* Compute a (partial) cost for rtx X. Return true if the complete
34005 cost has been computed, and false if subexpressions should be
34006 scanned. In either case, *TOTAL contains the cost result. */
34008 static bool
34009 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34010 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34012 int code = GET_CODE (x);
34014 switch (code)
34016 /* On the RS/6000, if it is valid in the insn, it is free. */
34017 case CONST_INT:
34018 if (((outer_code == SET
34019 || outer_code == PLUS
34020 || outer_code == MINUS)
34021 && (satisfies_constraint_I (x)
34022 || satisfies_constraint_L (x)))
34023 || (outer_code == AND
34024 && (satisfies_constraint_K (x)
34025 || (mode == SImode
34026 ? satisfies_constraint_L (x)
34027 : satisfies_constraint_J (x))))
34028 || ((outer_code == IOR || outer_code == XOR)
34029 && (satisfies_constraint_K (x)
34030 || (mode == SImode
34031 ? satisfies_constraint_L (x)
34032 : satisfies_constraint_J (x))))
34033 || outer_code == ASHIFT
34034 || outer_code == ASHIFTRT
34035 || outer_code == LSHIFTRT
34036 || outer_code == ROTATE
34037 || outer_code == ROTATERT
34038 || outer_code == ZERO_EXTRACT
34039 || (outer_code == MULT
34040 && satisfies_constraint_I (x))
34041 || ((outer_code == DIV || outer_code == UDIV
34042 || outer_code == MOD || outer_code == UMOD)
34043 && exact_log2 (INTVAL (x)) >= 0)
34044 || (outer_code == COMPARE
34045 && (satisfies_constraint_I (x)
34046 || satisfies_constraint_K (x)))
34047 || ((outer_code == EQ || outer_code == NE)
34048 && (satisfies_constraint_I (x)
34049 || satisfies_constraint_K (x)
34050 || (mode == SImode
34051 ? satisfies_constraint_L (x)
34052 : satisfies_constraint_J (x))))
34053 || (outer_code == GTU
34054 && satisfies_constraint_I (x))
34055 || (outer_code == LTU
34056 && satisfies_constraint_P (x)))
34058 *total = 0;
34059 return true;
34061 else if ((outer_code == PLUS
34062 && reg_or_add_cint_operand (x, VOIDmode))
34063 || (outer_code == MINUS
34064 && reg_or_sub_cint_operand (x, VOIDmode))
34065 || ((outer_code == SET
34066 || outer_code == IOR
34067 || outer_code == XOR)
34068 && (INTVAL (x)
34069 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34071 *total = COSTS_N_INSNS (1);
34072 return true;
34074 /* FALLTHRU */
34076 case CONST_DOUBLE:
34077 case CONST_WIDE_INT:
34078 case CONST:
34079 case HIGH:
34080 case SYMBOL_REF:
34081 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34082 return true;
34084 case MEM:
34085 /* When optimizing for size, MEM should be slightly more expensive
34086 than generating the address, e.g., (plus (reg) (const)).
34087 L1 cache latency is about two instructions. */
34088 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34089 if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
34090 *total += COSTS_N_INSNS (100);
34091 return true;
34093 case LABEL_REF:
34094 *total = 0;
34095 return true;
34097 case PLUS:
34098 case MINUS:
34099 if (FLOAT_MODE_P (mode))
34100 *total = rs6000_cost->fp;
34101 else
34102 *total = COSTS_N_INSNS (1);
34103 return false;
34105 case MULT:
34106 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34107 && satisfies_constraint_I (XEXP (x, 1)))
34109 if (INTVAL (XEXP (x, 1)) >= -256
34110 && INTVAL (XEXP (x, 1)) <= 255)
34111 *total = rs6000_cost->mulsi_const9;
34112 else
34113 *total = rs6000_cost->mulsi_const;
34115 else if (mode == SFmode)
34116 *total = rs6000_cost->fp;
34117 else if (FLOAT_MODE_P (mode))
34118 *total = rs6000_cost->dmul;
34119 else if (mode == DImode)
34120 *total = rs6000_cost->muldi;
34121 else
34122 *total = rs6000_cost->mulsi;
34123 return false;
34125 case FMA:
34126 if (mode == SFmode)
34127 *total = rs6000_cost->fp;
34128 else
34129 *total = rs6000_cost->dmul;
34130 break;
34132 case DIV:
34133 case MOD:
34134 if (FLOAT_MODE_P (mode))
34136 *total = mode == DFmode ? rs6000_cost->ddiv
34137 : rs6000_cost->sdiv;
34138 return false;
34140 /* FALLTHRU */
34142 case UDIV:
34143 case UMOD:
34144 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34145 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34147 if (code == DIV || code == MOD)
34148 /* Shift, addze */
34149 *total = COSTS_N_INSNS (2);
34150 else
34151 /* Shift */
34152 *total = COSTS_N_INSNS (1);
34154 else
34156 if (GET_MODE (XEXP (x, 1)) == DImode)
34157 *total = rs6000_cost->divdi;
34158 else
34159 *total = rs6000_cost->divsi;
34161 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34162 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34163 *total += COSTS_N_INSNS (2);
34164 return false;
34166 case CTZ:
34167 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34168 return false;
34170 case FFS:
34171 *total = COSTS_N_INSNS (4);
34172 return false;
34174 case POPCOUNT:
34175 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34176 return false;
34178 case PARITY:
34179 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34180 return false;
34182 case NOT:
34183 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34184 *total = 0;
34185 else
34186 *total = COSTS_N_INSNS (1);
34187 return false;
34189 case AND:
34190 if (CONST_INT_P (XEXP (x, 1)))
34192 rtx left = XEXP (x, 0);
34193 rtx_code left_code = GET_CODE (left);
34195 /* rotate-and-mask: 1 insn. */
34196 if ((left_code == ROTATE
34197 || left_code == ASHIFT
34198 || left_code == LSHIFTRT)
34199 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34201 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34202 if (!CONST_INT_P (XEXP (left, 1)))
34203 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34204 *total += COSTS_N_INSNS (1);
34205 return true;
34208 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34209 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34210 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34211 || (val & 0xffff) == val
34212 || (val & 0xffff0000) == val
34213 || ((val & 0xffff) == 0 && mode == SImode))
34215 *total = rtx_cost (left, mode, AND, 0, speed);
34216 *total += COSTS_N_INSNS (1);
34217 return true;
34220 /* 2 insns. */
34221 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34223 *total = rtx_cost (left, mode, AND, 0, speed);
34224 *total += COSTS_N_INSNS (2);
34225 return true;
34229 *total = COSTS_N_INSNS (1);
34230 return false;
34232 case IOR:
34233 /* FIXME */
34234 *total = COSTS_N_INSNS (1);
34235 return true;
34237 case CLZ:
34238 case XOR:
34239 case ZERO_EXTRACT:
34240 *total = COSTS_N_INSNS (1);
34241 return false;
34243 case ASHIFT:
34244 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34245 the sign extend and shift separately within the insn. */
34246 if (TARGET_EXTSWSLI && mode == DImode
34247 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34248 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34250 *total = 0;
34251 return false;
34253 /* fall through */
34255 case ASHIFTRT:
34256 case LSHIFTRT:
34257 case ROTATE:
34258 case ROTATERT:
34259 /* Handle mul_highpart. */
34260 if (outer_code == TRUNCATE
34261 && GET_CODE (XEXP (x, 0)) == MULT)
34263 if (mode == DImode)
34264 *total = rs6000_cost->muldi;
34265 else
34266 *total = rs6000_cost->mulsi;
34267 return true;
34269 else if (outer_code == AND)
34270 *total = 0;
34271 else
34272 *total = COSTS_N_INSNS (1);
34273 return false;
34275 case SIGN_EXTEND:
34276 case ZERO_EXTEND:
34277 if (GET_CODE (XEXP (x, 0)) == MEM)
34278 *total = 0;
34279 else
34280 *total = COSTS_N_INSNS (1);
34281 return false;
34283 case COMPARE:
34284 case NEG:
34285 case ABS:
34286 if (!FLOAT_MODE_P (mode))
34288 *total = COSTS_N_INSNS (1);
34289 return false;
34291 /* FALLTHRU */
34293 case FLOAT:
34294 case UNSIGNED_FLOAT:
34295 case FIX:
34296 case UNSIGNED_FIX:
34297 case FLOAT_TRUNCATE:
34298 *total = rs6000_cost->fp;
34299 return false;
34301 case FLOAT_EXTEND:
34302 if (mode == DFmode)
34303 *total = rs6000_cost->sfdf_convert;
34304 else
34305 *total = rs6000_cost->fp;
34306 return false;
34308 case UNSPEC:
34309 switch (XINT (x, 1))
34311 case UNSPEC_FRSP:
34312 *total = rs6000_cost->fp;
34313 return true;
34315 default:
34316 break;
34318 break;
34320 case CALL:
34321 case IF_THEN_ELSE:
34322 if (!speed)
34324 *total = COSTS_N_INSNS (1);
34325 return true;
34327 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34329 *total = rs6000_cost->fp;
34330 return false;
34332 break;
34334 case NE:
34335 case EQ:
34336 case GTU:
34337 case LTU:
34338 /* Carry bit requires mode == Pmode.
34339 NEG or PLUS already counted so only add one. */
34340 if (mode == Pmode
34341 && (outer_code == NEG || outer_code == PLUS))
34343 *total = COSTS_N_INSNS (1);
34344 return true;
34346 if (outer_code == SET)
34348 if (XEXP (x, 1) == const0_rtx)
34350 if (TARGET_ISEL && !TARGET_MFCRF)
34351 *total = COSTS_N_INSNS (8);
34352 else
34353 *total = COSTS_N_INSNS (2);
34354 return true;
34356 else
34358 *total = COSTS_N_INSNS (3);
34359 return false;
34362 /* FALLTHRU */
34364 case GT:
34365 case LT:
34366 case UNORDERED:
34367 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
34369 if (TARGET_ISEL && !TARGET_MFCRF)
34370 *total = COSTS_N_INSNS (8);
34371 else
34372 *total = COSTS_N_INSNS (2);
34373 return true;
34375 /* CC COMPARE. */
34376 if (outer_code == COMPARE)
34378 *total = 0;
34379 return true;
34381 break;
34383 default:
34384 break;
34387 return false;
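/* [Editor's note]  Worked example of the cost contract above: for
   (set (reg) (plus (reg) (const_int 8))), the PLUS case stores
   COSTS_N_INSNS (1) and returns false, so the walk descends to the
   CONST_INT with outer_code == PLUS; there satisfies_constraint_I (the
   signed 16-bit "addi" immediate range) makes it free (*total = 0,
   return true).  Net cost: one instruction.  */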
34390 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34392 static bool
34393 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34394 int opno, int *total, bool speed)
34396 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34398 fprintf (stderr,
34399 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34400 "opno = %d, total = %d, speed = %s, x:\n",
34401 ret ? "complete" : "scan inner",
34402 GET_MODE_NAME (mode),
34403 GET_RTX_NAME (outer_code),
34404 opno,
34405 *total,
34406 speed ? "true" : "false");
34408 debug_rtx (x);
34410 return ret;
34413 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34415 static int
34416 rs6000_debug_address_cost (rtx x, machine_mode mode,
34417 addr_space_t as, bool speed)
34419 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34421 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34422 ret, speed ? "true" : "false");
34423 debug_rtx (x);
34425 return ret;
34429 /* A C expression returning the cost of moving data from a register of class
34430 CLASS1 to one of CLASS2. */
34432 static int
34433 rs6000_register_move_cost (machine_mode mode,
34434 reg_class_t from, reg_class_t to)
34436 int ret;
34438 if (TARGET_DEBUG_COST)
34439 dbg_cost_ctrl++;
34441 /* Moves from/to GENERAL_REGS. */
34442 if (reg_classes_intersect_p (to, GENERAL_REGS)
34443 || reg_classes_intersect_p (from, GENERAL_REGS))
34445 reg_class_t rclass = from;
34447 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34448 rclass = to;
34450 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34451 ret = (rs6000_memory_move_cost (mode, rclass, false)
34452 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34454 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34455 shift. */
34456 else if (rclass == CR_REGS)
34457 ret = 4;
34459 /* For those processors that have slow LR/CTR moves, make them more
34460 expensive than memory in order to bias spills to memory. */
34461 else if ((rs6000_cpu == PROCESSOR_POWER6
34462 || rs6000_cpu == PROCESSOR_POWER7
34463 || rs6000_cpu == PROCESSOR_POWER8
34464 || rs6000_cpu == PROCESSOR_POWER9)
34465 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34466 ret = 6 * hard_regno_nregs[0][mode];
34468 else
34469 /* A move will cost one instruction per GPR moved. */
34470 ret = 2 * hard_regno_nregs[0][mode];
34473 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34474 else if (VECTOR_MEM_VSX_P (mode)
34475 && reg_classes_intersect_p (to, VSX_REGS)
34476 && reg_classes_intersect_p (from, VSX_REGS))
34477 ret = 2 * hard_regno_nregs[FIRST_FPR_REGNO][mode];
34479 /* Moving between two similar registers is just one instruction. */
34480 else if (reg_classes_intersect_p (to, from))
34481 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34483 /* Everything else has to go through GENERAL_REGS. */
34484 else
34485 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34486 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34488 if (TARGET_DEBUG_COST)
34490 if (dbg_cost_ctrl == 1)
34491 fprintf (stderr,
34492 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34493 ret, GET_MODE_NAME (mode), reg_class_names[from],
34494 reg_class_names[to]);
34495 dbg_cost_ctrl--;
34498 return ret;
34501 /* A C expression returning the cost of moving data of MODE from a register to
34502 or from memory. */
34504 static int
34505 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34506 bool in ATTRIBUTE_UNUSED)
34508 int ret;
34510 if (TARGET_DEBUG_COST)
34511 dbg_cost_ctrl++;
34513 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34514 ret = 4 * hard_regno_nregs[0][mode];
34515 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34516 || reg_classes_intersect_p (rclass, VSX_REGS)))
34517 ret = 4 * hard_regno_nregs[32][mode];
34518 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34519 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
34520 else
34521 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34523 if (TARGET_DEBUG_COST)
34525 if (dbg_cost_ctrl == 1)
34526 fprintf (stderr,
34527 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34528 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34529 dbg_cost_ctrl--;
34532 return ret;
34535 /* Returns a code for a target-specific builtin that implements
34536 reciprocal of the function, or NULL_TREE if not available. */
34538 static tree
34539 rs6000_builtin_reciprocal (tree fndecl)
34541 switch (DECL_FUNCTION_CODE (fndecl))
34543 case VSX_BUILTIN_XVSQRTDP:
34544 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34545 return NULL_TREE;
34547 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34549 case VSX_BUILTIN_XVSQRTSP:
34550 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34551 return NULL_TREE;
34553 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34555 default:
34556 return NULL_TREE;
34560 /* Load up a constant. If the mode is a vector mode, splat the value across
34561 all of the vector elements. */
34563 static rtx
34564 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34566 rtx reg;
34568 if (mode == SFmode || mode == DFmode)
34570 rtx d = const_double_from_real_value (dconst, mode);
34571 reg = force_reg (mode, d);
34573 else if (mode == V4SFmode)
34575 rtx d = const_double_from_real_value (dconst, SFmode);
34576 rtvec v = gen_rtvec (4, d, d, d, d);
34577 reg = gen_reg_rtx (mode);
34578 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34580 else if (mode == V2DFmode)
34582 rtx d = const_double_from_real_value (dconst, DFmode);
34583 rtvec v = gen_rtvec (2, d, d);
34584 reg = gen_reg_rtx (mode);
34585 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34587 else
34588 gcc_unreachable ();
34590 return reg;
34593 /* Generate an FMA instruction. */
34595 static void
34596 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34598 machine_mode mode = GET_MODE (target);
34599 rtx dst;
34601 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34602 gcc_assert (dst != NULL);
34604 if (dst != target)
34605 emit_move_insn (target, dst);
34608 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34610 static void
34611 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34613 machine_mode mode = GET_MODE (dst);
34614 rtx r;
34616 /* This is a tad more complicated, since the fnma_optab is for
34617 a different expression: fma(-m1, m2, a), which is the same
34618 thing except in the case of signed zeros.
34620 Fortunately we know that if FMA is supported that FNMSUB is
34621 also supported in the ISA. Just expand it directly. */
34623 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34625 r = gen_rtx_NEG (mode, a);
34626 r = gen_rtx_FMA (mode, m1, m2, r);
34627 r = gen_rtx_NEG (mode, r);
34628 emit_insn (gen_rtx_SET (dst, r));
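/* [Editor's sketch]  The signed-zero caveat above, demonstrated with the
   C fma(): -(m1*m2 - a) and (-m1)*m2 + a agree except when the exact
   result is zero, where they round to zeros of opposite sign.  Compile
   with -lm.  */
#include <math.h>
#include <stdio.h>

int
main (void)
{
  double m1 = 1.0, m2 = 0.0, a = 0.0;
  double fnmsub = -fma (m1, m2, -a);   /* -(+0 + -0) = -(+0) = -0.0 */
  double fnma   =  fma (-m1, m2, a);   /* (-0) + (+0) = +0.0        */
  printf ("%d %d\n", signbit (fnmsub) != 0, signbit (fnma) != 0); /* 1 0 */
  return 0;
}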
34631 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34632 add a reg_note saying that this was a division. Support both scalar and
34633 vector divide. Assumes no trapping math and finite arguments. */
34635 void
34636 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34638 machine_mode mode = GET_MODE (dst);
34639 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34640 int i;
34642 /* Low precision estimates guarantee 5 bits of accuracy. High
34643 precision estimates guarantee 14 bits of accuracy. SFmode
34644 requires 23 bits of accuracy. DFmode requires 52 bits of
34645 accuracy. Each pass at least doubles the accuracy, leading
34646 to the following. */
34647 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34648 if (mode == DFmode || mode == V2DFmode)
34649 passes++;
34651 enum insn_code code = optab_handler (smul_optab, mode);
34652 insn_gen_fn gen_mul = GEN_FCN (code);
34654 gcc_assert (code != CODE_FOR_nothing);
34656 one = rs6000_load_constant_and_splat (mode, dconst1);
34658 /* x0 = 1./d estimate */
34659 x0 = gen_reg_rtx (mode);
34660 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34661 UNSPEC_FRES)));
34663 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34664 if (passes > 1) {
34666 /* e0 = 1. - d * x0 */
34667 e0 = gen_reg_rtx (mode);
34668 rs6000_emit_nmsub (e0, d, x0, one);
34670 /* x1 = x0 + e0 * x0 */
34671 x1 = gen_reg_rtx (mode);
34672 rs6000_emit_madd (x1, e0, x0, x0);
34674 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34675 ++i, xprev = xnext, eprev = enext) {
34677 /* enext = eprev * eprev */
34678 enext = gen_reg_rtx (mode);
34679 emit_insn (gen_mul (enext, eprev, eprev));
34681 /* xnext = xprev + enext * xprev */
34682 xnext = gen_reg_rtx (mode);
34683 rs6000_emit_madd (xnext, enext, xprev, xprev);
34686 } else
34687 xprev = x0;
34689 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34691 /* u = n * xprev */
34692 u = gen_reg_rtx (mode);
34693 emit_insn (gen_mul (u, n, xprev));
34695 /* v = n - (d * u) */
34696 v = gen_reg_rtx (mode);
34697 rs6000_emit_nmsub (v, d, u, n);
34699 /* dst = (v * xprev) + u */
34700 rs6000_emit_madd (dst, v, xprev, u);
34702 if (note_p)
34703 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
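/* [Editor's sketch]  The same refinement in scalar C, with a deliberately
   crude ~5-bit seed standing in for the hardware fres estimate.  Each
   pass computes e = 1 - d*x and x += e*x (equivalent to the chained
   e/x recurrence above), and the final step folds in the numerator:
   u = n*x, dst = u + (n - d*u)*x.  All names here are illustrative.  */
#include <math.h>
#include <stdio.h>

static double
crude_recip (double d)               /* ~5 correct bits, like fres */
{
  int e;
  double m = frexp (d, &e);          /* d = m * 2^e, m in [0.5, 1) */
  return ldexp (1.0 / (floor (m * 32.0) / 32.0), -e);
}

static double
swdiv (double n, double d, int passes)
{
  double x = crude_recip (d);
  for (int i = 0; i < passes - 1; i++)
    {
      double e = 1.0 - d * x;        /* rs6000_emit_nmsub */
      x = x + e * x;                 /* rs6000_emit_madd  */
    }
  double u = n * x;
  double v = n - d * u;              /* residual */
  return u + v * x;
}

int
main (void)
{
  /* passes = 4, as for DFmode above; agrees with n/d essentially to
     double precision.  */
  printf ("%.17g %.17g\n", swdiv (355.0, 113.0, 4), 355.0 / 113.0);
  return 0;
}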
34706 /* Goldschmidt's Algorithm for single/double-precision floating point
34707 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34709 void
34710 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34712 machine_mode mode = GET_MODE (src);
34713 rtx e = gen_reg_rtx (mode);
34714 rtx g = gen_reg_rtx (mode);
34715 rtx h = gen_reg_rtx (mode);
34717 /* Low precision estimates guarantee 5 bits of accuracy. High
34718 precision estimates guarantee 14 bits of accuracy. SFmode
34719 requires 23 bits of accuracy. DFmode requires 52 bits of
34720 accuracy. Each pass at least doubles the accuracy, leading
34721 to the following. */
34722 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34723 if (mode == DFmode || mode == V2DFmode)
34724 passes++;
34726 int i;
34727 rtx mhalf;
34728 enum insn_code code = optab_handler (smul_optab, mode);
34729 insn_gen_fn gen_mul = GEN_FCN (code);
34731 gcc_assert (code != CODE_FOR_nothing);
34733 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34735 /* e = rsqrt estimate */
34736 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34737 UNSPEC_RSQRT)));
34739 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34740 if (!recip)
34742 rtx zero = force_reg (mode, CONST0_RTX (mode));
34744 if (mode == SFmode)
34746 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34747 e, zero, mode, 0);
34748 if (target != e)
34749 emit_move_insn (e, target);
34751 else
34753 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34754 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34758 /* g = sqrt estimate. */
34759 emit_insn (gen_mul (g, e, src));
34760 /* h = 1/(2*sqrt) estimate. */
34761 emit_insn (gen_mul (h, e, mhalf));
34763 if (recip)
34765 if (passes == 1)
34767 rtx t = gen_reg_rtx (mode);
34768 rs6000_emit_nmsub (t, g, h, mhalf);
34769 /* Apply correction directly to 1/rsqrt estimate. */
34770 rs6000_emit_madd (dst, e, t, e);
34772 else
34774 for (i = 0; i < passes; i++)
34776 rtx t1 = gen_reg_rtx (mode);
34777 rtx g1 = gen_reg_rtx (mode);
34778 rtx h1 = gen_reg_rtx (mode);
34780 rs6000_emit_nmsub (t1, g, h, mhalf);
34781 rs6000_emit_madd (g1, g, t1, g);
34782 rs6000_emit_madd (h1, h, t1, h);
34784 g = g1;
34785 h = h1;
34787 /* Multiply by 2 for 1/rsqrt. */
34788 emit_insn (gen_add3_insn (dst, h, h));
34791 else
34793 rtx t = gen_reg_rtx (mode);
34794 rs6000_emit_nmsub (t, g, h, mhalf);
34795 rs6000_emit_madd (dst, g, t, g);
34798 return;
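/* [Editor's sketch]  Goldschmidt's coupled iteration in scalar C.  From
   an estimate e ~ 1/sqrt(x) it maintains g ~ sqrt(x) and
   h ~ 1/(2*sqrt(x)); each pass forms the residual t = 1/2 - g*h and
   updates g += g*t, h += h*t, roughly doubling the correct bits.  The
   result is g for sqrt, or 2*h for 1/sqrt as in the recip path above.
   The float-rounded seed below stands in for the rsqrt estimate.  */
#include <math.h>
#include <stdio.h>

static double
swsqrt (double x, int passes)
{
  double e = (float) (1.0 / sqrt (x));   /* estimate of 1/sqrt(x) */
  double g = x * e;                      /* sqrt estimate         */
  double h = e * 0.5;                    /* 1/(2*sqrt) estimate   */
  for (int i = 0; i < passes; i++)
    {
      double t = 0.5 - g * h;            /* rs6000_emit_nmsub */
      g = g + g * t;                     /* rs6000_emit_madd  */
      h = h + h * t;
    }
  return g;
}

int
main (void)
{
  printf ("%.17g %.17g\n", swsqrt (2.0, 3), sqrt (2.0));
  return 0;
}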
34801 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34802 (Power7) targets. DST is the target, and SRC is the argument operand. */
34804 void
34805 rs6000_emit_popcount (rtx dst, rtx src)
34807 machine_mode mode = GET_MODE (dst);
34808 rtx tmp1, tmp2;
34810 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34811 if (TARGET_POPCNTD)
34813 if (mode == SImode)
34814 emit_insn (gen_popcntdsi2 (dst, src));
34815 else
34816 emit_insn (gen_popcntddi2 (dst, src));
34817 return;
34820 tmp1 = gen_reg_rtx (mode);
34822 if (mode == SImode)
34824 emit_insn (gen_popcntbsi2 (tmp1, src));
34825 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34826 NULL_RTX, 0);
34827 tmp2 = force_reg (SImode, tmp2);
34828 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34830 else
34832 emit_insn (gen_popcntbdi2 (tmp1, src));
34833 tmp2 = expand_mult (DImode, tmp1,
34834 GEN_INT ((HOST_WIDE_INT)
34835 0x01010101 << 32 | 0x01010101),
34836 NULL_RTX, 0);
34837 tmp2 = force_reg (DImode, tmp2);
34838 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
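/* [Editor's sketch]  Why the multiply works: after popcntb each byte of
   tmp1 holds that byte's population count (0..8), and multiplying by
   0x01010101 accumulates all four counts into the most significant
   byte, which the shift by 24 then extracts.  Standalone SImode
   equivalent with the popcntb step open-coded:  */
#include <stdint.h>
#include <stdio.h>

static uint32_t
popcount32 (uint32_t x)
{
  /* Per-byte population counts -- the popcntb step.  */
  x = x - ((x >> 1) & 0x55555555u);
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
  x = (x + (x >> 4)) & 0x0f0f0f0fu;
  /* Sum the four byte counts into bits 24-31, then shift down.  */
  return (x * 0x01010101u) >> 24;
}

int
main (void)
{
  printf ("%u\n", popcount32 (0xdeadbeefu));   /* 24 */
  return 0;
}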
34843 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34844 target, and SRC is the argument operand. */
34846 void
34847 rs6000_emit_parity (rtx dst, rtx src)
34849 machine_mode mode = GET_MODE (dst);
34850 rtx tmp;
34852 tmp = gen_reg_rtx (mode);
34854 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34855 if (TARGET_CMPB)
34857 if (mode == SImode)
34859 emit_insn (gen_popcntbsi2 (tmp, src));
34860 emit_insn (gen_paritysi2_cmpb (dst, tmp));
34862 else
34864 emit_insn (gen_popcntbdi2 (tmp, src));
34865 emit_insn (gen_paritydi2_cmpb (dst, tmp));
34867 return;
34870 if (mode == SImode)
34872 /* Is mult+shift >= shift+xor+shift+xor? */
34873 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
34875 rtx tmp1, tmp2, tmp3, tmp4;
34877 tmp1 = gen_reg_rtx (SImode);
34878 emit_insn (gen_popcntbsi2 (tmp1, src));
34880 tmp2 = gen_reg_rtx (SImode);
34881 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
34882 tmp3 = gen_reg_rtx (SImode);
34883 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
34885 tmp4 = gen_reg_rtx (SImode);
34886 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
34887 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
34889 else
34890 rs6000_emit_popcount (tmp, src);
34891 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
34893 else
34895 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
34896 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
34898 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
34900 tmp1 = gen_reg_rtx (DImode);
34901 emit_insn (gen_popcntbdi2 (tmp1, src));
34903 tmp2 = gen_reg_rtx (DImode);
34904 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
34905 tmp3 = gen_reg_rtx (DImode);
34906 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
34908 tmp4 = gen_reg_rtx (DImode);
34909 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
34910 tmp5 = gen_reg_rtx (DImode);
34911 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
34913 tmp6 = gen_reg_rtx (DImode);
34914 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
34915 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
34917 else
34918 rs6000_emit_popcount (tmp, src);
34919 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
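/* [Editor's sketch]  In the sequence above, popcntb first reduces each
   byte to its bit count, so bit 0 of every byte is already that byte's
   parity; the two shift/xor steps (16 then 8) fold the bytes together
   and the final "and 1" isolates the result.  Without popcntb, plain C
   has to fold all the way down:  */
#include <stdint.h>
#include <stdio.h>

static uint32_t
parity32 (uint32_t x)
{
  x ^= x >> 16;
  x ^= x >> 8;
  x ^= x >> 4;       /* extra folds replace the popcntb step */
  x ^= x >> 2;
  x ^= x >> 1;
  return x & 1;
}

int
main (void)
{
  printf ("%u %u\n", parity32 (0x1u), parity32 (0x3u));   /* 1 0 */
  return 0;
}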
34923 /* Expand an Altivec constant permutation for little endian mode.
34924 There are two issues: First, the two input operands must be
34925 swapped so that together they form a double-wide array in LE
34926 order. Second, the vperm instruction has surprising behavior
34927 in LE mode: it interprets the elements of the source vectors
34928 in BE mode ("left to right") and interprets the elements of
34929 the destination vector in LE mode ("right to left"). To
34930 correct for this, we must subtract each element of the permute
34931 control vector from 31.
34933 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
34934 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
34935 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
34936 serve as the permute control vector. Then, in BE mode,
34938 vperm 9,10,11,12
34940 places the desired result in vr9. However, in LE mode the
34941 vector contents will be
34943 vr10 = 00000003 00000002 00000001 00000000
34944 vr11 = 00000007 00000006 00000005 00000004
34946 The result of the vperm using the same permute control vector is
34948 vr9 = 05000000 07000000 01000000 03000000
34950 That is, the leftmost 4 bytes of vr10 are interpreted as the
34951 source for the rightmost 4 bytes of vr9, and so on.
34953 If we change the permute control vector to
34955 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
34957 and issue
34959 vperm 9,11,10,12
34961 we get the desired
34963 vr9 = 00000006 00000004 00000002 00000000. */
34965 void
34966 altivec_expand_vec_perm_const_le (rtx operands[4])
34968 unsigned int i;
34969 rtx perm[16];
34970 rtx constv, unspec;
34971 rtx target = operands[0];
34972 rtx op0 = operands[1];
34973 rtx op1 = operands[2];
34974 rtx sel = operands[3];
34976 /* Unpack and adjust the constant selector. */
34977 for (i = 0; i < 16; ++i)
34979 rtx e = XVECEXP (sel, 0, i);
34980 unsigned int elt = 31 - (INTVAL (e) & 31);
34981 perm[i] = GEN_INT (elt);
34984 /* Expand to a permute, swapping the inputs and using the
34985 adjusted selector. */
34986 if (!REG_P (op0))
34987 op0 = force_reg (V16QImode, op0);
34988 if (!REG_P (op1))
34989 op1 = force_reg (V16QImode, op1);
34991 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
34992 constv = force_reg (V16QImode, constv);
34993 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
34994 UNSPEC_VPERM);
34995 if (!REG_P (target))
34997 rtx tmp = gen_reg_rtx (V16QImode);
34998 emit_move_insn (tmp, unspec);
34999 unspec = tmp;
35002 emit_move_insn (target, unspec);
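/* Illustrative sketch (not compiler code) of the selector adjustment
   performed above: every index is reduced modulo 32 and mirrored by
   subtracting from 31, to match the swapped-operand vperm.  */
#if 0
static void
adjust_vperm_selector_le (unsigned char perm[16], const unsigned char sel[16])
{
  int i;
  for (i = 0; i < 16; i++)
    perm[i] = 31 - (sel[i] & 31);
}
#endif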
35005 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35006 permute control vector. But here it's not a constant, so we must
35007 generate a vector NAND or NOR to do the adjustment. */
35009 void
35010 altivec_expand_vec_perm_le (rtx operands[4])
35012 rtx notx, iorx, unspec;
35013 rtx target = operands[0];
35014 rtx op0 = operands[1];
35015 rtx op1 = operands[2];
35016 rtx sel = operands[3];
35017 rtx tmp = target;
35018 rtx norreg = gen_reg_rtx (V16QImode);
35019 machine_mode mode = GET_MODE (target);
35021 /* Get everything in regs so the pattern matches. */
35022 if (!REG_P (op0))
35023 op0 = force_reg (mode, op0);
35024 if (!REG_P (op1))
35025 op1 = force_reg (mode, op1);
35026 if (!REG_P (sel))
35027 sel = force_reg (V16QImode, sel);
35028 if (!REG_P (target))
35029 tmp = gen_reg_rtx (mode);
35031 if (TARGET_P9_VECTOR)
35033 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
35034 UNSPEC_VPERMR);
35036 else
35038 /* Invert the selector with a VNAND if available, else a VNOR.
35039 The VNAND is preferred for future fusion opportunities. */
35040 notx = gen_rtx_NOT (V16QImode, sel);
35041 iorx = (TARGET_P8_VECTOR
35042 ? gen_rtx_IOR (V16QImode, notx, notx)
35043 : gen_rtx_AND (V16QImode, notx, notx));
35044 emit_insn (gen_rtx_SET (norreg, iorx));
35046 /* Permute with operands reversed and adjusted selector. */
35047 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35048 UNSPEC_VPERM);
35051 /* Copy into target, possibly by way of a register. */
35052 if (!REG_P (target))
35054 emit_move_insn (tmp, unspec);
35055 unspec = tmp;
35058 emit_move_insn (target, unspec);
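/* Illustrative note (not compiler code): vperm only looks at the low
   five bits of each selector byte, so a plain bitwise complement is
   equivalent to the subtract-from-31 adjustment; that is why a single
   VNAND/VNOR of the selector with itself suffices above.  */
#if 0
static unsigned char
complement_is_31_minus (unsigned char e)
{
  return (unsigned char) ~e & 31;  /* == 31 - (e & 31) */
}
#endif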
35061 /* Expand an Altivec constant permutation. Return true if we match
35062 an efficient implementation; false to fall back to VPERM. */
35064 bool
35065 altivec_expand_vec_perm_const (rtx operands[4])
35067 struct altivec_perm_insn {
35068 HOST_WIDE_INT mask;
35069 enum insn_code impl;
35070 unsigned char perm[16];
35072 static const struct altivec_perm_insn patterns[] = {
35073 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35074 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35075 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35076 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35077 { OPTION_MASK_ALTIVEC,
35078 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35079 : CODE_FOR_altivec_vmrglb_direct),
35080 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35081 { OPTION_MASK_ALTIVEC,
35082 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35083 : CODE_FOR_altivec_vmrglh_direct),
35084 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35085 { OPTION_MASK_ALTIVEC,
35086 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35087 : CODE_FOR_altivec_vmrglw_direct),
35088 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35089 { OPTION_MASK_ALTIVEC,
35090 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35091 : CODE_FOR_altivec_vmrghb_direct),
35092 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35093 { OPTION_MASK_ALTIVEC,
35094 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35095 : CODE_FOR_altivec_vmrghh_direct),
35096 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35097 { OPTION_MASK_ALTIVEC,
35098 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35099 : CODE_FOR_altivec_vmrghw_direct),
35100 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35101 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew_v4si,
35102 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35103 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
35104 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35107 unsigned int i, j, elt, which;
35108 unsigned char perm[16];
35109 rtx target, op0, op1, sel, x;
35110 bool one_vec;
35112 target = operands[0];
35113 op0 = operands[1];
35114 op1 = operands[2];
35115 sel = operands[3];
35117 /* Unpack the constant selector. */
35118 for (i = which = 0; i < 16; ++i)
35120 rtx e = XVECEXP (sel, 0, i);
35121 elt = INTVAL (e) & 31;
35122 which |= (elt < 16 ? 1 : 2);
35123 perm[i] = elt;
35126 /* Simplify the constant selector based on operands. */
35127 switch (which)
35129 default:
35130 gcc_unreachable ();
35132 case 3:
35133 one_vec = false;
35134 if (!rtx_equal_p (op0, op1))
35135 break;
35136 /* FALLTHRU */
35138 case 2:
35139 for (i = 0; i < 16; ++i)
35140 perm[i] &= 15;
35141 op0 = op1;
35142 one_vec = true;
35143 break;
35145 case 1:
35146 op1 = op0;
35147 one_vec = true;
35148 break;
35151 /* Look for splat patterns. */
35152 if (one_vec)
35154 elt = perm[0];
35156 for (i = 0; i < 16; ++i)
35157 if (perm[i] != elt)
35158 break;
35159 if (i == 16)
35161 if (!BYTES_BIG_ENDIAN)
35162 elt = 15 - elt;
35163 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35164 return true;
35167 if (elt % 2 == 0)
35169 for (i = 0; i < 16; i += 2)
35170 if (perm[i] != elt || perm[i + 1] != elt + 1)
35171 break;
35172 if (i == 16)
35174 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35175 x = gen_reg_rtx (V8HImode);
35176 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35177 GEN_INT (field)));
35178 emit_move_insn (target, gen_lowpart (V16QImode, x));
35179 return true;
35183 if (elt % 4 == 0)
35185 for (i = 0; i < 16; i += 4)
35186 if (perm[i] != elt
35187 || perm[i + 1] != elt + 1
35188 || perm[i + 2] != elt + 2
35189 || perm[i + 3] != elt + 3)
35190 break;
35191 if (i == 16)
35193 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35194 x = gen_reg_rtx (V4SImode);
35195 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35196 GEN_INT (field)));
35197 emit_move_insn (target, gen_lowpart (V16QImode, x));
35198 return true;
35203 /* Look for merge and pack patterns. */
35204 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35206 bool swapped;
35208 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35209 continue;
35211 elt = patterns[j].perm[0];
35212 if (perm[0] == elt)
35213 swapped = false;
35214 else if (perm[0] == elt + 16)
35215 swapped = true;
35216 else
35217 continue;
35218 for (i = 1; i < 16; ++i)
35220 elt = patterns[j].perm[i];
35221 if (swapped)
35222 elt = (elt >= 16 ? elt - 16 : elt + 16);
35223 else if (one_vec && elt >= 16)
35224 elt -= 16;
35225 if (perm[i] != elt)
35226 break;
35228 if (i == 16)
35230 enum insn_code icode = patterns[j].impl;
35231 machine_mode omode = insn_data[icode].operand[0].mode;
35232 machine_mode imode = insn_data[icode].operand[1].mode;
35234 /* For little-endian, don't use vpkuwum and vpkuhum if the
35235 underlying vector type is not V4SI and V8HI, respectively.
35236 For example, using vpkuwum with a V8HI picks up the even
35237 halfwords (BE numbering) when the even halfwords (LE
35238 numbering) are what we need. */
35239 if (!BYTES_BIG_ENDIAN
35240 && icode == CODE_FOR_altivec_vpkuwum_direct
35241 && ((GET_CODE (op0) == REG
35242 && GET_MODE (op0) != V4SImode)
35243 || (GET_CODE (op0) == SUBREG
35244 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35245 continue;
35246 if (!BYTES_BIG_ENDIAN
35247 && icode == CODE_FOR_altivec_vpkuhum_direct
35248 && ((GET_CODE (op0) == REG
35249 && GET_MODE (op0) != V8HImode)
35250 || (GET_CODE (op0) == SUBREG
35251 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35252 continue;
35254 /* For little-endian, the two input operands must be swapped
35255 (or swapped back) to ensure proper right-to-left numbering
35256 from 0 to 2N-1. */
35257 if (swapped ^ !BYTES_BIG_ENDIAN)
35258 std::swap (op0, op1);
35259 if (imode != V16QImode)
35261 op0 = gen_lowpart (imode, op0);
35262 op1 = gen_lowpart (imode, op1);
35264 if (omode == V16QImode)
35265 x = target;
35266 else
35267 x = gen_reg_rtx (omode);
35268 emit_insn (GEN_FCN (icode) (x, op0, op1));
35269 if (omode != V16QImode)
35270 emit_move_insn (target, gen_lowpart (V16QImode, x));
35271 return true;
35275 if (!BYTES_BIG_ENDIAN)
35277 altivec_expand_vec_perm_const_le (operands);
35278 return true;
35281 return false;
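/* Illustrative sketch (not compiler code) of the splat test performed
   above: a selector that repeats one element index in every byte can
   be implemented with a single vspltb instead of a general vperm.  */
#if 0
static int
is_byte_splat (const unsigned char perm[16], unsigned char *elt_out)
{
  int i;
  for (i = 1; i < 16; i++)
    if (perm[i] != perm[0])
      return 0;
  *elt_out = perm[0];
  return 1;
}
#endif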
35284 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35285 Return true if we match an efficient implementation. */
35287 static bool
35288 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35289 unsigned char perm0, unsigned char perm1)
35291 rtx x;
35293 /* If both selectors come from the same operand, fold to single op. */
35294 if ((perm0 & 2) == (perm1 & 2))
35296 if (perm0 & 2)
35297 op0 = op1;
35298 else
35299 op1 = op0;
35301 /* If both operands are equal, fold to simpler permutation. */
35302 if (rtx_equal_p (op0, op1))
35304 perm0 = perm0 & 1;
35305 perm1 = (perm1 & 1) + 2;
35307 /* If the first selector comes from the second operand, swap. */
35308 else if (perm0 & 2)
35310 if (perm1 & 2)
35311 return false;
35312 perm0 -= 2;
35313 perm1 += 2;
35314 std::swap (op0, op1);
35316 /* If the second selector does not come from the second operand, fail. */
35317 else if ((perm1 & 2) == 0)
35318 return false;
35320 /* Success! */
35321 if (target != NULL)
35323 machine_mode vmode, dmode;
35324 rtvec v;
35326 vmode = GET_MODE (target);
35327 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35328 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
35329 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35330 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35331 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35332 emit_insn (gen_rtx_SET (target, x));
35334 return true;
35337 bool
35338 rs6000_expand_vec_perm_const (rtx operands[4])
35340 rtx target, op0, op1, sel;
35341 unsigned char perm0, perm1;
35343 target = operands[0];
35344 op0 = operands[1];
35345 op1 = operands[2];
35346 sel = operands[3];
35348 /* Unpack the constant selector. */
35349 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
35350 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
35352 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
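/* Illustrative sketch (not compiler code) of the two-element selector
   semantics handled above: perm0 and perm1 index into the 4-element
   concatenation of the two input vectors, exactly like the
   VEC_CONCAT/VEC_SELECT pair emitted on success.  */
#if 0
static void
perm2_sketch (double dst[2], const double op0[2], const double op1[2],
	      unsigned int perm0, unsigned int perm1)
{
  const double cat[4] = { op0[0], op0[1], op1[0], op1[1] };
  dst[0] = cat[perm0 & 3];
  dst[1] = cat[perm1 & 3];
}
#endif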
35355 /* Test whether a constant permutation is supported. */
35357 static bool
35358 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
35359 const unsigned char *sel)
35361 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35362 if (TARGET_ALTIVEC)
35363 return true;
35365 /* Check for ps_merge* or evmerge* insns. */
35366 if (TARGET_PAIRED_FLOAT && vmode == V2SFmode)
35368 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35369 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35370 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
35373 return false;
35376 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35378 static void
35379 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35380 machine_mode vmode, unsigned nelt, rtx perm[])
35382 machine_mode imode;
35383 rtx x;
35385 imode = vmode;
35386 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
35388 imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
35389 imode = mode_for_vector (imode, nelt);
35392 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
35393 x = expand_vec_perm (vmode, op0, op1, x, target);
35394 if (x != target)
35395 emit_move_insn (target, x);
35398 /* Expand an extract even operation. */
35400 void
35401 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35403 machine_mode vmode = GET_MODE (target);
35404 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35405 rtx perm[16];
35407 for (i = 0; i < nelt; i++)
35408 perm[i] = GEN_INT (i * 2);
35410 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
35413 /* Expand a vector interleave operation. */
35415 void
35416 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35418 machine_mode vmode = GET_MODE (target);
35419 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35420 rtx perm[16];
35422 high = (highp ? 0 : nelt / 2);
35423 for (i = 0; i < nelt / 2; i++)
35425 perm[i * 2] = GEN_INT (i + high);
35426 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
35429 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
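/* Illustrative sketch (not compiler code) of the selector built above.
   For a 4-element vector, highp selects {0, 4, 1, 5} (the high halves)
   and !highp selects {2, 6, 3, 7} (the low halves).  */
#if 0
static void
build_interleave_selector (unsigned int *perm, unsigned int nelt, int highp)
{
  unsigned int i, base = highp ? 0 : nelt / 2;
  for (i = 0; i < nelt / 2; i++)
    {
      perm[2 * i] = base + i;             /* element from op0 */
      perm[2 * i + 1] = base + i + nelt;  /* matching element from op1 */
    }
}
#endif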
35432 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
35433 void
35434 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35436 HOST_WIDE_INT hwi_scale (scale);
35437 REAL_VALUE_TYPE r_pow;
35438 rtvec v = rtvec_alloc (2);
35439 rtx elt;
35440 rtx scale_vec = gen_reg_rtx (V2DFmode);
35441 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35442 elt = const_double_from_real_value (r_pow, DFmode);
35443 RTVEC_ELT (v, 0) = elt;
35444 RTVEC_ELT (v, 1) = elt;
35445 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35446 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
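/* Illustrative sketch (not compiler code): the vector multiply emitted
   above scales each lane by 2**scale, the per-element effect of ldexp
   from <math.h>.  */
#if 0
#include <math.h>

static void
scale_v2df_sketch (double dst[2], const double src[2], int scale)
{
  double factor = ldexp (1.0, scale);  /* 2**scale as a double */
  dst[0] = src[0] * factor;
  dst[1] = src[1] * factor;
}
#endif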
35449 /* Return an RTX representing where to find the function value of a
35450 function returning MODE. */
35451 static rtx
35452 rs6000_complex_function_value (machine_mode mode)
35454 unsigned int regno;
35455 rtx r1, r2;
35456 machine_mode inner = GET_MODE_INNER (mode);
35457 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35459 if (TARGET_FLOAT128_TYPE
35460 && (mode == KCmode
35461 || (mode == TCmode && TARGET_IEEEQUAD)))
35462 regno = ALTIVEC_ARG_RETURN;
35464 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35465 regno = FP_ARG_RETURN;
35467 else
35469 regno = GP_ARG_RETURN;
35471 /* 32-bit is OK since it'll go in r3/r4. */
35472 if (TARGET_32BIT && inner_bytes >= 4)
35473 return gen_rtx_REG (mode, regno);
35476 if (inner_bytes >= 8)
35477 return gen_rtx_REG (mode, regno);
35479 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35480 const0_rtx);
35481 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35482 GEN_INT (inner_bytes));
35483 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35486 /* Return an rtx describing a return value of MODE as a PARALLEL
35487 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35488 stride REG_STRIDE. */
35490 static rtx
35491 rs6000_parallel_return (machine_mode mode,
35492 int n_elts, machine_mode elt_mode,
35493 unsigned int regno, unsigned int reg_stride)
35495 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35497 int i;
35498 for (i = 0; i < n_elts; i++)
35500 rtx r = gen_rtx_REG (elt_mode, regno);
35501 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35502 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35503 regno += reg_stride;
35506 return par;
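/* Worked example (illustrative, under the ELFv2 homogeneous-aggregate
   rules used below): a struct of two doubles comes back as a PARALLEL
   of (register, byte-offset) pairs, conceptually

       (parallel [(expr_list (reg:DF f1) 0)
                  (expr_list (reg:DF f2) 8)])

   i.e. n_elts = 2, elt_mode = DFmode, regno = f1, reg_stride = 1.  */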
35509 /* Target hook for TARGET_FUNCTION_VALUE.
35511 An integer value is in r3 and a floating-point value is in fp1,
35512 unless -msoft-float. */
35514 static rtx
35515 rs6000_function_value (const_tree valtype,
35516 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35517 bool outgoing ATTRIBUTE_UNUSED)
35519 machine_mode mode;
35520 unsigned int regno;
35521 machine_mode elt_mode;
35522 int n_elts;
35524 /* Special handling for structs in darwin64. */
35525 if (TARGET_MACHO
35526 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35528 CUMULATIVE_ARGS valcum;
35529 rtx valret;
35531 valcum.words = 0;
35532 valcum.fregno = FP_ARG_MIN_REG;
35533 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35534 /* Do a trial code generation as if this were going to be passed as
35535 an argument; if any part goes in memory, we return NULL. */
35536 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35537 if (valret)
35538 return valret;
35539 /* Otherwise fall through to standard ABI rules. */
35542 mode = TYPE_MODE (valtype);
35544 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35545 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35547 int first_reg, n_regs;
35549 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35551 /* _Decimal128 must use even/odd register pairs. */
35552 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35553 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35555 else
35557 first_reg = ALTIVEC_ARG_RETURN;
35558 n_regs = 1;
35561 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35564 /* Some return value types need to be split in the 32-bit ABI with -mpowerpc64. */
35565 if (TARGET_32BIT && TARGET_POWERPC64)
35566 switch (mode)
35568 default:
35569 break;
35570 case DImode:
35571 case SCmode:
35572 case DCmode:
35573 case TCmode:
35574 int count = GET_MODE_SIZE (mode) / 4;
35575 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35578 if ((INTEGRAL_TYPE_P (valtype)
35579 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35580 || POINTER_TYPE_P (valtype))
35581 mode = TARGET_32BIT ? SImode : DImode;
35583 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35584 /* _Decimal128 must use an even/odd register pair. */
35585 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35586 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35587 && !FLOAT128_VECTOR_P (mode)
35588 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
35589 regno = FP_ARG_RETURN;
35590 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35591 && targetm.calls.split_complex_arg)
35592 return rs6000_complex_function_value (mode);
35593 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35594 return register is used in both cases, and we won't see V2DImode/V2DFmode
35595 for pure altivec, combine the two cases. */
35596 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35597 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35598 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35599 regno = ALTIVEC_ARG_RETURN;
35600 else
35601 regno = GP_ARG_RETURN;
35603 return gen_rtx_REG (mode, regno);
35606 /* Define how to find the value returned by a library function
35607 assuming the value has mode MODE. */
35608 rtx
35609 rs6000_libcall_value (machine_mode mode)
35611 unsigned int regno;
35613 /* A long long return value needs to be split in the 32-bit ABI with -mpowerpc64. */
35614 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35615 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35617 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35618 /* _Decimal128 must use an even/odd register pair. */
35619 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35620 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
35621 && TARGET_HARD_FLOAT
35622 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
35623 regno = FP_ARG_RETURN;
35624 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35625 return register is used in both cases, and we won't see V2DImode/V2DFmode
35626 for pure altivec, combine the two cases. */
35627 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35628 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35629 regno = ALTIVEC_ARG_RETURN;
35630 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35631 return rs6000_complex_function_value (mode);
35632 else
35633 regno = GP_ARG_RETURN;
35635 return gen_rtx_REG (mode, regno);
35638 /* Compute register pressure classes. We implement the target hook to avoid
35639 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35640 lead to incorrect estimates of the number of available registers and therefore
35641 increased register pressure/spill. */
35642 static int
35643 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35645 int n;
35647 n = 0;
35648 pressure_classes[n++] = GENERAL_REGS;
35649 if (TARGET_VSX)
35650 pressure_classes[n++] = VSX_REGS;
35651 else
35653 if (TARGET_ALTIVEC)
35654 pressure_classes[n++] = ALTIVEC_REGS;
35655 if (TARGET_HARD_FLOAT)
35656 pressure_classes[n++] = FLOAT_REGS;
35658 pressure_classes[n++] = CR_REGS;
35659 pressure_classes[n++] = SPECIAL_REGS;
35661 return n;
35664 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35665 Frame pointer elimination is automatically handled.
35667 For the RS/6000, if frame pointer elimination is being done, we would like
35668 to convert ap into fp, not sp.
35670 We need r30 if -mminimal-toc was specified and there are constant pool
35671 references. */
35673 static bool
35674 rs6000_can_eliminate (const int from, const int to)
35676 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35677 ? ! frame_pointer_needed
35678 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35679 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35680 || constant_pool_empty_p ()
35681 : true);
35684 /* Define the offset between two registers, FROM to be eliminated and its
35685 replacement TO, at the start of a routine. */
35686 HOST_WIDE_INT
35687 rs6000_initial_elimination_offset (int from, int to)
35689 rs6000_stack_t *info = rs6000_stack_info ();
35690 HOST_WIDE_INT offset;
35692 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35693 offset = info->push_p ? 0 : -info->total_size;
35694 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35696 offset = info->push_p ? 0 : -info->total_size;
35697 if (FRAME_GROWS_DOWNWARD)
35698 offset += info->fixed_size + info->vars_size + info->parm_size;
35700 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35701 offset = FRAME_GROWS_DOWNWARD
35702 ? info->fixed_size + info->vars_size + info->parm_size
35703 : 0;
35704 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35705 offset = info->total_size;
35706 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35707 offset = info->push_p ? info->total_size : 0;
35708 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35709 offset = 0;
35710 else
35711 gcc_unreachable ();
35713 return offset;
35716 /* Fill in sizes of registers used by unwinder. */
35718 static void
35719 rs6000_init_dwarf_reg_sizes_extra (tree address)
35721 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35723 int i;
35724 machine_mode mode = TYPE_MODE (char_type_node);
35725 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35726 rtx mem = gen_rtx_MEM (BLKmode, addr);
35727 rtx value = gen_int_mode (16, mode);
35729 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35730 The unwinder still needs to know the size of Altivec registers. */
35732 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35734 int column = DWARF_REG_TO_UNWIND_COLUMN
35735 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35736 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35738 emit_move_insn (adjust_address (mem, mode, offset), value);
35743 /* Map internal gcc register numbers to debug format register numbers.
35744 FORMAT specifies the type of debug register number to use:
35745 0 -- debug information, except for frame-related sections
35746 1 -- DWARF .debug_frame section
35747 2 -- DWARF .eh_frame section */
35749 unsigned int
35750 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35752 /* Except for the above, we use the internal number for non-DWARF
35753 debug information, and also for .eh_frame. */
35754 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35755 return regno;
35757 /* On some platforms, we use the standard DWARF register
35758 numbering for .debug_info and .debug_frame. */
35759 #ifdef RS6000_USE_DWARF_NUMBERING
35760 if (regno <= 63)
35761 return regno;
35762 if (regno == LR_REGNO)
35763 return 108;
35764 if (regno == CTR_REGNO)
35765 return 109;
35766 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35767 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35768 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35769 to the DWARF reg for CR. */
35770 if (format == 1 && regno == CR2_REGNO)
35771 return 64;
35772 if (CR_REGNO_P (regno))
35773 return regno - CR0_REGNO + 86;
35774 if (regno == CA_REGNO)
35775 return 101; /* XER */
35776 if (ALTIVEC_REGNO_P (regno))
35777 return regno - FIRST_ALTIVEC_REGNO + 1124;
35778 if (regno == VRSAVE_REGNO)
35779 return 356;
35780 if (regno == VSCR_REGNO)
35781 return 67;
35782 #endif
35783 return regno;
35786 /* target hook eh_return_filter_mode */
35787 static machine_mode
35788 rs6000_eh_return_filter_mode (void)
35790 return TARGET_32BIT ? SImode : word_mode;
35793 /* Target hook for scalar_mode_supported_p. */
35794 static bool
35795 rs6000_scalar_mode_supported_p (machine_mode mode)
35797 /* -m32 does not support TImode. This is the default, from
35798 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35799 same ABI as for -m32. But default_scalar_mode_supported_p allows
35800 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35801 for -mpowerpc64. */
35802 if (TARGET_32BIT && mode == TImode)
35803 return false;
35805 if (DECIMAL_FLOAT_MODE_P (mode))
35806 return default_decimal_float_supported_p ();
35807 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
35808 return true;
35809 else
35810 return default_scalar_mode_supported_p (mode);
35813 /* Target hook for vector_mode_supported_p. */
35814 static bool
35815 rs6000_vector_mode_supported_p (machine_mode mode)
35818 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
35819 return true;
35821 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35822 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35823 double-double. */
35824 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35825 return true;
35827 else
35828 return false;
35831 /* Target hook for floatn_mode. */
35832 static machine_mode
35833 rs6000_floatn_mode (int n, bool extended)
35835 if (extended)
35837 switch (n)
35839 case 32:
35840 return DFmode;
35842 case 64:
35843 if (TARGET_FLOAT128_KEYWORD)
35844 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35845 else
35846 return VOIDmode;
35848 case 128:
35849 return VOIDmode;
35851 default:
35852 /* Those are the only valid _FloatNx types. */
35853 gcc_unreachable ();
35856 else
35858 switch (n)
35860 case 32:
35861 return SFmode;
35863 case 64:
35864 return DFmode;
35866 case 128:
35867 if (TARGET_FLOAT128_KEYWORD)
35868 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35869 else
35870 return VOIDmode;
35872 default:
35873 return VOIDmode;
35879 /* Target hook for c_mode_for_suffix. */
35880 static machine_mode
35881 rs6000_c_mode_for_suffix (char suffix)
35883 if (TARGET_FLOAT128_TYPE)
35885 if (suffix == 'q' || suffix == 'Q')
35886 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35888 /* At the moment, we are not defining a suffix for IBM extended double.
35889 If/when the default for -mabi=ieeelongdouble is changed, and we want
35890 to support __ibm128 constants in legacy library code, we may need to
35891 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
35892 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
35893 __float80 constants. */
35896 return VOIDmode;
35899 /* Target hook for invalid_arg_for_unprototyped_fn. */
35900 static const char *
35901 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
35903 return (!rs6000_darwin64_abi
35904 && typelist == 0
35905 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
35906 && (funcdecl == NULL_TREE
35907 || (TREE_CODE (funcdecl) == FUNCTION_DECL
35908 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
35909 ? N_("AltiVec argument passed to unprototyped function")
35910 : NULL;
35913 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
35914 setup by using __stack_chk_fail_local hidden function instead of
35915 calling __stack_chk_fail directly. Otherwise it is better to call
35916 __stack_chk_fail directly. */
35918 static tree ATTRIBUTE_UNUSED
35919 rs6000_stack_protect_fail (void)
35921 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
35922 ? default_hidden_stack_protect_fail ()
35923 : default_external_stack_protect_fail ();
35926 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
35928 #if TARGET_ELF
35929 static unsigned HOST_WIDE_INT
35930 rs6000_asan_shadow_offset (void)
35932 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
35934 #endif
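/* Illustrative sketch (not compiler code): AddressSanitizer's standard
   mapping locates shadow memory at (addr >> 3) + offset, so the hook
   above places the shadow region at 1<<41 (64-bit) or 1<<29 (32-bit).  */
#if 0
static unsigned long long
asan_shadow_addr_sketch (unsigned long long addr, int target_64bit)
{
  unsigned long long offset = 1ULL << (target_64bit ? 41 : 29);
  return (addr >> 3) + offset;  /* one shadow byte per 8 bytes */
}
#endif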
35936 /* Mask options that we want to support inside of attribute((target)) and
35937 #pragma GCC target operations. Note, we do not include things like
35938 64/32-bit, endianness, hard/soft floating point, etc. that would have
35939 different calling sequences. */
35941 struct rs6000_opt_mask {
35942 const char *name; /* option name */
35943 HOST_WIDE_INT mask; /* mask to set */
35944 bool invert; /* invert sense of mask */
35945 bool valid_target; /* option is a target option */
35948 static struct rs6000_opt_mask const rs6000_opt_masks[] =
35950 { "altivec", OPTION_MASK_ALTIVEC, false, true },
35951 { "cmpb", OPTION_MASK_CMPB, false, true },
35952 { "crypto", OPTION_MASK_CRYPTO, false, true },
35953 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
35954 { "dlmzb", OPTION_MASK_DLMZB, false, true },
35955 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
35956 false, true },
35957 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, false },
35958 { "float128-type", OPTION_MASK_FLOAT128_TYPE, false, false },
35959 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, false },
35960 { "fprnd", OPTION_MASK_FPRND, false, true },
35961 { "hard-dfp", OPTION_MASK_DFP, false, true },
35962 { "htm", OPTION_MASK_HTM, false, true },
35963 { "isel", OPTION_MASK_ISEL, false, true },
35964 { "mfcrf", OPTION_MASK_MFCRF, false, true },
35965 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
35966 { "modulo", OPTION_MASK_MODULO, false, true },
35967 { "mulhw", OPTION_MASK_MULHW, false, true },
35968 { "multiple", OPTION_MASK_MULTIPLE, false, true },
35969 { "popcntb", OPTION_MASK_POPCNTB, false, true },
35970 { "popcntd", OPTION_MASK_POPCNTD, false, true },
35971 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
35972 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
35973 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
35974 { "power9-dform-scalar", OPTION_MASK_P9_DFORM_SCALAR, false, true },
35975 { "power9-dform-vector", OPTION_MASK_P9_DFORM_VECTOR, false, true },
35976 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
35977 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
35978 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
35979 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
35980 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
35981 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
35982 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
35983 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
35984 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
35985 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
35986 { "string", OPTION_MASK_STRING, false, true },
35987 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
35988 { "update", OPTION_MASK_NO_UPDATE, true , true },
35989 { "vsx", OPTION_MASK_VSX, false, true },
35990 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
35991 #ifdef OPTION_MASK_64BIT
35992 #if TARGET_AIX_OS
35993 { "aix64", OPTION_MASK_64BIT, false, false },
35994 { "aix32", OPTION_MASK_64BIT, true, false },
35995 #else
35996 { "64", OPTION_MASK_64BIT, false, false },
35997 { "32", OPTION_MASK_64BIT, true, false },
35998 #endif
35999 #endif
36000 #ifdef OPTION_MASK_EABI
36001 { "eabi", OPTION_MASK_EABI, false, false },
36002 #endif
36003 #ifdef OPTION_MASK_LITTLE_ENDIAN
36004 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36005 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36006 #endif
36007 #ifdef OPTION_MASK_RELOCATABLE
36008 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36009 #endif
36010 #ifdef OPTION_MASK_STRICT_ALIGN
36011 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36012 #endif
36013 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36014 { "string", OPTION_MASK_STRING, false, false },
36017 /* Builtin mask mapping for printing the flags. */
36018 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36020 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36021 { "vsx", RS6000_BTM_VSX, false, false },
36022 { "paired", RS6000_BTM_PAIRED, false, false },
36023 { "fre", RS6000_BTM_FRE, false, false },
36024 { "fres", RS6000_BTM_FRES, false, false },
36025 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36026 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36027 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36028 { "cell", RS6000_BTM_CELL, false, false },
36029 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36030 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36031 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36032 { "crypto", RS6000_BTM_CRYPTO, false, false },
36033 { "htm", RS6000_BTM_HTM, false, false },
36034 { "hard-dfp", RS6000_BTM_DFP, false, false },
36035 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36036 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36037 { "float128", RS6000_BTM_FLOAT128, false, false },
36040 /* Option variables that we want to support inside attribute((target)) and
36041 #pragma GCC target operations. */
36043 struct rs6000_opt_var {
36044 const char *name; /* option name */
36045 size_t global_offset; /* offset of the option in global_options. */
36046 size_t target_offset; /* offset of the option in target options. */
36049 static struct rs6000_opt_var const rs6000_opt_vars[] =
36051 { "friz",
36052 offsetof (struct gcc_options, x_TARGET_FRIZ),
36053 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36054 { "avoid-indexed-addresses",
36055 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36056 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36057 { "paired",
36058 offsetof (struct gcc_options, x_rs6000_paired_float),
36059 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36060 { "longcall",
36061 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36062 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36063 { "optimize-swaps",
36064 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36065 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36066 { "allow-movmisalign",
36067 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36068 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36069 { "allow-df-permute",
36070 offsetof (struct gcc_options, x_TARGET_ALLOW_DF_PERMUTE),
36071 offsetof (struct cl_target_option, x_TARGET_ALLOW_DF_PERMUTE), },
36072 { "sched-groups",
36073 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36074 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36075 { "always-hint",
36076 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36077 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36078 { "align-branch-targets",
36079 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36080 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36081 { "vectorize-builtins",
36082 offsetof (struct gcc_options, x_TARGET_VECTORIZE_BUILTINS),
36083 offsetof (struct cl_target_option, x_TARGET_VECTORIZE_BUILTINS), },
36084 { "tls-markers",
36085 offsetof (struct gcc_options, x_tls_markers),
36086 offsetof (struct cl_target_option, x_tls_markers), },
36087 { "sched-prolog",
36088 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36089 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36090 { "sched-epilog",
36091 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36092 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36095 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36096 parsing. Return true if there were no errors. */
36098 static bool
36099 rs6000_inner_target_options (tree args, bool attr_p)
36101 bool ret = true;
36103 if (args == NULL_TREE)
36104 ;
36106 else if (TREE_CODE (args) == STRING_CST)
36108 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36109 char *q;
36111 while ((q = strtok (p, ",")) != NULL)
36113 bool error_p = false;
36114 bool not_valid_p = false;
36115 const char *cpu_opt = NULL;
36117 p = NULL;
36118 if (strncmp (q, "cpu=", 4) == 0)
36120 int cpu_index = rs6000_cpu_name_lookup (q+4);
36121 if (cpu_index >= 0)
36122 rs6000_cpu_index = cpu_index;
36123 else
36125 error_p = true;
36126 cpu_opt = q+4;
36129 else if (strncmp (q, "tune=", 5) == 0)
36131 int tune_index = rs6000_cpu_name_lookup (q+5);
36132 if (tune_index >= 0)
36133 rs6000_tune_index = tune_index;
36134 else
36136 error_p = true;
36137 cpu_opt = q+5;
36140 else
36142 size_t i;
36143 bool invert = false;
36144 char *r = q;
36146 error_p = true;
36147 if (strncmp (r, "no-", 3) == 0)
36149 invert = true;
36150 r += 3;
36153 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36154 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36156 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36158 if (!rs6000_opt_masks[i].valid_target)
36159 not_valid_p = true;
36160 else
36162 error_p = false;
36163 rs6000_isa_flags_explicit |= mask;
36165 /* VSX needs altivec, so -mvsx automagically sets
36166 altivec and disables -mavoid-indexed-addresses. */
36167 if (!invert)
36169 if (mask == OPTION_MASK_VSX)
36171 mask |= OPTION_MASK_ALTIVEC;
36172 TARGET_AVOID_XFORM = 0;
36176 if (rs6000_opt_masks[i].invert)
36177 invert = !invert;
36179 if (invert)
36180 rs6000_isa_flags &= ~mask;
36181 else
36182 rs6000_isa_flags |= mask;
36184 break;
36187 if (error_p && !not_valid_p)
36189 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36190 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36192 size_t j = rs6000_opt_vars[i].global_offset;
36193 *((int *) ((char *)&global_options + j)) = !invert;
36194 error_p = false;
36195 not_valid_p = false;
36196 break;
36201 if (error_p)
36203 const char *eprefix, *esuffix;
36205 ret = false;
36206 if (attr_p)
36208 eprefix = "__attribute__((__target__(";
36209 esuffix = ")))";
36211 else
36213 eprefix = "#pragma GCC target ";
36214 esuffix = "";
36217 if (cpu_opt)
36218 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
36219 q, esuffix);
36220 else if (not_valid_p)
36221 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
36222 else
36223 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
36228 else if (TREE_CODE (args) == TREE_LIST)
36230 do
36232 tree value = TREE_VALUE (args);
36233 if (value)
36235 bool ret2 = rs6000_inner_target_options (value, attr_p);
36236 if (!ret2)
36237 ret = false;
36239 args = TREE_CHAIN (args);
36241 while (args != NULL_TREE);
36244 else
36246 error ("attribute %<target%> argument not a string");
36247 return false;
36250 return ret;
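/* Example of the syntax accepted above (illustrative; option names are
   looked up in rs6000_opt_masks and rs6000_opt_vars, and a "no-"
   prefix inverts the option):

     #pragma GCC target ("cpu=power8,vsx,no-crypto")
     int f (void) __attribute__ ((target ("no-vsx,friz")));  */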
36253 /* Print out the target options as a list for -mdebug=target. */
36255 static void
36256 rs6000_debug_target_options (tree args, const char *prefix)
36258 if (args == NULL_TREE)
36259 fprintf (stderr, "%s<NULL>", prefix);
36261 else if (TREE_CODE (args) == STRING_CST)
36263 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36264 char *q;
36266 while ((q = strtok (p, ",")) != NULL)
36268 p = NULL;
36269 fprintf (stderr, "%s\"%s\"", prefix, q);
36270 prefix = ", ";
36274 else if (TREE_CODE (args) == TREE_LIST)
36276 do
36278 tree value = TREE_VALUE (args);
36279 if (value)
36281 rs6000_debug_target_options (value, prefix);
36282 prefix = ", ";
36284 args = TREE_CHAIN (args);
36286 while (args != NULL_TREE);
36289 else
36290 gcc_unreachable ();
36292 return;
36296 /* Hook to validate attribute((target("..."))). */
36298 static bool
36299 rs6000_valid_attribute_p (tree fndecl,
36300 tree ARG_UNUSED (name),
36301 tree args,
36302 int flags)
36304 struct cl_target_option cur_target;
36305 bool ret;
36306 tree old_optimize = build_optimization_node (&global_options);
36307 tree new_target, new_optimize;
36308 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36310 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36312 if (TARGET_DEBUG_TARGET)
36314 tree tname = DECL_NAME (fndecl);
36315 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36316 if (tname)
36317 fprintf (stderr, "function: %.*s\n",
36318 (int) IDENTIFIER_LENGTH (tname),
36319 IDENTIFIER_POINTER (tname));
36320 else
36321 fprintf (stderr, "function: unknown\n");
36323 fprintf (stderr, "args:");
36324 rs6000_debug_target_options (args, " ");
36325 fprintf (stderr, "\n");
36327 if (flags)
36328 fprintf (stderr, "flags: 0x%x\n", flags);
36330 fprintf (stderr, "--------------------\n");
36333 /* attribute((target("default"))) does nothing, beyond
36334 affecting multi-versioning. */
36335 if (TREE_VALUE (args)
36336 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36337 && TREE_CHAIN (args) == NULL_TREE
36338 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36339 return true;
36341 old_optimize = build_optimization_node (&global_options);
36342 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36344 /* If the function changed the optimization levels as well as setting target
36345 options, start with the optimizations specified. */
36346 if (func_optimize && func_optimize != old_optimize)
36347 cl_optimization_restore (&global_options,
36348 TREE_OPTIMIZATION (func_optimize));
36350 /* The target attributes may also change some optimization flags, so update
36351 the optimization options if necessary. */
36352 cl_target_option_save (&cur_target, &global_options);
36353 rs6000_cpu_index = rs6000_tune_index = -1;
36354 ret = rs6000_inner_target_options (args, true);
36356 /* Set up any additional state. */
36357 if (ret)
36359 ret = rs6000_option_override_internal (false);
36360 new_target = build_target_option_node (&global_options);
36362 else
36363 new_target = NULL;
36365 new_optimize = build_optimization_node (&global_options);
36367 if (!new_target)
36368 ret = false;
36370 else if (fndecl)
36372 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36374 if (old_optimize != new_optimize)
36375 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36378 cl_target_option_restore (&global_options, &cur_target);
36380 if (old_optimize != new_optimize)
36381 cl_optimization_restore (&global_options,
36382 TREE_OPTIMIZATION (old_optimize));
36384 return ret;
36388 /* Hook to validate the current #pragma GCC target and set the state, and
36389 update the macros based on what was changed. If ARGS is NULL, then
36390 POP_TARGET is used to reset the options. */
36392 bool
36393 rs6000_pragma_target_parse (tree args, tree pop_target)
36395 tree prev_tree = build_target_option_node (&global_options);
36396 tree cur_tree;
36397 struct cl_target_option *prev_opt, *cur_opt;
36398 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36399 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36401 if (TARGET_DEBUG_TARGET)
36403 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36404 fprintf (stderr, "args:");
36405 rs6000_debug_target_options (args, " ");
36406 fprintf (stderr, "\n");
36408 if (pop_target)
36410 fprintf (stderr, "pop_target:\n");
36411 debug_tree (pop_target);
36413 else
36414 fprintf (stderr, "pop_target: <NULL>\n");
36416 fprintf (stderr, "--------------------\n");
36419 if (! args)
36421 cur_tree = ((pop_target)
36422 ? pop_target
36423 : target_option_default_node);
36424 cl_target_option_restore (&global_options,
36425 TREE_TARGET_OPTION (cur_tree));
36427 else
36429 rs6000_cpu_index = rs6000_tune_index = -1;
36430 if (!rs6000_inner_target_options (args, false)
36431 || !rs6000_option_override_internal (false)
36432 || (cur_tree = build_target_option_node (&global_options))
36433 == NULL_TREE)
36435 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36436 fprintf (stderr, "invalid pragma\n");
36438 return false;
36442 target_option_current_node = cur_tree;
36444 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36445 change the macros that are defined. */
36446 if (rs6000_target_modify_macros_ptr)
36448 prev_opt = TREE_TARGET_OPTION (prev_tree);
36449 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36450 prev_flags = prev_opt->x_rs6000_isa_flags;
36452 cur_opt = TREE_TARGET_OPTION (cur_tree);
36453 cur_flags = cur_opt->x_rs6000_isa_flags;
36454 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36456 diff_bumask = (prev_bumask ^ cur_bumask);
36457 diff_flags = (prev_flags ^ cur_flags);
36459 if ((diff_flags != 0) || (diff_bumask != 0))
36461 /* Delete old macros. */
36462 rs6000_target_modify_macros_ptr (false,
36463 prev_flags & diff_flags,
36464 prev_bumask & diff_bumask);
36466 /* Define new macros. */
36467 rs6000_target_modify_macros_ptr (true,
36468 cur_flags & diff_flags,
36469 cur_bumask & diff_bumask);
36473 return true;
36477 /* Remember the last target of rs6000_set_current_function. */
36478 static GTY(()) tree rs6000_previous_fndecl;
36480 /* Establish appropriate back-end context for processing the function
36481 FNDECL. The argument might be NULL to indicate processing at top
36482 level, outside of any function scope. */
36483 static void
36484 rs6000_set_current_function (tree fndecl)
36486 tree old_tree = (rs6000_previous_fndecl
36487 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
36488 : NULL_TREE);
36490 tree new_tree = (fndecl
36491 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
36492 : NULL_TREE);
36494 if (TARGET_DEBUG_TARGET)
36496 bool print_final = false;
36497 fprintf (stderr, "\n==================== rs6000_set_current_function");
36499 if (fndecl)
36500 fprintf (stderr, ", fndecl %s (%p)",
36501 (DECL_NAME (fndecl)
36502 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36503 : "<unknown>"), (void *)fndecl);
36505 if (rs6000_previous_fndecl)
36506 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36508 fprintf (stderr, "\n");
36509 if (new_tree)
36511 fprintf (stderr, "\nnew fndecl target specific options:\n");
36512 debug_tree (new_tree);
36513 print_final = true;
36516 if (old_tree)
36518 fprintf (stderr, "\nold fndecl target specific options:\n");
36519 debug_tree (old_tree);
36520 print_final = true;
36523 if (print_final)
36524 fprintf (stderr, "--------------------\n");
36527 /* Only change the context if the function changes. This hook is called
36528 several times in the course of compiling a function, and we don't want to
36529 slow things down too much or call target_reinit when it isn't safe. */
36530 if (fndecl && fndecl != rs6000_previous_fndecl)
36532 rs6000_previous_fndecl = fndecl;
36533 if (old_tree == new_tree)
36534 ;
36536 else if (new_tree && new_tree != target_option_default_node)
36538 cl_target_option_restore (&global_options,
36539 TREE_TARGET_OPTION (new_tree));
36540 if (TREE_TARGET_GLOBALS (new_tree))
36541 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36542 else
36543 TREE_TARGET_GLOBALS (new_tree)
36544 = save_target_globals_default_opts ();
36547 else if (old_tree && old_tree != target_option_default_node)
36549 new_tree = target_option_current_node;
36550 cl_target_option_restore (&global_options,
36551 TREE_TARGET_OPTION (new_tree));
36552 if (TREE_TARGET_GLOBALS (new_tree))
36553 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36554 else if (new_tree == target_option_default_node)
36555 restore_target_globals (&default_target_globals);
36556 else
36557 TREE_TARGET_GLOBALS (new_tree)
36558 = save_target_globals_default_opts ();
36564 /* Save the current options */
36566 static void
36567 rs6000_function_specific_save (struct cl_target_option *ptr,
36568 struct gcc_options *opts)
36570 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36571 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36574 /* Restore the current options */
36576 static void
36577 rs6000_function_specific_restore (struct gcc_options *opts,
36578 struct cl_target_option *ptr)
36581 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36582 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36583 (void) rs6000_option_override_internal (false);
36586 /* Print the current options */
36588 static void
36589 rs6000_function_specific_print (FILE *file, int indent,
36590 struct cl_target_option *ptr)
36592 rs6000_print_isa_options (file, indent, "Isa options set",
36593 ptr->x_rs6000_isa_flags);
36595 rs6000_print_isa_options (file, indent, "Isa options explicit",
36596 ptr->x_rs6000_isa_flags_explicit);
36599 /* Helper function to print the current isa or misc options on a line. */
36601 static void
36602 rs6000_print_options_internal (FILE *file,
36603 int indent,
36604 const char *string,
36605 HOST_WIDE_INT flags,
36606 const char *prefix,
36607 const struct rs6000_opt_mask *opts,
36608 size_t num_elements)
36610 size_t i;
36611 size_t start_column = 0;
36612 size_t cur_column;
36613 size_t max_column = 120;
36614 size_t prefix_len = strlen (prefix);
36615 size_t comma_len = 0;
36616 const char *comma = "";
36618 if (indent)
36619 start_column += fprintf (file, "%*s", indent, "");
36621 if (!flags)
36623 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
36624 return;
36627 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
36629 /* Print the various mask options. */
36630 cur_column = start_column;
36631 for (i = 0; i < num_elements; i++)
36633 bool invert = opts[i].invert;
36634 const char *name = opts[i].name;
36635 const char *no_str = "";
36636 HOST_WIDE_INT mask = opts[i].mask;
36637 size_t len = comma_len + prefix_len + strlen (name);
36639 if (!invert)
36641 if ((flags & mask) == 0)
36643 no_str = "no-";
36644 len += sizeof ("no-") - 1;
36647 flags &= ~mask;
36650 else
36652 if ((flags & mask) != 0)
36654 no_str = "no-";
36655 len += sizeof ("no-") - 1;
36658 flags |= mask;
36661 cur_column += len;
36662 if (cur_column > max_column)
36664 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
36665 cur_column = start_column + len;
36666 comma = "";
36669 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36670 comma = ", ";
36671 comma_len = sizeof (", ") - 1;
36674 fputs ("\n", file);
36677 /* Helper function to print the current isa options on a line. */
36679 static void
36680 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36681 HOST_WIDE_INT flags)
36683 rs6000_print_options_internal (file, indent, string, flags, "-m",
36684 &rs6000_opt_masks[0],
36685 ARRAY_SIZE (rs6000_opt_masks));
36688 static void
36689 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36690 HOST_WIDE_INT flags)
36692 rs6000_print_options_internal (file, indent, string, flags, "",
36693 &rs6000_builtin_mask_names[0],
36694 ARRAY_SIZE (rs6000_builtin_mask_names));
36697 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36698 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36699 -mvsx-timode, -mupper-regs-df).
36701 If the user used -mno-power8-vector, we need to turn off all of the implicit
36702 ISA 2.07 and 3.0 options that relate to the vector unit.
36704 If the user used -mno-power9-vector, we need to turn off all of the implicit
36705 ISA 3.0 options that relate to the vector unit.
36707 This function does not handle explicit options such as the user specifying
36708 -mdirect-move. These are handled in rs6000_option_override_internal, and
36709 the appropriate error is given if needed.
36711 We return a mask of all of the implicit options that should not be enabled
36712 by default. */
36714 static HOST_WIDE_INT
36715 rs6000_disable_incompatible_switches (void)
36717 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36718 size_t i, j;
36720 static const struct {
36721 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36722 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36723 const char *const name; /* name of the switch. */
36724 } flags[] = {
36725 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36726 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36727 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36730 for (i = 0; i < ARRAY_SIZE (flags); i++)
36732 HOST_WIDE_INT no_flag = flags[i].no_flag;
36734 if ((rs6000_isa_flags & no_flag) == 0
36735 && (rs6000_isa_flags_explicit & no_flag) != 0)
36737 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36738 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36739 & rs6000_isa_flags
36740 & dep_flags);
36742 if (set_flags)
36744 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36745 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36747 set_flags &= ~rs6000_opt_masks[j].mask;
36748 error ("-mno-%s turns off -m%s",
36749 flags[i].name,
36750 rs6000_opt_masks[j].name);
36753 gcc_assert (!set_flags);
36756 rs6000_isa_flags &= ~dep_flags;
36757 ignore_masks |= no_flag | dep_flags;
36761 if (!TARGET_P9_VECTOR
36762 && (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) != 0
36763 && TARGET_P9_DFORM_BOTH > 0)
36765 error ("-mno-power9-vector turns off -mpower9-dform");
36766 TARGET_P9_DFORM_BOTH = 0;
36769 return ignore_masks;
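/* Illustrative sketch (not compiler code) of the dependency handling
   above: when a parent option is explicitly negated, any implicitly
   enabled dependent flags are cleared and returned in the ignore mask
   so the default logic cannot re-enable them.  */
#if 0
static unsigned long long
disable_dependents_sketch (unsigned long long *flags,
			   unsigned long long explicit_flags,
			   unsigned long long no_flag,
			   unsigned long long dep_flags)
{
  unsigned long long ignore = 0;
  if ((*flags & no_flag) == 0 && (explicit_flags & no_flag) != 0)
    {
      *flags &= ~dep_flags;          /* drop everything that needs it */
      ignore = no_flag | dep_flags;  /* and keep defaults from re-adding */
    }
  return ignore;
}
#endif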
36773 /* Helper function for printing the function name when debugging. */
36775 static const char *
36776 get_decl_name (tree fn)
36778 tree name;
36780 if (!fn)
36781 return "<null>";
36783 name = DECL_NAME (fn);
36784 if (!name)
36785 return "<no-name>";
36787 return IDENTIFIER_POINTER (name);
36790 /* Return the clone id of the target we are compiling code for in a target
36791 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
36792 the priority list for the target clones (ordered from lowest to
36793 highest). */
36795 static int
36796 rs6000_clone_priority (tree fndecl)
36798 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36799 HOST_WIDE_INT isa_masks;
36800 int ret = CLONE_DEFAULT;
36801 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
36802 const char *attrs_str = NULL;
36804 attrs = TREE_VALUE (TREE_VALUE (attrs));
36805 attrs_str = TREE_STRING_POINTER (attrs);
36807 /* Return priority zero for default function. Return the ISA needed for the
36808 function if it is not the default. */
36809 if (strcmp (attrs_str, "default") != 0)
36811 if (fn_opts == NULL_TREE)
36812 fn_opts = target_option_default_node;
36814 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
36815 isa_masks = rs6000_isa_flags;
36816 else
36817 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
36819 for (ret = CLONE_MAX - 1; ret != 0; ret--)
36820 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
36821 break;
36824 if (TARGET_DEBUG_TARGET)
36825 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
36826 get_decl_name (fndecl), ret);
36828 return ret;
36831 /* This compares the priority of target features in function DECL1 and DECL2.
36832 It returns positive value if DECL1 is higher priority, negative value if
36833 DECL2 is higher priority and 0 if they are the same. Note, priorities are
36834 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
36836 static int
36837 rs6000_compare_version_priority (tree decl1, tree decl2)
36839 int priority1 = rs6000_clone_priority (decl1);
36840 int priority2 = rs6000_clone_priority (decl2);
36841 int ret = priority1 - priority2;
36843 if (TARGET_DEBUG_TARGET)
36844 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
36845 get_decl_name (decl1), get_decl_name (decl2), ret);
36847 return ret;
36850 /* Make a dispatcher declaration for the multi-versioned function DECL.
36851 Calls to the DECL function will be replaced with calls to the dispatcher
36852 by the front-end. Returns the decl of the dispatcher function. */
36854 static tree
36855 rs6000_get_function_versions_dispatcher (void *decl)
36857 tree fn = (tree) decl;
36858 struct cgraph_node *node = NULL;
36859 struct cgraph_node *default_node = NULL;
36860 struct cgraph_function_version_info *node_v = NULL;
36861 struct cgraph_function_version_info *first_v = NULL;
36863 tree dispatch_decl = NULL;
36865 struct cgraph_function_version_info *default_version_info = NULL;
36866 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
36868 if (TARGET_DEBUG_TARGET)
36869 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
36870 get_decl_name (fn));
36872 node = cgraph_node::get (fn);
36873 gcc_assert (node != NULL);
36875 node_v = node->function_version ();
36876 gcc_assert (node_v != NULL);
36878 if (node_v->dispatcher_resolver != NULL)
36879 return node_v->dispatcher_resolver;
36881 /* Find the default version and make it the first node. */
36882 first_v = node_v;
36883 /* Go to the beginning of the chain. */
36884 while (first_v->prev != NULL)
36885 first_v = first_v->prev;
36887 default_version_info = first_v;
36888 while (default_version_info != NULL)
36890 const tree decl2 = default_version_info->this_node->decl;
36891 if (is_function_default_version (decl2))
36892 break;
36893 default_version_info = default_version_info->next;
36896 /* If there is no default node, just return NULL. */
36897 if (default_version_info == NULL)
36898 return NULL;
36900 /* Make default info the first node. */
36901 if (first_v != default_version_info)
36903 default_version_info->prev->next = default_version_info->next;
36904 if (default_version_info->next)
36905 default_version_info->next->prev = default_version_info->prev;
36906 first_v->prev = default_version_info;
36907 default_version_info->next = first_v;
36908 default_version_info->prev = NULL;
36911 default_node = default_version_info->this_node;
36913 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
36914 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36915 "target_clones attribute needs GLIBC (2.23 and newer) that "
36916 "exports hardware capability bits");
36917 #else
36919 if (targetm.has_ifunc_p ())
36921 struct cgraph_function_version_info *it_v = NULL;
36922 struct cgraph_node *dispatcher_node = NULL;
36923 struct cgraph_function_version_info *dispatcher_version_info = NULL;
36925 /* Right now, the dispatching is done via ifunc. */
36926 dispatch_decl = make_dispatcher_decl (default_node->decl);
36928 dispatcher_node = cgraph_node::get_create (dispatch_decl);
36929 gcc_assert (dispatcher_node != NULL);
36930 dispatcher_node->dispatcher_function = 1;
36931 dispatcher_version_info
36932 = dispatcher_node->insert_new_function_version ();
36933 dispatcher_version_info->next = default_version_info;
36934 dispatcher_node->definition = 1;
36936 /* Set the dispatcher for all the versions. */
36937 it_v = default_version_info;
36938 while (it_v != NULL)
36940 it_v->dispatcher_resolver = dispatch_decl;
36941 it_v = it_v->next;
36944 else
36946 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36947 "multiversioning needs ifunc which is not supported "
36948 "on this target");
36950 #endif
36952 return dispatch_decl;
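/* Sketch of the resulting dispatch (illustration only; foo_power9 and
   foo_default are hypothetical clone names, and the real code is built
   as GIMPLE rather than written like this):

     static void *foo_resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00"))
         return (void *) foo_power9;
       return (void *) foo_default;
     }
     int foo (void) __attribute__ ((ifunc ("foo_resolver")));  */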
36955 /* Make the resolver function decl to dispatch the versions of a multi-
36956 versioned function, DEFAULT_DECL. Create an empty basic block in the
36957 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
36958 function. */
36960 static tree
36961 make_resolver_func (const tree default_decl,
36962 const tree dispatch_decl,
36963 basic_block *empty_bb)
36965 /* Make the resolver function static. The resolver function returns
36966 void *. */
36967 tree decl_name = clone_function_name (default_decl, "resolver");
36968 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
36969 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
36970 tree decl = build_fn_decl (resolver_name, type);
36971 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
36973 DECL_NAME (decl) = decl_name;
36974 TREE_USED (decl) = 1;
36975 DECL_ARTIFICIAL (decl) = 1;
36976 DECL_IGNORED_P (decl) = 0;
36977 TREE_PUBLIC (decl) = 0;
36978 DECL_UNINLINABLE (decl) = 1;
36980 /* Resolver is not external, body is generated. */
36981 DECL_EXTERNAL (decl) = 0;
36982 DECL_EXTERNAL (dispatch_decl) = 0;
36984 DECL_CONTEXT (decl) = NULL_TREE;
36985 DECL_INITIAL (decl) = make_node (BLOCK);
36986 DECL_STATIC_CONSTRUCTOR (decl) = 0;
36988 /* Build result decl and add to function_decl. */
36989 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
36990 DECL_ARTIFICIAL (t) = 1;
36991 DECL_IGNORED_P (t) = 1;
36992 DECL_RESULT (decl) = t;
36994 gimplify_function_tree (decl);
36995 push_cfun (DECL_STRUCT_FUNCTION (decl));
36996 *empty_bb = init_lowered_empty_function (decl, false,
36997 profile_count::uninitialized ());
36999 cgraph_node::add_new_function (decl, true);
37000 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37002 pop_cfun ();
37004 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37005 DECL_ATTRIBUTES (dispatch_decl)
37006 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37008 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37010 return decl;
37013 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37014 return a pointer to VERSION_DECL if we are running on a machine that
37015 supports the hardware architecture bits indexed by CLONE_ISA. This function will
37016 be called during version dispatch to decide which function version to
37017 execute. It returns the basic block at the end, to which more conditions
37018 can be added. */
37020 static basic_block
37021 add_condition_to_bb (tree function_decl, tree version_decl,
37022 int clone_isa, basic_block new_bb)
37024 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37026 gcc_assert (new_bb != NULL);
37027 gimple_seq gseq = bb_seq (new_bb);
37030 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37031 build_fold_addr_expr (version_decl));
37032 tree result_var = create_tmp_var (ptr_type_node);
37033 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37034 gimple *return_stmt = gimple_build_return (result_var);
37036 if (clone_isa == CLONE_DEFAULT)
37038 gimple_seq_add_stmt (&gseq, convert_stmt);
37039 gimple_seq_add_stmt (&gseq, return_stmt);
37040 set_bb_seq (new_bb, gseq);
37041 gimple_set_bb (convert_stmt, new_bb);
37042 gimple_set_bb (return_stmt, new_bb);
37043 pop_cfun ();
37044 return new_bb;
37047 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37048 tree cond_var = create_tmp_var (bool_int_type_node);
37049 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37050 const char *arg_str = rs6000_clone_map[clone_isa].name;
37051 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37052 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37053 gimple_call_set_lhs (call_cond_stmt, cond_var);
37055 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37056 gimple_set_bb (call_cond_stmt, new_bb);
37057 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37059 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37060 NULL_TREE, NULL_TREE);
37061 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37062 gimple_set_bb (if_else_stmt, new_bb);
37063 gimple_seq_add_stmt (&gseq, if_else_stmt);
37065 gimple_seq_add_stmt (&gseq, convert_stmt);
37066 gimple_seq_add_stmt (&gseq, return_stmt);
37067 set_bb_seq (new_bb, gseq);
37069 basic_block bb1 = new_bb;
37070 edge e12 = split_block (bb1, if_else_stmt);
37071 basic_block bb2 = e12->dest;
37072 e12->flags &= ~EDGE_FALLTHRU;
37073 e12->flags |= EDGE_TRUE_VALUE;
37075 edge e23 = split_block (bb2, return_stmt);
37076 gimple_set_bb (convert_stmt, bb2);
37077 gimple_set_bb (return_stmt, bb2);
37079 basic_block bb3 = e23->dest;
37080 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37082 remove_edge (e23);
37083 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37085 pop_cfun ();
37086 return bb3;
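/* For a non-default clone the block built above corresponds roughly to the
   following (hedged sketch; "arch_3_00" stands in for
   rs6000_clone_map[clone_isa].name):

     cond_var = __builtin_cpu_supports ("arch_3_00");
     if (cond_var != 0)
       return (void *) version_decl;   // bb2, true edge
     // bb3: false edge, where the next clone's condition is appended  */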
37089 /* This function generates the dispatch function for multi-versioned functions.
37090 DISPATCH_DECL is the function which will contain the dispatch logic.
37091 FNDECLS is a vector of the function choices for dispatch.
37092 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37093 code is generated. */
37095 static int
37096 dispatch_function_versions (tree dispatch_decl,
37097 void *fndecls_p,
37098 basic_block *empty_bb)
37100 int ix;
37101 tree ele;
37102 vec<tree> *fndecls;
37103 tree clones[CLONE_MAX];
37105 if (TARGET_DEBUG_TARGET)
37106 fputs ("dispatch_function_versions, top\n", stderr);
37108 gcc_assert (dispatch_decl != NULL
37109 && fndecls_p != NULL
37110 && empty_bb != NULL);
37112 /* fndecls_p is actually a vector. */
37113 fndecls = static_cast<vec<tree> *> (fndecls_p);
37115 /* At least one more version other than the default. */
37116 gcc_assert (fndecls->length () >= 2);
37118 /* The first version in the vector is the default decl. */
37119 memset ((void *) clones, '\0', sizeof (clones));
37120 clones[CLONE_DEFAULT] = (*fndecls)[0];
37122 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37123 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37124 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37125 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37126 to insert the code here to do the call. */
37128 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37130 int priority = rs6000_clone_priority (ele);
37131 if (!clones[priority])
37132 clones[priority] = ele;
37135 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37136 if (clones[ix])
37138 if (TARGET_DEBUG_TARGET)
37139 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37140 ix, get_decl_name (clones[ix]));
37142 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37143 *empty_bb);
37146 return 0;
37149 /* Generate the dispatching code body to dispatch multi-versioned function
37150 DECL. The target hook is called to process the "target" attributes and
37151 provide the code to dispatch the right function at run-time. NODE points
37152 to the dispatcher decl whose body will be created. */
37154 static tree
37155 rs6000_generate_version_dispatcher_body (void *node_p)
37157 tree resolver;
37158 basic_block empty_bb;
37159 struct cgraph_node *node = (cgraph_node *) node_p;
37160 struct cgraph_function_version_info *ninfo = node->function_version ();
37162 if (ninfo->dispatcher_resolver)
37163 return ninfo->dispatcher_resolver;
37165 /* node is going to be an alias, so remove the finalized bit. */
37166 node->definition = false;
37168 /* The first version in the chain corresponds to the default version. */
37169 ninfo->dispatcher_resolver = resolver
37170 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37172 if (TARGET_DEBUG_TARGET)
37173 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37174 get_decl_name (resolver));
37176 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37177 auto_vec<tree, 2> fn_ver_vec;
37179 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37180 vinfo;
37181 vinfo = vinfo->next)
37183 struct cgraph_node *version = vinfo->this_node;
37184 /* Check for virtual functions here again, as by this time it should
37185 have been determined if this function needs a vtable index or
37186 not. This happens for methods in derived classes that override
37187 virtual methods in base classes but are not explicitly marked as
37188 virtual. */
37189 if (DECL_VINDEX (version->decl))
37190 sorry ("Virtual function multiversioning not supported");
37192 fn_ver_vec.safe_push (version->decl);
37195 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37196 cgraph_edge::rebuild_edges ();
37197 pop_cfun ();
37198 return resolver;
37202 /* Hook to determine if one function can safely inline another. */
37204 static bool
37205 rs6000_can_inline_p (tree caller, tree callee)
37207 bool ret = false;
37208 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37209 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37211 /* If callee has no option attributes, then it is ok to inline. */
37212 if (!callee_tree)
37213 ret = true;
37215 /* If caller has no option attributes, but callee does then it is not ok to
37216 inline. */
37217 else if (!caller_tree)
37218 ret = false;
37220 else
37222 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37223 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37225 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37226 can inline an altivec function but a non-vsx function can't inline a
37227 vsx function. */
37228 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37229 == callee_opts->x_rs6000_isa_flags)
37230 ret = true;
37233 if (TARGET_DEBUG_TARGET)
37234 fprintf (stderr, "rs6000_can_inline_p, caller %s, callee %s, %s inline\n",
37235 get_decl_name (caller), get_decl_name (callee),
37236 (ret ? "can" : "cannot"));
37238 return ret;
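/* Example of the subset rule (illustrative, hypothetical functions):

     __attribute__ ((target ("altivec"))) static int a (void) { return 0; }
     __attribute__ ((target ("vsx")))     int b (void) { return a (); }

   Enabling VSX also enables Altivec, so a's ISA flags are a subset of b's
   and a can be inlined into b; inlining in the other direction would be
   rejected.  */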
37241 /* Allocate a stack temp and fixup the address so it meets the particular
37242 memory requirements (either offsettable or REG+REG addressing). */
37245 rs6000_allocate_stack_temp (machine_mode mode,
37246 bool offsettable_p,
37247 bool reg_reg_p)
37249 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37250 rtx addr = XEXP (stack, 0);
37251 int strict_p = reload_completed;
37253 if (!legitimate_indirect_address_p (addr, strict_p))
37255 if (offsettable_p
37256 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37257 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37259 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37260 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37263 return stack;
37266 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37267 to such a form to deal with memory reference instructions like STFIWX that
37268 only take reg+reg addressing. */
37271 rs6000_address_for_fpconvert (rtx x)
37273 rtx addr;
37275 gcc_assert (MEM_P (x));
37276 addr = XEXP (x, 0);
37277 if (! legitimate_indirect_address_p (addr, reload_completed)
37278 && ! legitimate_indexed_address_p (addr, reload_completed))
37280 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37282 rtx reg = XEXP (addr, 0);
37283 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37284 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37285 gcc_assert (REG_P (reg));
37286 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37287 addr = reg;
37289 else if (GET_CODE (addr) == PRE_MODIFY)
37291 rtx reg = XEXP (addr, 0);
37292 rtx expr = XEXP (addr, 1);
37293 gcc_assert (REG_P (reg));
37294 gcc_assert (GET_CODE (expr) == PLUS);
37295 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37296 addr = reg;
37299 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37302 return x;
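/* For example (illustration): a word access through (pre_dec (reg r9))
   is rewritten by emitting the update first, conceptually

     addi r9,r9,-4        # fold the auto-decrement into the base register
     stfiwx f0,0,r9       # reg-indirect form that STFIWX accepts

   leaving a plain register-indirect address.  */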
37305 /* Given a memory reference, if it is not in the form for altivec memory
37306 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
37307 convert to the altivec format. */
37310 rs6000_address_for_altivec (rtx x)
37312 gcc_assert (MEM_P (x));
37313 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
37315 rtx addr = XEXP (x, 0);
37317 if (!legitimate_indexed_address_p (addr, reload_completed)
37318 && !legitimate_indirect_address_p (addr, reload_completed))
37319 addr = copy_to_mode_reg (Pmode, addr);
37321 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
37322 x = change_address (x, GET_MODE (x), addr);
37325 return x;
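/* The canonical AltiVec form produced above is

     (mem (and (reg R) (const_int -16)))

   matching lvx/stvx, which ignore the low four bits of the effective
   address and thus implicitly enforce 16-byte alignment.  */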
37328 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37330 On the RS/6000, all integer constants are acceptable, most won't be valid
37331 for particular insns, though. Only easy FP constants are acceptable. */
37333 static bool
37334 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37336 if (TARGET_ELF && tls_referenced_p (x))
37337 return false;
37339 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37340 || GET_MODE (x) == VOIDmode
37341 || (TARGET_POWERPC64 && mode == DImode)
37342 || easy_fp_constant (x, mode)
37343 || easy_vector_constant (x, mode));
37347 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37349 static bool
37350 chain_already_loaded (rtx_insn *last)
37352 for (; last != NULL; last = PREV_INSN (last))
37354 if (NONJUMP_INSN_P (last))
37356 rtx patt = PATTERN (last);
37358 if (GET_CODE (patt) == SET)
37360 rtx lhs = XEXP (patt, 0);
37362 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37363 return true;
37367 return false;
37370 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37372 void
37373 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37375 const bool direct_call_p
37376 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37377 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37378 rtx toc_load = NULL_RTX;
37379 rtx toc_restore = NULL_RTX;
37380 rtx func_addr;
37381 rtx abi_reg = NULL_RTX;
37382 rtx call[4];
37383 int n_call;
37384 rtx insn;
37386 /* Handle longcall attributes. */
37387 if (INTVAL (cookie) & CALL_LONG)
37388 func_desc = rs6000_longcall_ref (func_desc);
37390 /* Handle indirect calls. */
37391 if (GET_CODE (func_desc) != SYMBOL_REF
37392 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37394 /* Save the TOC into its reserved slot before the call,
37395 and prepare to restore it after the call. */
37396 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37397 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37398 rtx stack_toc_mem = gen_frame_mem (Pmode,
37399 gen_rtx_PLUS (Pmode, stack_ptr,
37400 stack_toc_offset));
37401 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37402 gen_rtvec (1, stack_toc_offset),
37403 UNSPEC_TOCSLOT);
37404 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37406 /* Can we optimize saving the TOC in the prologue or
37407 do we need to do it at every call? */
37408 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37409 cfun->machine->save_toc_in_prologue = true;
37410 else
37412 MEM_VOLATILE_P (stack_toc_mem) = 1;
37413 emit_move_insn (stack_toc_mem, toc_reg);
37416 if (DEFAULT_ABI == ABI_ELFv2)
37418 /* A function pointer in the ELFv2 ABI is just a plain address, but
37419 the ABI requires it to be loaded into r12 before the call. */
37420 func_addr = gen_rtx_REG (Pmode, 12);
37421 emit_move_insn (func_addr, func_desc);
37422 abi_reg = func_addr;
37424 else
37426 /* A function pointer under AIX is a pointer to a data area whose
37427 first word contains the actual address of the function, whose
37428 second word contains a pointer to its TOC, and whose third word
37429 contains a value to place in the static chain register (r11).
37430 Note that if we load the static chain, our "trampoline" need
37431 not have any executable code. */
37433 /* Load up address of the actual function. */
37434 func_desc = force_reg (Pmode, func_desc);
37435 func_addr = gen_reg_rtx (Pmode);
37436 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37438 /* Prepare to load the TOC of the called function. Note that the
37439 TOC load must happen immediately before the actual call so
37440 that unwinding the TOC registers works correctly. See the
37441 comment in frob_update_context. */
37442 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37443 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37444 gen_rtx_PLUS (Pmode, func_desc,
37445 func_toc_offset));
37446 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37448 /* If we have a static chain, load it up. But, if the call was
37449 originally direct, the 3rd word has not been written since no
37450 trampoline has been built, so we ought not to load it, lest we
37451 overwrite a static chain value. */
37452 if (!direct_call_p
37453 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37454 && !chain_already_loaded (get_current_sequence ()->next->last))
37456 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37457 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37458 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37459 gen_rtx_PLUS (Pmode, func_desc,
37460 func_sc_offset));
37461 emit_move_insn (sc_reg, func_sc_mem);
37462 abi_reg = sc_reg;
37466 else
37468 /* Direct calls use the TOC: for local calls, the callee will
37469 assume the TOC register is set; for non-local calls, the
37470 PLT stub needs the TOC register. */
37471 abi_reg = toc_reg;
37472 func_addr = func_desc;
37475 /* Create the call. */
37476 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37477 if (value != NULL_RTX)
37478 call[0] = gen_rtx_SET (value, call[0]);
37479 n_call = 1;
37481 if (toc_load)
37482 call[n_call++] = toc_load;
37483 if (toc_restore)
37484 call[n_call++] = toc_restore;
37486 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37488 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37489 insn = emit_call_insn (insn);
37491 /* Mention all registers defined by the ABI to hold information
37492 as uses in CALL_INSN_FUNCTION_USAGE. */
37493 if (abi_reg)
37494 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
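/* Illustrative indirect-call sequence (sketch; the 24-byte offset assumes
   the ELFv2 value of RS6000_TOC_SAVE_SLOT):

     std   r2,24(r1)      # save the TOC in its reserved stack slot
     mr    r12,rN         # ELFv2 wants the function address in r12
     mtctr r12
     bctrl                # the call
     ld    r2,24(r1)      # toc_restore, emitted after the call

   Under AIX the descriptor words (address, TOC, static chain) are instead
   loaded explicitly as in the code above.  */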
37497 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37499 void
37500 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37502 rtx call[2];
37503 rtx insn;
37505 gcc_assert (INTVAL (cookie) == 0);
37507 /* Create the call. */
37508 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37509 if (value != NULL_RTX)
37510 call[0] = gen_rtx_SET (value, call[0]);
37512 call[1] = simple_return_rtx;
37514 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37515 insn = emit_call_insn (insn);
37517 /* Note use of the TOC register. */
37518 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37521 /* Return whether we need to always update the saved TOC pointer when we update
37522 the stack pointer. */
37524 static bool
37525 rs6000_save_toc_in_prologue_p (void)
37527 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37530 #ifdef HAVE_GAS_HIDDEN
37531 # define USE_HIDDEN_LINKONCE 1
37532 #else
37533 # define USE_HIDDEN_LINKONCE 0
37534 #endif
37536 /* Fills in the label name that should be used for a 476 link stack thunk. */
37538 void
37539 get_ppc476_thunk_name (char name[32])
37541 gcc_assert (TARGET_LINK_STACK);
37543 if (USE_HIDDEN_LINKONCE)
37544 sprintf (name, "__ppc476.get_thunk");
37545 else
37546 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37549 /* This function emits the simple thunk routine that is used to preserve
37550 the link stack on the 476 cpu. */
37552 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37553 static void
37554 rs6000_code_end (void)
37556 char name[32];
37557 tree decl;
37559 if (!TARGET_LINK_STACK)
37560 return;
37562 get_ppc476_thunk_name (name);
37564 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37565 build_function_type_list (void_type_node, NULL_TREE));
37566 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37567 NULL_TREE, void_type_node);
37568 TREE_PUBLIC (decl) = 1;
37569 TREE_STATIC (decl) = 1;
37571 #if RS6000_WEAK
37572 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37574 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37575 targetm.asm_out.unique_section (decl, 0);
37576 switch_to_section (get_named_section (decl, NULL, 0));
37577 DECL_WEAK (decl) = 1;
37578 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37579 targetm.asm_out.globalize_label (asm_out_file, name);
37580 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37581 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37583 else
37584 #endif
37586 switch_to_section (text_section);
37587 ASM_OUTPUT_LABEL (asm_out_file, name);
37590 DECL_INITIAL (decl) = make_node (BLOCK);
37591 current_function_decl = decl;
37592 allocate_struct_function (decl, false);
37593 init_function_start (decl);
37594 first_function_block_is_cold = false;
37595 /* Make sure unwind info is emitted for the thunk if needed. */
37596 final_start_function (emit_barrier (), asm_out_file, 1);
37598 fputs ("\tblr\n", asm_out_file);
37600 final_end_function ();
37601 init_insn_lengths ();
37602 free_after_compilation (cfun);
37603 set_cfun (NULL);
37604 current_function_decl = NULL;
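/* The thunk emitted above is just the label followed by a return:

     __ppc476.get_thunk:
             blr

   using the name filled in by get_ppc476_thunk_name.  */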
37607 /* Add r30 to hard reg set if the prologue sets it up and it is not
37608 pic_offset_table_rtx. */
37610 static void
37611 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37613 if (!TARGET_SINGLE_PIC_BASE
37614 && TARGET_TOC
37615 && TARGET_MINIMAL_TOC
37616 && !constant_pool_empty_p ())
37617 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37618 if (cfun->machine->split_stack_argp_used)
37619 add_to_hard_reg_set (&set->set, Pmode, 12);
37623 /* Helper function for rs6000_split_logical to emit a logical instruction after
37624 splitting the operation into single GPR registers.
37626 DEST is the destination register.
37627 OP1 and OP2 are the input source registers.
37628 CODE is the base operation (AND, IOR, XOR, NOT).
37629 MODE is the machine mode.
37630 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37631 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37632 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37634 static void
37635 rs6000_split_logical_inner (rtx dest,
37636 rtx op1,
37637 rtx op2,
37638 enum rtx_code code,
37639 machine_mode mode,
37640 bool complement_final_p,
37641 bool complement_op1_p,
37642 bool complement_op2_p)
37644 rtx bool_rtx;
37646 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37647 if (op2 && GET_CODE (op2) == CONST_INT
37648 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37649 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37651 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37652 HOST_WIDE_INT value = INTVAL (op2) & mask;
37654 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37655 if (code == AND)
37657 if (value == 0)
37659 emit_insn (gen_rtx_SET (dest, const0_rtx));
37660 return;
37663 else if (value == mask)
37665 if (!rtx_equal_p (dest, op1))
37666 emit_insn (gen_rtx_SET (dest, op1));
37667 return;
37671 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37672 into separate ORI/ORIS or XORI/XORIS instructions. */
37673 else if (code == IOR || code == XOR)
37675 if (value == 0)
37677 if (!rtx_equal_p (dest, op1))
37678 emit_insn (gen_rtx_SET (dest, op1));
37679 return;
37684 if (code == AND && mode == SImode
37685 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37687 emit_insn (gen_andsi3 (dest, op1, op2));
37688 return;
37691 if (complement_op1_p)
37692 op1 = gen_rtx_NOT (mode, op1);
37694 if (complement_op2_p)
37695 op2 = gen_rtx_NOT (mode, op2);
37697 /* For canonical RTL, if only one arm is inverted it is the first. */
37698 if (!complement_op1_p && complement_op2_p)
37699 std::swap (op1, op2);
37701 bool_rtx = ((code == NOT)
37702 ? gen_rtx_NOT (mode, op1)
37703 : gen_rtx_fmt_ee (code, mode, op1, op2));
37705 if (complement_final_p)
37706 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37708 emit_insn (gen_rtx_SET (dest, bool_rtx));
37711 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37712 operations are split immediately during RTL generation to allow for more
37713 optimizations of the AND/IOR/XOR.
37715 OPERANDS is an array containing the destination and two input operands.
37716 CODE is the base operation (AND, IOR, XOR, NOT).
37717 MODE is the machine mode.
37718 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37719 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37720 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
37721 CLOBBER_REG is either NULL or a scratch register of type CC to allow
37722 formation of the AND instructions. */
37724 static void
37725 rs6000_split_logical_di (rtx operands[3],
37726 enum rtx_code code,
37727 bool complement_final_p,
37728 bool complement_op1_p,
37729 bool complement_op2_p)
37731 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37732 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37733 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37734 enum hi_lo { hi = 0, lo = 1 };
37735 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37736 size_t i;
37738 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37739 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37740 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37741 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37743 if (code == NOT)
37744 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37745 else
37747 if (GET_CODE (operands[2]) != CONST_INT)
37749 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37750 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37752 else
37754 HOST_WIDE_INT value = INTVAL (operands[2]);
37755 HOST_WIDE_INT value_hi_lo[2];
37757 gcc_assert (!complement_final_p);
37758 gcc_assert (!complement_op1_p);
37759 gcc_assert (!complement_op2_p);
37761 value_hi_lo[hi] = value >> 32;
37762 value_hi_lo[lo] = value & lower_32bits;
37764 for (i = 0; i < 2; i++)
37766 HOST_WIDE_INT sub_value = value_hi_lo[i];
37768 if (sub_value & sign_bit)
37769 sub_value |= upper_32bits;
37771 op2_hi_lo[i] = GEN_INT (sub_value);
37773 /* If this is an AND instruction, check to see if we need to load
37774 the value in a register. */
37775 if (code == AND && sub_value != -1 && sub_value != 0
37776 && !and_operand (op2_hi_lo[i], SImode))
37777 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37782 for (i = 0; i < 2; i++)
37784 /* Split large IOR/XOR operations. */
37785 if ((code == IOR || code == XOR)
37786 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37787 && !complement_final_p
37788 && !complement_op1_p
37789 && !complement_op2_p
37790 && !logical_const_operand (op2_hi_lo[i], SImode))
37792 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37793 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37794 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37795 rtx tmp = gen_reg_rtx (SImode);
37797 /* Make sure the constant is sign extended. */
37798 if ((hi_16bits & sign_bit) != 0)
37799 hi_16bits |= upper_32bits;
37801 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37802 code, SImode, false, false, false);
37804 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37805 code, SImode, false, false, false);
37807 else
37808 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37809 code, SImode, complement_final_p,
37810 complement_op1_p, complement_op2_p);
37813 return;
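/* Worked example (illustration): on a 32-bit target,

     x |= 0x1234567900000000

   is split into SImode halves.  The high half 0x12345679 is not a valid
   single logical immediate, so it is split again into an IOR with
   0x12340000 (assembles to oris) followed by an IOR with 0x5679
   (assembles to ori); the low half is 0 and becomes a simple move.  */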
37816 /* Split the insns that make up boolean operations operating on multiple GPR
37817 registers. The boolean MD patterns ensure that the inputs either are
37818 exactly the same as the output registers, or there is no overlap.
37820 OPERANDS is an array containing the destination and two input operands.
37821 CODE is the base operation (AND, IOR, XOR, NOT).
37822 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37823 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37824 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37826 void
37827 rs6000_split_logical (rtx operands[3],
37828 enum rtx_code code,
37829 bool complement_final_p,
37830 bool complement_op1_p,
37831 bool complement_op2_p)
37833 machine_mode mode = GET_MODE (operands[0]);
37834 machine_mode sub_mode;
37835 rtx op0, op1, op2;
37836 int sub_size, regno0, regno1, nregs, i;
37838 /* If this is DImode, use the specialized version that can run before
37839 register allocation. */
37840 if (mode == DImode && !TARGET_POWERPC64)
37842 rs6000_split_logical_di (operands, code, complement_final_p,
37843 complement_op1_p, complement_op2_p);
37844 return;
37847 op0 = operands[0];
37848 op1 = operands[1];
37849 op2 = (code == NOT) ? NULL_RTX : operands[2];
37850 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
37851 sub_size = GET_MODE_SIZE (sub_mode);
37852 regno0 = REGNO (op0);
37853 regno1 = REGNO (op1);
37855 gcc_assert (reload_completed);
37856 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37857 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37859 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
37860 gcc_assert (nregs > 1);
37862 if (op2 && REG_P (op2))
37863 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
37865 for (i = 0; i < nregs; i++)
37867 int offset = i * sub_size;
37868 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
37869 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
37870 rtx sub_op2 = ((code == NOT)
37871 ? NULL_RTX
37872 : simplify_subreg (sub_mode, op2, mode, offset));
37874 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
37875 complement_final_p, complement_op1_p,
37876 complement_op2_p);
37879 return;
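/* Example (illustration): after reload on a 64-bit target, a TImode IOR
   living in a GPR pair is split here into two DImode IORs, one per
   register of the pair; the asserts above guarantee all operands are
   GPRs and that the MD patterns have prevented partial overlap.  */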
37883 /* Return true if the peephole2 can combine a load involving a combination of
37884 an addis instruction and a load with an offset that can be fused together on
37885 a power8. */
37887 bool
37888 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
37889 rtx addis_value, /* addis value. */
37890 rtx target, /* target register that is loaded. */
37891 rtx mem) /* bottom part of the memory addr. */
37893 rtx addr;
37894 rtx base_reg;
37896 /* Validate arguments. */
37897 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37898 return false;
37900 if (!base_reg_operand (target, GET_MODE (target)))
37901 return false;
37903 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37904 return false;
37906 /* Allow sign/zero extension. */
37907 if (GET_CODE (mem) == ZERO_EXTEND
37908 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
37909 mem = XEXP (mem, 0);
37911 if (!MEM_P (mem))
37912 return false;
37914 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
37915 return false;
37917 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37918 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
37919 return false;
37921 /* Validate that the register used to load the high value is either the
37922 register being loaded, or we can safely replace its use.
37924 This function is only called from the peephole2 pass and we assume that
37925 there are 2 instructions in the peephole (addis and load), so we want to
37926 check if the target register was not used in the memory address and the
37927 register to hold the addis result is dead after the peephole. */
37928 if (REGNO (addis_reg) != REGNO (target))
37930 if (reg_mentioned_p (target, mem))
37931 return false;
37933 if (!peep2_reg_dead_p (2, addis_reg))
37934 return false;
37936 /* If the target register being loaded is the stack pointer, we must
37937 avoid loading any other value into it, even temporarily. */
37938 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
37939 return false;
37942 base_reg = XEXP (addr, 0);
37943 return REGNO (addis_reg) == REGNO (base_reg);
37946 /* During the peephole2 pass, adjust and expand the insns for a load fusion
37947 sequence. We adjust the addis register to use the target register. If the
37948 load sign extends, we adjust the code to do the zero extending load, and an
37949 explicit sign extension later since the fusion only covers zero extending
37950 loads.
37952 The operands are:
37953 operands[0] register set with addis (to be replaced with target)
37954 operands[1] value set via addis
37955 operands[2] target register being loaded
37956 operands[3] D-form memory reference using operands[0]. */
37958 void
37959 expand_fusion_gpr_load (rtx *operands)
37961 rtx addis_value = operands[1];
37962 rtx target = operands[2];
37963 rtx orig_mem = operands[3];
37964 rtx new_addr, new_mem, orig_addr, offset;
37965 enum rtx_code plus_or_lo_sum;
37966 machine_mode target_mode = GET_MODE (target);
37967 machine_mode extend_mode = target_mode;
37968 machine_mode ptr_mode = Pmode;
37969 enum rtx_code extend = UNKNOWN;
37971 if (GET_CODE (orig_mem) == ZERO_EXTEND
37972 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
37974 extend = GET_CODE (orig_mem);
37975 orig_mem = XEXP (orig_mem, 0);
37976 target_mode = GET_MODE (orig_mem);
37979 gcc_assert (MEM_P (orig_mem));
37981 orig_addr = XEXP (orig_mem, 0);
37982 plus_or_lo_sum = GET_CODE (orig_addr);
37983 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37985 offset = XEXP (orig_addr, 1);
37986 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37987 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37989 if (extend != UNKNOWN)
37990 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
37992 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37993 UNSPEC_FUSION_GPR);
37994 emit_insn (gen_rtx_SET (target, new_mem));
37996 if (extend == SIGN_EXTEND)
37998 int sub_off = ((BYTES_BIG_ENDIAN)
37999 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38000 : 0);
38001 rtx sign_reg
38002 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38004 emit_insn (gen_rtx_SET (target,
38005 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38008 return;
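/* Illustration: a sign-extending SImode->DImode fused load is emitted as
   the zero-extending fused pair followed by an explicit sign extension,
   conceptually

     addis rT,r2,sym@toc@ha
     lwz   rT,sym@toc@l(rT)
     extsw rT,rT

   because power8 fusion only covers the zero-extending load forms.  */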
38011 /* Emit the addis instruction that will be part of a fused instruction
38012 sequence. */
38014 void
38015 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
38016 const char *mode_name)
38018 rtx fuse_ops[10];
38019 char insn_template[80];
38020 const char *addis_str = NULL;
38021 const char *comment_str = ASM_COMMENT_START;
38023 if (*comment_str == ' ')
38024 comment_str++;
38026 /* Emit the addis instruction. */
38027 fuse_ops[0] = target;
38028 if (satisfies_constraint_L (addis_value))
38030 fuse_ops[1] = addis_value;
38031 addis_str = "lis %0,%v1";
38034 else if (GET_CODE (addis_value) == PLUS)
38036 rtx op0 = XEXP (addis_value, 0);
38037 rtx op1 = XEXP (addis_value, 1);
38039 if (REG_P (op0) && CONST_INT_P (op1)
38040 && satisfies_constraint_L (op1))
38042 fuse_ops[1] = op0;
38043 fuse_ops[2] = op1;
38044 addis_str = "addis %0,%1,%v2";
38048 else if (GET_CODE (addis_value) == HIGH)
38050 rtx value = XEXP (addis_value, 0);
38051 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38053 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38054 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38055 if (TARGET_ELF)
38056 addis_str = "addis %0,%2,%1@toc@ha";
38058 else if (TARGET_XCOFF)
38059 addis_str = "addis %0,%1@u(%2)";
38061 else
38062 gcc_unreachable ();
38065 else if (GET_CODE (value) == PLUS)
38067 rtx op0 = XEXP (value, 0);
38068 rtx op1 = XEXP (value, 1);
38070 if (GET_CODE (op0) == UNSPEC
38071 && XINT (op0, 1) == UNSPEC_TOCREL
38072 && CONST_INT_P (op1))
38074 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38075 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38076 fuse_ops[3] = op1;
38077 if (TARGET_ELF)
38078 addis_str = "addis %0,%2,%1+%3@toc@ha";
38080 else if (TARGET_XCOFF)
38081 addis_str = "addis %0,%1+%3@u(%2)";
38083 else
38084 gcc_unreachable ();
38088 else if (satisfies_constraint_L (value))
38090 fuse_ops[1] = value;
38091 addis_str = "lis %0,%v1";
38094 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38096 fuse_ops[1] = value;
38097 addis_str = "lis %0,%1@ha";
38101 if (!addis_str)
38102 fatal_insn ("Could not generate addis value for fusion", addis_value);
38104 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
38105 comment, mode_name);
38106 output_asm_insn (insn_template, fuse_ops);
38109 /* Emit a D-form load or store instruction that is the second instruction
38110 of a fusion sequence. */
38112 void
38113 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38114 const char *insn_str)
38116 rtx fuse_ops[10];
38117 char insn_template[80];
38119 fuse_ops[0] = load_store_reg;
38120 fuse_ops[1] = addis_reg;
38122 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38124 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38125 fuse_ops[2] = offset;
38126 output_asm_insn (insn_template, fuse_ops);
38129 else if (GET_CODE (offset) == UNSPEC
38130 && XINT (offset, 1) == UNSPEC_TOCREL)
38132 if (TARGET_ELF)
38133 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38135 else if (TARGET_XCOFF)
38136 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38138 else
38139 gcc_unreachable ();
38141 fuse_ops[2] = XVECEXP (offset, 0, 0);
38142 output_asm_insn (insn_template, fuse_ops);
38145 else if (GET_CODE (offset) == PLUS
38146 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38147 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38148 && CONST_INT_P (XEXP (offset, 1)))
38150 rtx tocrel_unspec = XEXP (offset, 0);
38151 if (TARGET_ELF)
38152 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38154 else if (TARGET_XCOFF)
38155 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38157 else
38158 gcc_unreachable ();
38160 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38161 fuse_ops[3] = XEXP (offset, 1);
38162 output_asm_insn (insn_template, fuse_ops);
38165 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38167 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38169 fuse_ops[2] = offset;
38170 output_asm_insn (insn_template, fuse_ops);
38173 else
38174 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38176 return;
38179 /* Wrap a TOC address that can be fused to indicate that special fusion
38180 processing is needed. */
38183 fusion_wrap_memory_address (rtx old_mem)
38185 rtx old_addr = XEXP (old_mem, 0);
38186 rtvec v = gen_rtvec (1, old_addr);
38187 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38188 return replace_equiv_address_nv (old_mem, new_addr, false);
38191 /* Given an address, convert it into the addis and load offset parts. Addresses
38192 created during the peephole2 process look like:
38193 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38194 (unspec [(...)] UNSPEC_TOCREL))
38196 Addresses created via toc fusion look like:
38197 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
38199 static void
38200 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38202 rtx hi, lo;
38204 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38206 lo = XVECEXP (addr, 0, 0);
38207 hi = gen_rtx_HIGH (Pmode, lo);
38209 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38211 hi = XEXP (addr, 0);
38212 lo = XEXP (addr, 1);
38214 else
38215 gcc_unreachable ();
38217 *p_hi = hi;
38218 *p_lo = lo;
38221 /* Return a string to fuse an addis instruction with a gpr load to the same
38222 register that the addis instruction set. The address that is used
38223 is the logical address that was formed during peephole2:
38224 (lo_sum (high) (low-part))
38226 Or the address is the TOC address that is wrapped before register allocation:
38227 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38229 The code is complicated, so we call output_asm_insn directly, and just
38230 return "". */
38232 const char *
38233 emit_fusion_gpr_load (rtx target, rtx mem)
38235 rtx addis_value;
38236 rtx addr;
38237 rtx load_offset;
38238 const char *load_str = NULL;
38239 const char *mode_name = NULL;
38240 machine_mode mode;
38242 if (GET_CODE (mem) == ZERO_EXTEND)
38243 mem = XEXP (mem, 0);
38245 gcc_assert (REG_P (target) && MEM_P (mem));
38247 addr = XEXP (mem, 0);
38248 fusion_split_address (addr, &addis_value, &load_offset);
38250 /* Now emit the load instruction to the same register. */
38251 mode = GET_MODE (mem);
38252 switch (mode)
38254 case QImode:
38255 mode_name = "char";
38256 load_str = "lbz";
38257 break;
38259 case HImode:
38260 mode_name = "short";
38261 load_str = "lhz";
38262 break;
38264 case SImode:
38265 case SFmode:
38266 mode_name = (mode == SFmode) ? "float" : "int";
38267 load_str = "lwz";
38268 break;
38270 case DImode:
38271 case DFmode:
38272 gcc_assert (TARGET_POWERPC64);
38273 mode_name = (mode == DFmode) ? "double" : "long";
38274 load_str = "ld";
38275 break;
38277 default:
38278 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38281 /* Emit the addis instruction. */
38282 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
38284 /* Emit the D-form load instruction. */
38285 emit_fusion_load_store (target, target, load_offset, load_str);
38287 return "";
38291 /* Return true if the peephole2 can combine a load/store involving a
38292 combination of an addis instruction and the memory operation. This was
38293 added to the ISA 3.0 (power9) hardware. */
38295 bool
38296 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38297 rtx addis_value, /* addis value. */
38298 rtx dest, /* destination (memory or register). */
38299 rtx src) /* source (register or memory). */
38301 rtx addr, mem, offset;
38302 machine_mode mode = GET_MODE (src);
38304 /* Validate arguments. */
38305 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38306 return false;
38308 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38309 return false;
38311 /* Ignore extend operations that are part of the load. */
38312 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38313 src = XEXP (src, 0);
38315 /* Test for memory<-register or register<-memory. */
38316 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38318 if (!MEM_P (dest))
38319 return false;
38321 mem = dest;
38324 else if (MEM_P (src))
38326 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38327 return false;
38329 mem = src;
38332 else
38333 return false;
38335 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38336 if (GET_CODE (addr) == PLUS)
38338 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38339 return false;
38341 return satisfies_constraint_I (XEXP (addr, 1));
38344 else if (GET_CODE (addr) == LO_SUM)
38346 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38347 return false;
38349 offset = XEXP (addr, 1);
38350 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38351 return small_toc_ref (offset, GET_MODE (offset));
38353 else if (TARGET_ELF && !TARGET_POWERPC64)
38354 return CONSTANT_P (offset);
38357 return false;
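/* By contrast with the power8 form, ISA 3.0 fusion accepts (illustration):

     addis r9,r2,var@toc@ha
     lfd   f1,var@toc@l(r9)

   where the fused base register (r9) differs from the destination (f1);
   the expanded pattern clobbers the addis register, so it must be
   otherwise dead.  */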
38360 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38361 load sequence.
38363 The operands are:
38364 operands[0] register set with addis
38365 operands[1] value set via addis
38366 operands[2] target register being loaded
38367 operands[3] D-form memory reference using operands[0].
38369 This is similar to the fusion introduced with power8, except it scales to
38370 both loads/stores and does not require the result register to be the same as
38371 the base register. At the moment, we only do this if the register set
38372 with addis is dead. */
38374 void
38375 expand_fusion_p9_load (rtx *operands)
38377 rtx tmp_reg = operands[0];
38378 rtx addis_value = operands[1];
38379 rtx target = operands[2];
38380 rtx orig_mem = operands[3];
38381 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38382 enum rtx_code plus_or_lo_sum;
38383 machine_mode target_mode = GET_MODE (target);
38384 machine_mode extend_mode = target_mode;
38385 machine_mode ptr_mode = Pmode;
38386 enum rtx_code extend = UNKNOWN;
38388 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38390 extend = GET_CODE (orig_mem);
38391 orig_mem = XEXP (orig_mem, 0);
38392 target_mode = GET_MODE (orig_mem);
38395 gcc_assert (MEM_P (orig_mem));
38397 orig_addr = XEXP (orig_mem, 0);
38398 plus_or_lo_sum = GET_CODE (orig_addr);
38399 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38401 offset = XEXP (orig_addr, 1);
38402 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38403 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38405 if (extend != UNKNOWN)
38406 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38408 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38409 UNSPEC_FUSION_P9);
38411 set = gen_rtx_SET (target, new_mem);
38412 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38413 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38414 emit_insn (insn);
38416 return;
38419 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38420 store sequence.
38422 The operands are:
38423 operands[0] register set with addis
38424 operands[1] value set via addis
38425 operands[2] target D-form memory being stored to
38426 operands[3] register being stored
38428 This is similar to the fusion introduced with power8, except it scales to
38429 both loads/stores and does not require the result register to be the same as
38430 the base register. At the moment, we only do this if the register set
38431 with addis is dead. */
38433 void
38434 expand_fusion_p9_store (rtx *operands)
38436 rtx tmp_reg = operands[0];
38437 rtx addis_value = operands[1];
38438 rtx orig_mem = operands[2];
38439 rtx src = operands[3];
38440 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38441 enum rtx_code plus_or_lo_sum;
38442 machine_mode target_mode = GET_MODE (orig_mem);
38443 machine_mode ptr_mode = Pmode;
38445 gcc_assert (MEM_P (orig_mem));
38447 orig_addr = XEXP (orig_mem, 0);
38448 plus_or_lo_sum = GET_CODE (orig_addr);
38449 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38451 offset = XEXP (orig_addr, 1);
38452 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38453 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38455 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38456 UNSPEC_FUSION_P9);
38458 set = gen_rtx_SET (new_mem, new_src);
38459 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38460 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38461 emit_insn (insn);
38463 return;
38466 /* Return a string to fuse an addis instruction with a load using extended
38467 fusion. The address that is used is the logical address that was formed
38468 during peephole2: (lo_sum (high) (low-part))
38470 The code is complicated, so we call output_asm_insn directly, and just
38471 return "". */
38473 const char *
38474 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
38476 machine_mode mode = GET_MODE (reg);
38477 rtx hi;
38478 rtx lo;
38479 rtx addr;
38480 const char *load_string;
38481 int r;
38483 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
38485 mem = XEXP (mem, 0);
38486 mode = GET_MODE (mem);
38489 if (GET_CODE (reg) == SUBREG)
38491 gcc_assert (SUBREG_BYTE (reg) == 0);
38492 reg = SUBREG_REG (reg);
38495 if (!REG_P (reg))
38496 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
38498 r = REGNO (reg);
38499 if (FP_REGNO_P (r))
38501 if (mode == SFmode)
38502 load_string = "lfs";
38503 else if (mode == DFmode || mode == DImode)
38504 load_string = "lfd";
38505 else
38506 gcc_unreachable ();
38508 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_DFORM_SCALAR)
38510 if (mode == SFmode)
38511 load_string = "lxssp";
38512 else if (mode == DFmode || mode == DImode)
38513 load_string = "lxsd";
38514 else
38515 gcc_unreachable ();
38517 else if (INT_REGNO_P (r))
38519 switch (mode)
38521 case QImode:
38522 load_string = "lbz";
38523 break;
38524 case HImode:
38525 load_string = "lhz";
38526 break;
38527 case SImode:
38528 case SFmode:
38529 load_string = "lwz";
38530 break;
38531 case DImode:
38532 case DFmode:
38533 if (!TARGET_POWERPC64)
38534 gcc_unreachable ();
38535 load_string = "ld";
38536 break;
38537 default:
38538 gcc_unreachable ();
38541 else
38542 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38544 if (!MEM_P (mem))
38545 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38547 addr = XEXP (mem, 0);
38548 fusion_split_address (addr, &hi, &lo);
38550 /* Emit the addis instruction. */
38551 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
38553 /* Emit the D-form load instruction. */
38554 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38556 return "";
38559 /* Return a string to fuse an addis instruction with a store using extended
38560 fusion. The address that is used is the logical address that was formed
38561 during peephole2: (lo_sum (high) (low-part))
38563 The code is complicated, so we call output_asm_insn directly, and just
38564 return "". */
38566 const char *
38567 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38569 machine_mode mode = GET_MODE (reg);
38570 rtx hi;
38571 rtx lo;
38572 rtx addr;
38573 const char *store_string;
38574 int r;
38576 if (GET_CODE (reg) == SUBREG)
38578 gcc_assert (SUBREG_BYTE (reg) == 0);
38579 reg = SUBREG_REG (reg);
38582 if (!REG_P (reg))
38583 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38585 r = REGNO (reg);
38586 if (FP_REGNO_P (r))
38588 if (mode == SFmode)
38589 store_string = "stfs";
38590 else if (mode == DFmode)
38591 store_string = "stfd";
38592 else
38593 gcc_unreachable ();
38595 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_DFORM_SCALAR)
38597 if (mode == SFmode)
38598 store_string = "stxssp";
38599 else if (mode == DFmode || mode == DImode)
38600 store_string = "stxsd";
38601 else
38602 gcc_unreachable ();
38604 else if (INT_REGNO_P (r))
38606 switch (mode)
38608 case QImode:
38609 store_string = "stb";
38610 break;
38611 case HImode:
38612 store_string = "sth";
38613 break;
38614 case SImode:
38615 case SFmode:
38616 store_string = "stw";
38617 break;
38618 case DImode:
38619 case DFmode:
38620 if (!TARGET_POWERPC64)
38621 gcc_unreachable ();
38622 store_string = "std";
38623 break;
38624 default:
38625 gcc_unreachable ();
38628 else
38629 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38631 if (!MEM_P (mem))
38632 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38634 addr = XEXP (mem, 0);
38635 fusion_split_address (addr, &hi, &lo);
38637 /* Emit the addis instruction. */
38638 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
38640 /* Emit the D-form store instruction. */
38641 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38643 return "";
38646 #ifdef RS6000_GLIBC_ATOMIC_FENV
38647 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38648 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38649 #endif
38651 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38653 static void
38654 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38656 if (!TARGET_HARD_FLOAT)
38658 #ifdef RS6000_GLIBC_ATOMIC_FENV
38659 if (atomic_hold_decl == NULL_TREE)
38661 atomic_hold_decl
38662 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38663 get_identifier ("__atomic_feholdexcept"),
38664 build_function_type_list (void_type_node,
38665 double_ptr_type_node,
38666 NULL_TREE));
38667 TREE_PUBLIC (atomic_hold_decl) = 1;
38668 DECL_EXTERNAL (atomic_hold_decl) = 1;
38671 if (atomic_clear_decl == NULL_TREE)
38673 atomic_clear_decl
38674 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38675 get_identifier ("__atomic_feclearexcept"),
38676 build_function_type_list (void_type_node,
38677 NULL_TREE));
38678 TREE_PUBLIC (atomic_clear_decl) = 1;
38679 DECL_EXTERNAL (atomic_clear_decl) = 1;
38682 tree const_double = build_qualified_type (double_type_node,
38683 TYPE_QUAL_CONST);
38684 tree const_double_ptr = build_pointer_type (const_double);
38685 if (atomic_update_decl == NULL_TREE)
38687 atomic_update_decl
38688 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38689 get_identifier ("__atomic_feupdateenv"),
38690 build_function_type_list (void_type_node,
38691 const_double_ptr,
38692 NULL_TREE));
38693 TREE_PUBLIC (atomic_update_decl) = 1;
38694 DECL_EXTERNAL (atomic_update_decl) = 1;
38697 tree fenv_var = create_tmp_var_raw (double_type_node);
38698 TREE_ADDRESSABLE (fenv_var) = 1;
38699 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38701 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38702 *clear = build_call_expr (atomic_clear_decl, 0);
38703 *update = build_call_expr (atomic_update_decl, 1,
38704 fold_convert (const_double_ptr, fenv_addr));
38705 #endif
38706 return;
38709 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38710 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38711 tree call_mffs = build_call_expr (mffs, 0);
38713 /* Generates the equivalent of feholdexcept (&fenv_var)
38715 *fenv_var = __builtin_mffs ();
38716 double fenv_hold;
38717 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
38718 __builtin_mtfsf (0xff, fenv_hold); */
38720 /* Mask to clear everything except for the rounding modes and non-IEEE
38721 arithmetic flag. */
38722 const unsigned HOST_WIDE_INT hold_exception_mask =
38723 HOST_WIDE_INT_C (0xffffffff00000007);
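/* In the 64-bit image returned by mffs, the FPSCR occupies the low 32
   bits; the low three bits of that word are NI (non-IEEE mode) and the
   two RN rounding-control bits, which is why the mask ends in 0x7.  */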
38725 tree fenv_var = create_tmp_var_raw (double_type_node);
38727 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38729 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38730 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38731 build_int_cst (uint64_type_node,
38732 hold_exception_mask));
38734 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38735 fenv_llu_and);
38737 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38738 build_int_cst (unsigned_type_node, 0xff),
38739 fenv_hold_mtfsf);
38741 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38743 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38745 double fenv_clear = __builtin_mffs ();
38746 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38747 __builtin_mtfsf (0xff, fenv_clear); */
38749 /* Mask to clear the entire lower word of the mffs image, i.e. all of
38750 the FPSCR bits, including the exception flags and rounding modes. */
38751 const unsigned HOST_WIDE_INT clear_exception_mask =
38752 HOST_WIDE_INT_C (0xffffffff00000000);
38754 tree fenv_clear = create_tmp_var_raw (double_type_node);
38756 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38758 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38759 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38760 fenv_clean_llu,
38761 build_int_cst (uint64_type_node,
38762 clear_exception_mask));
38764 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38765 fenv_clear_llu_and);
38767 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38768 build_int_cst (unsigned_type_node, 0xff),
38769 fenv_clear_mtfsf);
38771 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38773 /* Generates the equivalent of feupdateenv (&fenv_var)
38775 double old_fenv = __builtin_mffs ();
38776 double fenv_update;
38777 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
38778 (*(uint64_t*)fenv_var & 0x1ff80fff);
38779 __builtin_mtfsf (0xff, fenv_update); */
38781 const unsigned HOST_WIDE_INT update_exception_mask =
38782 HOST_WIDE_INT_C (0xffffffff1fffff00);
38783 const unsigned HOST_WIDE_INT new_exception_mask =
38784 HOST_WIDE_INT_C (0x1ff80fff);
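/* Net effect of the two masks (sketch): the control bits (exception
   enables, NI, RN) are taken from the saved environment only, while the
   sticky exception status bits of the saved environment are OR'ed with
   any exceptions currently raised, which is what feupdateenv requires.  */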
38786 tree old_fenv = create_tmp_var_raw (double_type_node);
38787 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
38789 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
38790 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
38791 build_int_cst (uint64_type_node,
38792 update_exception_mask));
38794 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38795 build_int_cst (uint64_type_node,
38796 new_exception_mask));
38798 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
38799 old_llu_and, new_llu_and);
38801 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38802 new_llu_mask);
38804 tree update_mtfsf = build_call_expr (mtfsf, 2,
38805 build_int_cst (unsigned_type_node, 0xff),
38806 fenv_update_mtfsf);
38808 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
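/* The hold/clear/update triple built above is consumed when the middle
   end expands a C11 atomic compound assignment on a floating-point type.
   Conceptually (a sketch only, not the literal expansion):

	hold;				// feholdexcept (&fenv_var)
   retry:
	new = old OP operand;
	if (!compare_exchange (addr, &old, new))
	  {
	    clear;			// feclearexcept (FE_ALL_EXCEPT)
	    goto retry;
	  }
	update;				// feupdateenv (&fenv_var)
 */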
38811 void
38812 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
38814 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38816 rtx_tmp0 = gen_reg_rtx (V2DImode);
38817 rtx_tmp1 = gen_reg_rtx (V2DImode);
38819 /* The destination layout of the vmrgew instruction is:
38820 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38821 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
38822 the vmrgew instruction will be correct. */
38823 if (VECTOR_ELT_ORDER_BIG)
38825 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
38826 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
38828 else
38830 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
38831 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
38834 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38835 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38837 if (signed_convert)
38839 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
38840 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
38842 else
38844 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
38845 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
38848 if (VECTOR_ELT_ORDER_BIG)
38849 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38850 else
38851 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
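/* As a sketch of the data flow for big-endian element order, with
   src1 = { a0, a1 } and src2 = { b0, b1 } (64-bit integers), the
   xxpermdi pair forms { a0, b0 } and { a1, b1 }, the converts place the
   single-precision results in the even word slots, and the final vmrgew
   produces

	dst = { (float) a0, (float) a1, (float) b0, (float) b1 };
 */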
38854 void
38855 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
38856 rtx src2)
38858 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38860 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38861 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38863 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
38864 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
38866 rtx_tmp2 = gen_reg_rtx (V4SImode);
38867 rtx_tmp3 = gen_reg_rtx (V4SImode);
38869 if (signed_convert)
38871 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
38872 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
38874 else
38876 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
38877 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
38880 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
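/* Same structure as rs6000_generate_float2_code, but narrowing V2DF to
   V4SI: with src1 = { a0, a1 } and src2 = { b0, b1 } (doubles), the
   result is (sketch, big-endian element order)

	dst = { (int) a0, (int) a1, (int) b0, (int) b1 };
 */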
38883 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
38885 static bool
38886 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
38887 optimization_type opt_type)
38889 switch (op)
38891 case rsqrt_optab:
38892 return (opt_type == OPTIMIZE_FOR_SPEED
38893 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
38895 default:
38896 return true;
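/* For example, when optimizing for speed with the reciprocal estimates
   enabled, this lets 1.0 / sqrt (x) be expanded through the hardware
   reciprocal square root estimate plus a refinement step; when optimizing
   for size the rsqrt optab is refused and a plain square root and divide
   are kept (a rough characterization; the exact conditions are those
   tested by RS6000_RECIP_AUTO_RSQRTE_P).  */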
38900 struct gcc_target targetm = TARGET_INITIALIZER;
38902 #include "gt-rs6000.h"