/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2016 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
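
/* Editorial note (not in the original source): like any textual min/max
   macro, these evaluate their arguments at every use, so a call such as
   min (x++, y) would increment x twice.  Callers are expected to pass
   side-effect-free operands.  */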

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;         /* stack info won't change from here on */
  int first_gp_reg_save;        /* first callee saved GP register used */
  int first_fp_reg_save;        /* first callee saved FP register used */
  int first_altivec_reg_save;   /* first callee saved AltiVec register used */
  int lr_save_p;                /* true if the link reg needs to be saved */
  int cr_save_p;                /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;     /* mask of vec registers to save */
  int push_p;                   /* true if we need to allocate stack space */
  int calls_p;                  /* true if the function makes any calls */
  int world_save_p;             /* true if we're saving *everything*:
                                   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;          /* which ABI to use */
  int gp_save_offset;           /* offset to save GP regs from initial SP */
  int fp_save_offset;           /* offset to save FP regs from initial SP */
  int altivec_save_offset;      /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;           /* offset to save LR from initial SP */
  int cr_save_offset;           /* offset to save CR from initial SP */
  int vrsave_save_offset;       /* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;       /* offset to save spe 64-bit gprs */
  int varargs_save_offset;      /* offset to save the varargs registers */
  int ehrd_offset;              /* offset to EH return data */
  int ehcr_offset;              /* offset to EH CR field data */
  int reg_size;                 /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;      /* variable save area size */
  int parm_size;                /* outgoing parameter size */
  int save_size;                /* save area size */
  int fixed_size;               /* fixed size of stack frame */
  int gp_size;                  /* size of saved GP registers */
  int fp_size;                  /* size of saved FP registers */
  int altivec_size;             /* size of saved AltiVec registers */
  int cr_size;                  /* size to hold CR if not in fixed area */
  int vrsave_size;              /* size to hold VRSAVE */
  int altivec_padding_size;     /* size of altivec alignment padding */
  int spe_gp_size;              /* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;     /* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Whether the instruction chain has been scanned already.  */
  int spe_insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, to be called so we
   can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV       = 0x001,   /* Use divide estimate.  */
  RECIP_DF_DIV       = 0x002,
  RECIP_V4SF_DIV     = 0x004,
  RECIP_V2DF_DIV     = 0x008,

  RECIP_SF_RSQRT     = 0x010,   /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT     = 0x020,
  RECIP_V4SF_RSQRT   = 0x040,
  RECIP_V2DF_RSQRT   = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE         = 0,
  RECIP_ALL          = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
                        | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
                        | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
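
/* Editorial aside (not in the original source): with the bit values above,
   RECIP_ALL works out to 0x0ff, and RECIP_LOW_PRECISION clears the two
   double precision rsqrt bits (0x020 | 0x080), leaving 0x05f.  */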

/* -mrecip options.  */
static struct
{
  const char *string;           /* option name */
  unsigned int mask;            /* mask bits to set */
} recip_options[] = {
  { "all",      RECIP_ALL },
  { "none",     RECIP_NONE },
  { "div",      (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
                 | RECIP_V2DF_DIV) },
  { "divf",     (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",     (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",    (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
                 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",   (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",   (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",      PPC_PLATFORM_POWER9 },
  { "power8",      PPC_PLATFORM_POWER8 },
  { "power7",      PPC_PLATFORM_POWER7 },
  { "power6x",     PPC_PLATFORM_POWER6X },
  { "power6",      PPC_PLATFORM_POWER6 },
  { "power5+",     PPC_PLATFORM_POWER5_PLUS },
  { "power5",      PPC_PLATFORM_POWER5 },
  { "ppc970",      PPC_PLATFORM_PPC970 },
  { "power4",      PPC_PLATFORM_POWER4 },
  { "ppca2",       PPC_PLATFORM_PPCA2 },
  { "ppc476",      PPC_PLATFORM_PPC476 },
  { "ppc464",      PPC_PLATFORM_PPC464 },
  { "ppc440",      PPC_PLATFORM_PPC440 },
  { "ppc405",      PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",           PPC_FEATURE_HAS_4xxMAC,         0 },
  { "altivec",          PPC_FEATURE_HAS_ALTIVEC,        0 },
  { "arch_2_05",        PPC_FEATURE_ARCH_2_05,          0 },
  { "arch_2_06",        PPC_FEATURE_ARCH_2_06,          0 },
  { "archpmu",          PPC_FEATURE_PERFMON_COMPAT,     0 },
  { "booke",            PPC_FEATURE_BOOKE,              0 },
  { "cellbe",           PPC_FEATURE_CELL_BE,            0 },
  { "dfp",              PPC_FEATURE_HAS_DFP,            0 },
  { "efpdouble",        PPC_FEATURE_HAS_EFP_DOUBLE,     0 },
  { "efpsingle",        PPC_FEATURE_HAS_EFP_SINGLE,     0 },
  { "fpu",              PPC_FEATURE_HAS_FPU,            0 },
  { "ic_snoop",         PPC_FEATURE_ICACHE_SNOOP,       0 },
  { "mmu",              PPC_FEATURE_HAS_MMU,            0 },
  { "notb",             PPC_FEATURE_NO_TB,              0 },
  { "pa6t",             PPC_FEATURE_PA6T,               0 },
  { "power4",           PPC_FEATURE_POWER4,             0 },
  { "power5",           PPC_FEATURE_POWER5,             0 },
  { "power5+",          PPC_FEATURE_POWER5_PLUS,        0 },
  { "power6x",          PPC_FEATURE_POWER6_EXT,         0 },
  { "ppc32",            PPC_FEATURE_32,                 0 },
  { "ppc601",           PPC_FEATURE_601_INSTR,          0 },
  { "ppc64",            PPC_FEATURE_64,                 0 },
  { "ppcle",            PPC_FEATURE_PPC_LE,             0 },
  { "smt",              PPC_FEATURE_SMT,                0 },
  { "spe",              PPC_FEATURE_HAS_SPE,            0 },
  { "true_le",          PPC_FEATURE_TRUE_LE,            0 },
  { "ucache",           PPC_FEATURE_UNIFIED_CACHE,      0 },
  { "vsx",              PPC_FEATURE_HAS_VSX,            0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",        PPC_FEATURE2_ARCH_2_07,         1 },
  { "dscr",             PPC_FEATURE2_HAS_DSCR,          1 },
  { "ebb",              PPC_FEATURE2_HAS_EBB,           1 },
  { "htm",              PPC_FEATURE2_HAS_HTM,           1 },
  { "htm-nosc",         PPC_FEATURE2_HTM_NOSC,          1 },
  { "isel",             PPC_FEATURE2_HAS_ISEL,          1 },
  { "tar",              PPC_FEATURE2_HAS_TAR,           1 },
  { "vcrypto",          PPC_FEATURE2_HAS_VEC_CRYPTO,    1 },
  { "arch_3_00",        PPC_FEATURE2_ARCH_3_00,         1 },
  { "ieee128",          PPC_FEATURE2_HAS_IEEE128,       1 }
};

/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or in legitimate
   address checking.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,               /* General purpose registers.  */
  RELOAD_REG_FPR,               /* Traditional floating point regs.  */
  RELOAD_REG_VMX,               /* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,               /* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS  RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS   RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;             /* Register class name.  */
  int reg;                      /* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",      FIRST_GPR_REGNO },      /* RELOAD_REG_GPR.  */
  { "Fpr",      FIRST_FPR_REGNO },      /* RELOAD_REG_FPR.  */
  { "VMX",      FIRST_ALTIVEC_REGNO },  /* RELOAD_REG_VMX.  */
  { "Any",      -1 },                   /* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID        0x01    /* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE     0x02    /* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED      0x04    /* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET       0x08    /* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC   0x10    /* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY   0x20    /* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16      0x40    /* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET  0x80    /* Quad offset is limited.  */

/* Masks of the valid addressing modes, for each register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;     /* INSN to reload for loading.  */
  enum insn_code reload_store;    /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;  /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;  /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;  /* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;   /* INSN for fusing gpr ADDIS/loads.  */
  /* INSNs for fusing addi with loads
     or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
  /* INSNs for fusing addis with loads
     or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];  /* Valid address masks.  */
  bool scalar_in_vmx_p;           /* Scalar value can go in VMX.  */
  bool fused_toc;                 /* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
          != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
          != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
          != 0);
}
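
/* Editorial sketch (not part of the original source): further predicates
   over the addr_mask bits follow the same one-line pattern; for example, a
   hypothetical check for reg+reg addressing in GPRs for MODE would test

     (reg_addr[mode].addr_mask[RELOAD_REG_GPR] & RELOAD_REG_INDEXED) != 0

   mirroring the helpers above.  */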

/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;        /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;        /* cost of DImode multiplication.  */
  const int divsi;        /* cost of SImode division.  */
  const int divdi;        /* cost of DImode division.  */
  const int fp;           /* cost of simple SFmode and DFmode insns.  */
  const int dmul;         /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;         /* cost of SFmode division (fdivs).  */
  const int ddiv;         /* cost of DFmode division (fdiv).  */
  const int cache_line_size;    /* cache line size in bytes.  */
  const int l1_cache_size;      /* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;      /* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
                                        operations.  */
  const int sfdf_convert;       /* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,                   /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,                  /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,                  /* cache line size */
  128,                  /* l1 cache */
  2048,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* l1 cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  8,                    /* l1 cache */
  64,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  6,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  8,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (14),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (8),    /* divsi */
  COSTS_N_INSNS (12),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (18),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  8,                    /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,                   /* cache line size */
  16,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
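
/* Editorial illustration (assumed-shape example, not taken from the .def
   file): with the #defines above, a definition line such as

     RS6000_BUILTIN_2 (FOO, "foo", MASK, ATTR, CODE_FOR_foo)

   in rs6000-builtin.def expands to the initializer

     { "foo", CODE_FOR_foo, MASK, ATTR },

   so the table is generated entry by entry from the .def file.  */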

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);


static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
                                      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
                                   machine_mode, machine_mode,
                                   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
                                       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
                                             int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
                                                   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
                                                     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
                                                           machine_mode,
                                                           rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
                                                           enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
                                            machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
                                                  enum reg_class,
                                                  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
                                             machine_mode,
                                             enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
                                                   machine_mode,
                                                   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
                                             int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
                                                     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
                                            machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
                                             machine_mode,
                                             enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
                                      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
                                          HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
                                          enum rs6000_reg_type,
                                          machine_mode,
                                          secondary_reload_info *,
                                          bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];         /* return value + 3 arguments.  */
  unsigned char uns_p[4];       /* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

/* Default register names.  */
char rs6000_reg_names[][8] =
{
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
  /* AltiVec registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
1430 "%rh0", "%rh1", "%rh2", "%rh3", "%rh4", "%rh5", "%rh6", "%rh7",
1431 "%rh8", "%rh9", "%rh10", "%r11", "%rh12", "%rh13", "%rh14", "%rh15",
1432 "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
1433 "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
1435 #endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
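
/* Editorial example (not in the original source): since the shift counts
   down from the most significant bit, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO)
   is 0x80000000 and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 3) is
   0x10000000.  */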

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1682 #undef TARGET_FUNCTION_ARG
1683 #define TARGET_FUNCTION_ARG rs6000_function_arg
1684 #undef TARGET_FUNCTION_ARG_BOUNDARY
1685 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1687 #undef TARGET_BUILD_BUILTIN_VA_LIST
1688 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1690 #undef TARGET_EXPAND_BUILTIN_VA_START
1691 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1693 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1694 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1696 #undef TARGET_EH_RETURN_FILTER_MODE
1697 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1699 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1700 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1702 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1703 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1705 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1706 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1708 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1709 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1711 #undef TARGET_MD_ASM_ADJUST
1712 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1714 #undef TARGET_OPTION_OVERRIDE
1715 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1717 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1718 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1719 rs6000_builtin_vectorized_function
1721 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1722 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1723 rs6000_builtin_md_vectorized_function
1725 #if !TARGET_MACHO
1726 #undef TARGET_STACK_PROTECT_FAIL
1727 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1728 #endif
1730 #ifdef HAVE_AS_TLS
1731 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1732 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1733 #endif
1735 /* Use a 32-bit anchor range. This leads to sequences like:
1737 addis tmp,anchor,high
1738 add dest,tmp,low
1740 where tmp itself acts as an anchor, and can be shared between
1741 accesses to the same 64k page. */
1742 #undef TARGET_MIN_ANCHOR_OFFSET
1743 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1744 #undef TARGET_MAX_ANCHOR_OFFSET
1745 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
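/* The minimum above is written as -0x7fffffff - 1 rather than -0x80000000
   because the constant 0x80000000 does not fit in a 32-bit signed int, so
   the latter would be unary minus applied to an unsigned value.  A minimal
   check, assuming a 32-bit int:

     #include <limits.h>
     _Static_assert (-0x7fffffff - 1 == INT_MIN, "32-bit anchor minimum");
*/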
1746 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1747 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1748 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1749 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1751 #undef TARGET_BUILTIN_RECIPROCAL
1752 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1754 #undef TARGET_EXPAND_TO_RTL_HOOK
1755 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1757 #undef TARGET_INSTANTIATE_DECLS
1758 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1760 #undef TARGET_SECONDARY_RELOAD
1761 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1763 #undef TARGET_LEGITIMATE_ADDRESS_P
1764 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1766 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1767 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1769 #undef TARGET_LRA_P
1770 #define TARGET_LRA_P rs6000_lra_p
1772 #undef TARGET_CAN_ELIMINATE
1773 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1775 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1776 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1778 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1779 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1781 #undef TARGET_TRAMPOLINE_INIT
1782 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1784 #undef TARGET_FUNCTION_VALUE
1785 #define TARGET_FUNCTION_VALUE rs6000_function_value
1787 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1788 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1790 #undef TARGET_OPTION_SAVE
1791 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1793 #undef TARGET_OPTION_RESTORE
1794 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1796 #undef TARGET_OPTION_PRINT
1797 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1799 #undef TARGET_CAN_INLINE_P
1800 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1802 #undef TARGET_SET_CURRENT_FUNCTION
1803 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1805 #undef TARGET_LEGITIMATE_CONSTANT_P
1806 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1808 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1809 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1811 #undef TARGET_CAN_USE_DOLOOP_P
1812 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1814 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1815 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1817 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1818 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1819 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1820 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1821 #undef TARGET_UNWIND_WORD_MODE
1822 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1824 #undef TARGET_OFFLOAD_OPTIONS
1825 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1827 #undef TARGET_C_MODE_FOR_SUFFIX
1828 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1830 #undef TARGET_INVALID_BINARY_OP
1831 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1833 #undef TARGET_OPTAB_SUPPORTED_P
1834 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1837 /* Processor table. */
1838 struct rs6000_ptt
1840 const char *const name; /* Canonical processor name. */
1841 const enum processor_type processor; /* Processor type enum value. */
1842 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1845 static struct rs6000_ptt const processor_target_table[] =
1847 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1848 #include "rs6000-cpus.def"
1849 #undef RS6000_CPU
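/* For illustration, a row in rs6000-cpus.def of the form (hypothetical
   flags)

     RS6000_CPU ("power8", PROCESSOR_POWER8, MASK_POWERPC64 | MASK_VSX)

   expands under the definition above into the initializer

     { "power8", PROCESSOR_POWER8, MASK_POWERPC64 | MASK_VSX },

   i.e. one element of processor_target_table.  */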
1852 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1853 name is invalid. */
1855 static int
1856 rs6000_cpu_name_lookup (const char *name)
1858 size_t i;
1860 if (name != NULL)
1862 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1863 if (! strcmp (name, processor_target_table[i].name))
1864 return (int)i;
1867 return -1;
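/* For illustration (hypothetical names): rs6000_cpu_name_lookup ("power8")
   returns the index of the matching processor_target_table entry, while an
   unrecognized string such as "power99" returns -1.  */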
1871 /* Return number of consecutive hard regs needed starting at reg REGNO
1872 to hold something of mode MODE.
1873 This is ordinarily the length in words of a value of mode MODE
1874 but can be less for certain modes in special long registers.
1876 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1877 scalar instructions. The upper 32 bits are only available to the
1878 SIMD instructions.
1880 POWER and PowerPC GPRs hold 32 bits worth;
1881 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1883 static int
1884 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
1886 unsigned HOST_WIDE_INT reg_size;
1888 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
1889 128-bit floating point that can go in vector registers, which has VSX
1890 memory addressing. */
1891 if (FP_REGNO_P (regno))
1892 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
1893 ? UNITS_PER_VSX_WORD
1894 : UNITS_PER_FP_WORD);
1896 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1897 reg_size = UNITS_PER_SPE_WORD;
1899 else if (ALTIVEC_REGNO_P (regno))
1900 reg_size = UNITS_PER_ALTIVEC_WORD;
1902 /* The value returned for SCmode in the E500 double case is 2 for
1903 ABI compatibility; storing an SCmode value in a single register
1904 would require function_arg and rs6000_spe_function_arg to handle
1905 SCmode so as to pass the value correctly in a pair of
1906 registers. */
1907 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1908 && !DECIMAL_FLOAT_MODE_P (mode) && SPE_SIMD_REGNO_P (regno))
1909 reg_size = UNITS_PER_FP_WORD;
1911 else
1912 reg_size = UNITS_PER_WORD;
1914 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
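/* The return value is the usual round-up division idiom: for example, a
   16-byte vector mode with reg_size == 8 needs (16 + 8 - 1) / 8 == 2
   registers, while an 8-byte mode in the same registers needs just 1.  */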
1917 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1918 MODE. */
1919 static int
1920 rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
1922 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1924 if (COMPLEX_MODE_P (mode))
1925 mode = GET_MODE_INNER (mode);
1927 /* PTImode can only go in GPRs; we use it where we need to deal with quad
1928 word memory operations, which require even/odd register pairs. Don't
1929 allow quad words in the argument or frame pointer registers, just
1930 registers 0..31. */
1931 if (mode == PTImode)
1932 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1933 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1934 && ((regno & 1) == 0));
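/* For example, on a 64-bit target PTImode needs two GPRs, and the
   (regno & 1) == 0 test restricts it to even/odd pairs such as r4/r5;
   starting the value at an odd register like r5 is rejected.  */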
1936 /* On VSX implementations, the registers that overlap the FPRs are wider
1937 than on non-VSX implementations. Don't allow an item to be split between
1938 an FP register and an Altivec register. Allow TImode in all VSX registers
1939 if the user asked for it. */
1940 if (TARGET_VSX && VSX_REGNO_P (regno)
1941 && (VECTOR_MEM_VSX_P (mode)
1942 || FLOAT128_VECTOR_P (mode)
1943 || reg_addr[mode].scalar_in_vmx_p
1944 || (TARGET_VSX_TIMODE && mode == TImode)
1945 || (TARGET_VADDUQM && mode == V1TImode)
1946 || (TARGET_UPPER_REGS_DI && mode == DImode)))
1948 if (FP_REGNO_P (regno))
1949 return FP_REGNO_P (last_regno);
1951 if (ALTIVEC_REGNO_P (regno))
1953 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
1954 return 0;
1956 return ALTIVEC_REGNO_P (last_regno);
1960 /* The GPRs can hold any mode, but values bigger than one register
1961 cannot go past R31. */
1962 if (INT_REGNO_P (regno))
1963 return INT_REGNO_P (last_regno);
1965 /* The float registers (except for VSX vector modes) can only hold floating
1966 modes and DImode. */
1967 if (FP_REGNO_P (regno))
1969 if (FLOAT128_VECTOR_P (mode))
1970 return false;
1972 if (SCALAR_FLOAT_MODE_P (mode)
1973 && (mode != TDmode || (regno % 2) == 0)
1974 && FP_REGNO_P (last_regno))
1975 return 1;
1977 if (GET_MODE_CLASS (mode) == MODE_INT
1978 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1979 return 1;
1981 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1982 && PAIRED_VECTOR_MODE (mode))
1983 return 1;
1985 return 0;
1988 /* The CR register can only hold CC modes. */
1989 if (CR_REGNO_P (regno))
1990 return GET_MODE_CLASS (mode) == MODE_CC;
1992 if (CA_REGNO_P (regno))
1993 return mode == Pmode || mode == SImode;
1995 /* AltiVec modes go only in AltiVec registers. */
1996 if (ALTIVEC_REGNO_P (regno))
1997 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
1998 || mode == V1TImode);
2000 /* ...but GPRs can hold SIMD data on the SPE in one register. */
2001 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
2002 return 1;
2004 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2005 registers, and the value must fit within the register set. */
2007 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
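/* A worked example, assuming a 64-bit configuration: TImode is 16 bytes and
   needs two GPRs, so rs6000_hard_regno_mode_ok (30, TImode) can be true
   (the pair r30/r31 stays within the GPRs) while regno 31 fails the
   INT_REGNO_P (last_regno) test above, since the pair would run past r31.  */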
2010 /* Print interesting facts about registers. */
2011 static void
2012 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2014 int r, m;
2016 for (r = first_regno; r <= last_regno; ++r)
2018 const char *comma = "";
2019 int len;
2021 if (first_regno == last_regno)
2022 fprintf (stderr, "%s:\t", reg_name);
2023 else
2024 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2026 len = 8;
2027 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2028 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2030 if (len > 70)
2032 fprintf (stderr, ",\n\t");
2033 len = 8;
2034 comma = "";
2037 if (rs6000_hard_regno_nregs[m][r] > 1)
2038 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2039 rs6000_hard_regno_nregs[m][r]);
2040 else
2041 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2043 comma = ", ";
2046 if (call_used_regs[r])
2048 if (len > 70)
2050 fprintf (stderr, ",\n\t");
2051 len = 8;
2052 comma = "";
2055 len += fprintf (stderr, "%s%s", comma, "call-used");
2056 comma = ", ";
2059 if (fixed_regs[r])
2061 if (len > 70)
2063 fprintf (stderr, ",\n\t");
2064 len = 8;
2065 comma = "";
2068 len += fprintf (stderr, "%s%s", comma, "fixed");
2069 comma = ", ";
2072 if (len > 70)
2074 fprintf (stderr, ",\n\t");
2075 comma = "";
2078 len += fprintf (stderr, "%sreg-class = %s", comma,
2079 reg_class_names[(int)rs6000_regno_regclass[r]]);
2080 comma = ", ";
2082 if (len > 70)
2084 fprintf (stderr, ",\n\t");
2085 comma = "";
2088 fprintf (stderr, "%sregno = %d\n", comma, r);
2092 static const char *
2093 rs6000_debug_vector_unit (enum rs6000_vector v)
2095 const char *ret;
2097 switch (v)
2099 case VECTOR_NONE: ret = "none"; break;
2100 case VECTOR_ALTIVEC: ret = "altivec"; break;
2101 case VECTOR_VSX: ret = "vsx"; break;
2102 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2103 case VECTOR_PAIRED: ret = "paired"; break;
2104 case VECTOR_SPE: ret = "spe"; break;
2105 case VECTOR_OTHER: ret = "other"; break;
2106 default: ret = "unknown"; break;
2109 return ret;
2112 /* Inner function printing just the address mask for a particular reload
2113 register class. */
2114 DEBUG_FUNCTION char *
2115 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2117 static char ret[8];
2118 char *p = ret;
2120 if ((mask & RELOAD_REG_VALID) != 0)
2121 *p++ = 'v';
2122 else if (keep_spaces)
2123 *p++ = ' ';
2125 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2126 *p++ = 'm';
2127 else if (keep_spaces)
2128 *p++ = ' ';
2130 if ((mask & RELOAD_REG_INDEXED) != 0)
2131 *p++ = 'i';
2132 else if (keep_spaces)
2133 *p++ = ' ';
2135 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2136 *p++ = 'O';
2137 else if ((mask & RELOAD_REG_OFFSET) != 0)
2138 *p++ = 'o';
2139 else if (keep_spaces)
2140 *p++ = ' ';
2142 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2143 *p++ = '+';
2144 else if (keep_spaces)
2145 *p++ = ' ';
2147 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2148 *p++ = '+';
2149 else if (keep_spaces)
2150 *p++ = ' ';
2152 if ((mask & RELOAD_REG_AND_M16) != 0)
2153 *p++ = '&';
2154 else if (keep_spaces)
2155 *p++ = ' ';
2157 *p = '\0';
2159 return ret;
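/* For example, a mask with RELOAD_REG_VALID, RELOAD_REG_INDEXED and
   RELOAD_REG_OFFSET set, printed with keep_spaces true, comes out as
   "v io   ": one character position per flag in the order tested above,
   with spaces holding the places of the unset flags.  */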
2162 /* Print the address masks in a human readable fashion. */
2163 DEBUG_FUNCTION void
2164 rs6000_debug_print_mode (ssize_t m)
2166 ssize_t rc;
2167 int spaces = 0;
2168 bool fuse_extra_p;
2170 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2171 for (rc = 0; rc < N_RELOAD_REG; rc++)
2172 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2173 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2175 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2176 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2177 fprintf (stderr, " Reload=%c%c",
2178 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2179 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2180 else
2181 spaces += sizeof (" Reload=sl") - 1;
2183 if (reg_addr[m].scalar_in_vmx_p)
2185 fprintf (stderr, "%*s Upper=y", spaces, "");
2186 spaces = 0;
2188 else
2189 spaces += sizeof (" Upper=y") - 1;
2191 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2192 || reg_addr[m].fused_toc);
2193 if (!fuse_extra_p)
2195 for (rc = 0; rc < N_RELOAD_REG; rc++)
2197 if (rc != RELOAD_REG_ANY)
2199 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2201 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2202 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2203 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2205 fuse_extra_p = true;
2206 break;
2212 if (fuse_extra_p)
2214 fprintf (stderr, "%*s Fuse:", spaces, "");
2215 spaces = 0;
2217 for (rc = 0; rc < N_RELOAD_REG; rc++)
2219 if (rc != RELOAD_REG_ANY)
2221 char load, store;
2223 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2224 load = 'l';
2225 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2226 load = 'L';
2227 else
2228 load = '-';
2230 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2231 store = 's';
2232 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2233 store = 'S';
2234 else
2235 store = '-';
2237 if (load == '-' && store == '-')
2238 spaces += 5;
2239 else
2241 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2242 reload_reg_map[rc].name[0], load, store);
2243 spaces = 0;
2248 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2250 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2251 spaces = 0;
2253 else
2254 spaces += sizeof (" P8gpr") - 1;
2256 if (reg_addr[m].fused_toc)
2258 fprintf (stderr, "%*sToc", (spaces + 1), "");
2259 spaces = 0;
2261 else
2262 spaces += sizeof (" Toc") - 1;
2264 else
2265 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2267 if (rs6000_vector_unit[m] != VECTOR_NONE
2268 || rs6000_vector_mem[m] != VECTOR_NONE)
2270 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2271 spaces, "",
2272 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2273 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2276 fputs ("\n", stderr);
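/* For illustration only (the exact fields vary by configuration), each line
   emitted here has the shape

     Mode: DF    GPR: v  o    FPR: v io++    ...  Reload=sl  Upper=y

   i.e. one rs6000_debug_addr_mask column per reload register class,
   followed by the optional Reload=/Upper=/Fuse: annotations.  */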
2279 #define DEBUG_FMT_ID "%-32s= "
2280 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2281 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2282 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
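/* For example (illustrative value), the call

     fprintf (stderr, DEBUG_FMT_S, "cpu", "power8");

   uses the combined format "%-32s= %s\n", printing the key left-justified
   in a 32-column field:

     cpu                             = power8
*/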
2284 /* Print various interesting information with -mdebug=reg. */
2285 static void
2286 rs6000_debug_reg_global (void)
2288 static const char *const tf[2] = { "false", "true" };
2289 const char *nl = (const char *)0;
2290 int m;
2291 size_t m1, m2, v;
2292 char costly_num[20];
2293 char nop_num[20];
2294 char flags_buffer[40];
2295 const char *costly_str;
2296 const char *nop_str;
2297 const char *trace_str;
2298 const char *abi_str;
2299 const char *cmodel_str;
2300 struct cl_target_option cl_opts;
2302 /* Modes we want tieable information on. */
2303 static const machine_mode print_tieable_modes[] = {
2304 QImode,
2305 HImode,
2306 SImode,
2307 DImode,
2308 TImode,
2309 PTImode,
2310 SFmode,
2311 DFmode,
2312 TFmode,
2313 IFmode,
2314 KFmode,
2315 SDmode,
2316 DDmode,
2317 TDmode,
2318 V8QImode,
2319 V4HImode,
2320 V2SImode,
2321 V16QImode,
2322 V8HImode,
2323 V4SImode,
2324 V2DImode,
2325 V1TImode,
2326 V32QImode,
2327 V16HImode,
2328 V8SImode,
2329 V4DImode,
2330 V2TImode,
2331 V2SFmode,
2332 V4SFmode,
2333 V2DFmode,
2334 V8SFmode,
2335 V4DFmode,
2336 CCmode,
2337 CCUNSmode,
2338 CCEQmode,
2341 /* Virtual regs we are interested in. */
2342 static const struct {
2343 int regno; /* register number. */
2344 const char *name; /* register name. */
2345 } virtual_regs[] = {
2346 { STACK_POINTER_REGNUM, "stack pointer:" },
2347 { TOC_REGNUM, "toc: " },
2348 { STATIC_CHAIN_REGNUM, "static chain: " },
2349 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2350 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2351 { ARG_POINTER_REGNUM, "arg pointer: " },
2352 { FRAME_POINTER_REGNUM, "frame pointer:" },
2353 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2354 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2355 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2356 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2357 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2358 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2359 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2360 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2361 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2364 fputs ("\nHard register information:\n", stderr);
2365 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2366 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2367 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2368 LAST_ALTIVEC_REGNO,
2369 "vs");
2370 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2371 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2372 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2373 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2374 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2375 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2376 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2377 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2379 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2380 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2381 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2383 fprintf (stderr,
2384 "\n"
2385 "d reg_class = %s\n"
2386 "f reg_class = %s\n"
2387 "v reg_class = %s\n"
2388 "wa reg_class = %s\n"
2389 "wb reg_class = %s\n"
2390 "wd reg_class = %s\n"
2391 "we reg_class = %s\n"
2392 "wf reg_class = %s\n"
2393 "wg reg_class = %s\n"
2394 "wh reg_class = %s\n"
2395 "wi reg_class = %s\n"
2396 "wj reg_class = %s\n"
2397 "wk reg_class = %s\n"
2398 "wl reg_class = %s\n"
2399 "wm reg_class = %s\n"
2400 "wo reg_class = %s\n"
2401 "wp reg_class = %s\n"
2402 "wq reg_class = %s\n"
2403 "wr reg_class = %s\n"
2404 "ws reg_class = %s\n"
2405 "wt reg_class = %s\n"
2406 "wu reg_class = %s\n"
2407 "wv reg_class = %s\n"
2408 "ww reg_class = %s\n"
2409 "wx reg_class = %s\n"
2410 "wy reg_class = %s\n"
2411 "wz reg_class = %s\n"
2412 "\n",
2413 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2414 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2415 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2416 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2417 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2418 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2419 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2420 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2421 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2422 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2423 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2424 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2425 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2426 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2427 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2428 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2429 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2430 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2431 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2432 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2433 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2434 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2435 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2436 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2437 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2438 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2439 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
2441 nl = "\n";
2442 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2443 rs6000_debug_print_mode (m);
2445 fputs ("\n", stderr);
2447 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2449 machine_mode mode1 = print_tieable_modes[m1];
2450 bool first_time = true;
2452 nl = (const char *)0;
2453 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2455 machine_mode mode2 = print_tieable_modes[m2];
2456 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2458 if (first_time)
2460 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2461 nl = "\n";
2462 first_time = false;
2465 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2469 if (!first_time)
2470 fputs ("\n", stderr);
2473 if (nl)
2474 fputs (nl, stderr);
2476 if (rs6000_recip_control)
2478 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2480 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2481 if (rs6000_recip_bits[m])
2483 fprintf (stderr,
2484 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2485 GET_MODE_NAME (m),
2486 (RS6000_RECIP_AUTO_RE_P (m)
2487 ? "auto"
2488 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2489 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2490 ? "auto"
2491 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2494 fputs ("\n", stderr);
2497 if (rs6000_cpu_index >= 0)
2499 const char *name = processor_target_table[rs6000_cpu_index].name;
2500 HOST_WIDE_INT flags
2501 = processor_target_table[rs6000_cpu_index].target_enable;
2503 sprintf (flags_buffer, "-mcpu=%s flags", name);
2504 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2506 else
2507 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2509 if (rs6000_tune_index >= 0)
2511 const char *name = processor_target_table[rs6000_tune_index].name;
2512 HOST_WIDE_INT flags
2513 = processor_target_table[rs6000_tune_index].target_enable;
2515 sprintf (flags_buffer, "-mtune=%s flags", name);
2516 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2518 else
2519 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2521 cl_target_option_save (&cl_opts, &global_options);
2522 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2523 rs6000_isa_flags);
2525 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2526 rs6000_isa_flags_explicit);
2528 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2529 rs6000_builtin_mask);
2531 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2533 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2534 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2536 switch (rs6000_sched_costly_dep)
2538 case max_dep_latency:
2539 costly_str = "max_dep_latency";
2540 break;
2542 case no_dep_costly:
2543 costly_str = "no_dep_costly";
2544 break;
2546 case all_deps_costly:
2547 costly_str = "all_deps_costly";
2548 break;
2550 case true_store_to_load_dep_costly:
2551 costly_str = "true_store_to_load_dep_costly";
2552 break;
2554 case store_to_load_dep_costly:
2555 costly_str = "store_to_load_dep_costly";
2556 break;
2558 default:
2559 costly_str = costly_num;
2560 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2561 break;
2564 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2566 switch (rs6000_sched_insert_nops)
2568 case sched_finish_regroup_exact:
2569 nop_str = "sched_finish_regroup_exact";
2570 break;
2572 case sched_finish_pad_groups:
2573 nop_str = "sched_finish_pad_groups";
2574 break;
2576 case sched_finish_none:
2577 nop_str = "sched_finish_none";
2578 break;
2580 default:
2581 nop_str = nop_num;
2582 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2583 break;
2586 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2588 switch (rs6000_sdata)
2590 default:
2591 case SDATA_NONE:
2592 break;
2594 case SDATA_DATA:
2595 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2596 break;
2598 case SDATA_SYSV:
2599 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2600 break;
2602 case SDATA_EABI:
2603 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2604 break;
2608 switch (rs6000_traceback)
2610 case traceback_default: trace_str = "default"; break;
2611 case traceback_none: trace_str = "none"; break;
2612 case traceback_part: trace_str = "part"; break;
2613 case traceback_full: trace_str = "full"; break;
2614 default: trace_str = "unknown"; break;
2617 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2619 switch (rs6000_current_cmodel)
2621 case CMODEL_SMALL: cmodel_str = "small"; break;
2622 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2623 case CMODEL_LARGE: cmodel_str = "large"; break;
2624 default: cmodel_str = "unknown"; break;
2627 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2629 switch (rs6000_current_abi)
2631 case ABI_NONE: abi_str = "none"; break;
2632 case ABI_AIX: abi_str = "aix"; break;
2633 case ABI_ELFv2: abi_str = "ELFv2"; break;
2634 case ABI_V4: abi_str = "V4"; break;
2635 case ABI_DARWIN: abi_str = "darwin"; break;
2636 default: abi_str = "unknown"; break;
2639 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2641 if (rs6000_altivec_abi)
2642 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2644 if (rs6000_spe_abi)
2645 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2647 if (rs6000_darwin64_abi)
2648 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2650 if (rs6000_float_gprs)
2651 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2653 fprintf (stderr, DEBUG_FMT_S, "fprs",
2654 (TARGET_FPRS ? "true" : "false"));
2656 fprintf (stderr, DEBUG_FMT_S, "single_float",
2657 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2659 fprintf (stderr, DEBUG_FMT_S, "double_float",
2660 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2662 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2663 (TARGET_SOFT_FLOAT ? "true" : "false"));
2665 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2666 (TARGET_E500_SINGLE ? "true" : "false"));
2668 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2669 (TARGET_E500_DOUBLE ? "true" : "false"));
2671 if (TARGET_LINK_STACK)
2672 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2674 fprintf (stderr, DEBUG_FMT_S, "lra", TARGET_LRA ? "true" : "false");
2676 if (TARGET_P8_FUSION)
2678 char options[80];
2680 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2681 if (TARGET_TOC_FUSION)
2682 strcat (options, ", toc");
2684 if (TARGET_P8_FUSION_SIGN)
2685 strcat (options, ", sign");
2687 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2690 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2691 TARGET_SECURE_PLT ? "secure" : "bss");
2692 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2693 aix_struct_return ? "aix" : "sysv");
2694 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2695 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2696 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2697 tf[!!rs6000_align_branch_targets]);
2698 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2699 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2700 rs6000_long_double_type_size);
2701 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2702 (int)rs6000_sched_restricted_insns_priority);
2703 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2704 (int)END_BUILTINS);
2705 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2706 (int)RS6000_BUILTIN_COUNT);
2708 if (TARGET_VSX)
2709 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2710 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2712 if (TARGET_DIRECT_MOVE_128)
2713 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2714 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2718 /* Update the addr mask bits in reg_addr to help the secondary reload and
2719 legitimate address support figure out the appropriate addressing to
2720 use. */
2722 static void
2723 rs6000_setup_reg_addr_masks (void)
2725 ssize_t rc, reg, m, nregs;
2726 addr_mask_type any_addr_mask, addr_mask;
2728 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2730 machine_mode m2 = (machine_mode) m;
2731 bool complex_p = false;
2732 size_t msize;
2734 if (COMPLEX_MODE_P (m2))
2736 complex_p = true;
2737 m2 = GET_MODE_INNER (m2);
2740 msize = GET_MODE_SIZE (m2);
2742 /* SDmode is special in that we want to access it only via REG+REG
2743 addressing on power7 and above, since we want to use the LFIWZX and
2744 STFIWX instructions to load and store it. */
2745 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
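/* Note the consequence: when indexed_only_p is true, SDmode is not given
   RELOAD_REG_OFFSET below, so reload is steered toward the REG+REG
   (indexed) forms that lfiwzx/stfiwx can handle.  */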
2747 any_addr_mask = 0;
2748 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2750 addr_mask = 0;
2751 reg = reload_reg_map[rc].reg;
2753 /* Can mode values go in the GPR/FPR/Altivec registers? */
2754 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2756 nregs = rs6000_hard_regno_nregs[m][reg];
2757 addr_mask |= RELOAD_REG_VALID;
2759 /* Indicate if the mode takes more than 1 physical register. If
2760 it takes a single register, indicate it can do REG+REG
2761 addressing. */
2762 if (nregs > 1 || m == BLKmode || complex_p)
2763 addr_mask |= RELOAD_REG_MULTIPLE;
2764 else
2765 addr_mask |= RELOAD_REG_INDEXED;
2767 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2768 addressing. Restrict addressing on SPE for 64-bit types
2769 because of the SUBREG hackery used to address 64-bit floats in
2770 '32-bit' GPRs. If we allow scalars into Altivec registers,
2771 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2773 if (TARGET_UPDATE
2774 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2775 && msize <= 8
2776 && !VECTOR_MODE_P (m2)
2777 && !FLOAT128_VECTOR_P (m2)
2778 && !complex_p
2779 && (m2 != DFmode || !TARGET_UPPER_REGS_DF)
2780 && (m2 != SFmode || !TARGET_UPPER_REGS_SF)
2781 && !(TARGET_E500_DOUBLE && msize == 8))
2783 addr_mask |= RELOAD_REG_PRE_INCDEC;
2785 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2786 we don't allow PRE_MODIFY for some multi-register
2787 operations. */
2788 switch (m)
2790 default:
2791 addr_mask |= RELOAD_REG_PRE_MODIFY;
2792 break;
2794 case DImode:
2795 if (TARGET_POWERPC64)
2796 addr_mask |= RELOAD_REG_PRE_MODIFY;
2797 break;
2799 case DFmode:
2800 case DDmode:
2801 if (TARGET_DF_INSN)
2802 addr_mask |= RELOAD_REG_PRE_MODIFY;
2803 break;
2808 /* GPR and FPR registers can do REG+OFFSET addressing, except
2809 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2810 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2811 if ((addr_mask != 0) && !indexed_only_p
2812 && msize <= 8
2813 && (rc == RELOAD_REG_GPR
2814 || ((msize == 8 || m2 == SFmode)
2815 && (rc == RELOAD_REG_FPR
2816 || (rc == RELOAD_REG_VMX
2817 && TARGET_P9_DFORM_SCALAR)))))
2818 addr_mask |= RELOAD_REG_OFFSET;
2820 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2821 instructions are enabled. The offset for 128-bit VSX registers is
2822 only 12 bits. While GPRs can handle the full offset range, VSX
2823 registers can only handle the restricted range. */
2824 else if ((addr_mask != 0) && !indexed_only_p
2825 && msize == 16 && TARGET_P9_DFORM_VECTOR
2826 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2827 || (m2 == TImode && TARGET_VSX_TIMODE)))
2829 addr_mask |= RELOAD_REG_OFFSET;
2830 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2831 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2834 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2835 addressing on 128-bit types. */
2836 if (rc == RELOAD_REG_VMX && msize == 16
2837 && (addr_mask & RELOAD_REG_VALID) != 0)
2838 addr_mask |= RELOAD_REG_AND_M16;
2840 reg_addr[m].addr_mask[rc] = addr_mask;
2841 any_addr_mask |= addr_mask;
2844 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
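/* A minimal usage sketch (hypothetical caller): code asking whether mode M
   supports REG+OFFSET addressing in the GPRs would test the mask computed
   above with

     if ((reg_addr[M].addr_mask[RELOAD_REG_GPR] & RELOAD_REG_OFFSET) != 0)
       ...;

   RELOAD_REG_ANY holds the union of the per-class masks for callers that
   do not care which register class is used.  */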
2849 /* Initialize the various global tables that are based on register size. */
2850 static void
2851 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2853 ssize_t r, m, c;
2854 int align64;
2855 int align32;
2857 /* Precalculate REGNO_REG_CLASS. */
2858 rs6000_regno_regclass[0] = GENERAL_REGS;
2859 for (r = 1; r < 32; ++r)
2860 rs6000_regno_regclass[r] = BASE_REGS;
2862 for (r = 32; r < 64; ++r)
2863 rs6000_regno_regclass[r] = FLOAT_REGS;
2865 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2866 rs6000_regno_regclass[r] = NO_REGS;
2868 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2869 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2871 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2872 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2873 rs6000_regno_regclass[r] = CR_REGS;
2875 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2876 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2877 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2878 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2879 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2880 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2881 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2882 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2883 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2884 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2885 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2886 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2888 /* Precalculate the mapping from register class to the simpler reload register
2889 class. We don't need all of the register classes that are combinations of
2890 different classes, just the simple ones that have constraint letters. */
2891 for (c = 0; c < N_REG_CLASSES; c++)
2892 reg_class_to_reg_type[c] = NO_REG_TYPE;
2894 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2895 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2896 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2897 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2898 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2899 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2900 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2901 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2902 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2903 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2904 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2905 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2907 if (TARGET_VSX)
2909 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2910 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2912 else
2914 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2915 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2918 /* Precalculate the valid memory formats as well as the vector information;
2919 this must be set up before the rs6000_hard_regno_nregs_internal calls
2920 below. */
2921 gcc_assert ((int)VECTOR_NONE == 0);
2922 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2923 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
2925 gcc_assert ((int)CODE_FOR_nothing == 0);
2926 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2928 gcc_assert ((int)NO_REGS == 0);
2929 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2931 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
2932 controls whether the compiler uses that or still uses 128-bit alignment. */
2933 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2935 align64 = 64;
2936 align32 = 32;
2938 else
2940 align64 = 128;
2941 align32 = 128;
2944 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
2945 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
2946 if (TARGET_FLOAT128)
2948 rs6000_vector_mem[KFmode] = VECTOR_VSX;
2949 rs6000_vector_align[KFmode] = 128;
2951 if (FLOAT128_IEEE_P (TFmode))
2953 rs6000_vector_mem[TFmode] = VECTOR_VSX;
2954 rs6000_vector_align[TFmode] = 128;
2958 /* V2DF mode, VSX only. */
2959 if (TARGET_VSX)
2961 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2962 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2963 rs6000_vector_align[V2DFmode] = align64;
2966 /* V4SF mode, either VSX or Altivec. */
2967 if (TARGET_VSX)
2969 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2970 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2971 rs6000_vector_align[V4SFmode] = align32;
2973 else if (TARGET_ALTIVEC)
2975 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2976 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2977 rs6000_vector_align[V4SFmode] = align32;
2980 /* V16QImode, V8HImode and V4SImode arithmetic is Altivec only, but the loads
2981 and stores may use VSX. */
2982 if (TARGET_ALTIVEC)
2984 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2985 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2986 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2987 rs6000_vector_align[V4SImode] = align32;
2988 rs6000_vector_align[V8HImode] = align32;
2989 rs6000_vector_align[V16QImode] = align32;
2991 if (TARGET_VSX)
2993 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2994 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2995 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2997 else
2999 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3000 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3001 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3005 /* V2DImode: full arithmetic depends on the ISA 2.07 vector instructions. Allow
3006 it under VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3007 if (TARGET_VSX)
3009 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3010 rs6000_vector_unit[V2DImode]
3011 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3012 rs6000_vector_align[V2DImode] = align64;
3014 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3015 rs6000_vector_unit[V1TImode]
3016 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3017 rs6000_vector_align[V1TImode] = 128;
3020 /* DFmode, see if we want to use the VSX unit. Memory is handled
3021 differently, so don't set rs6000_vector_mem. */
3022 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
3024 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3025 rs6000_vector_align[DFmode] = 64;
3028 /* SFmode, see if we want to use the VSX unit. */
3029 if (TARGET_P8_VECTOR && TARGET_VSX_SCALAR_FLOAT)
3031 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3032 rs6000_vector_align[SFmode] = 32;
3035 /* Allow TImode in VSX register and set the VSX memory macros. */
3036 if (TARGET_VSX && TARGET_VSX_TIMODE)
3038 rs6000_vector_mem[TImode] = VECTOR_VSX;
3039 rs6000_vector_align[TImode] = align64;
3042 /* TODO add SPE and paired floating point vector support. */
3044 /* Register class constraints for the constraints that depend on compile
3045 switches. When the VSX code was added, different constraints were added
3046 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3047 of the VSX registers are used. The register classes for scalar floating
3048 point types are set, based on whether we allow that type into the upper
3049 (Altivec) registers. GCC has register classes to target the Altivec
3050 registers for load/store operations, to select using a VSX memory
3051 operation instead of the traditional floating point operation. The
3052 constraints are:
3054 d - Register class to use with traditional DFmode instructions.
3055 f - Register class to use with traditional SFmode instructions.
3056 v - Altivec register.
3057 wa - Any VSX register.
wb - Altivec register if ISA 3.0 D-form scalar insns are enabled.
3058 wc - Reserved to represent individual CR bits (used in LLVM).
3059 wd - Preferred register class for V2DFmode.
we - VSX register if ISA 3.0 128-bit direct moves are enabled.
3060 wf - Preferred register class for V4SFmode.
3061 wg - Float register for power6x move insns.
3062 wh - FP register for direct move instructions.
3063 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3064 wj - FP or VSX register to hold 64-bit integers for direct moves.
3065 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3066 wl - Float register if we can do 32-bit signed int loads.
3067 wm - VSX register for ISA 2.07 direct move operations.
3068 wn - always NO_REGS.
wo - VSX register for ISA 3.0 vector instructions.
wp - VSX register to use for IEEE 128-bit TFmode.
wq - VSX register to use for IEEE 128-bit floating point (KFmode).
3069 wr - GPR if 64-bit mode is permitted.
3070 ws - Register class to do ISA 2.06 DF operations.
3071 wt - VSX register for TImode in VSX registers.
3072 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3073 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3074 ww - Register class to do SF conversions in with VSX operations.
3075 wx - Float register if we can do 32-bit int stores.
3076 wy - Register class to do ISA 2.07 SF operations.
3077 wz - Float register if we can do 32-bit unsigned int loads. */
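/* For illustration (a sketch, not a pattern from the rs6000 .md files), an
   insn pattern could use one of these letters as an operand constraint:

     (define_insn "*hypothetical_adddf3"
       [(set (match_operand:DF 0 "gpc_reg_operand" "=ws")
             (plus:DF (match_operand:DF 1 "gpc_reg_operand" "ws")
                      (match_operand:DF 2 "gpc_reg_operand" "ws")))]
       ...)

   where "ws" resolves to the class chosen below: VSX_REGS when
   TARGET_UPPER_REGS_DF is set, FLOAT_REGS otherwise.  */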
3079 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3080 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3082 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3083 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3085 if (TARGET_VSX)
3087 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3088 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3089 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3091 if (TARGET_VSX_TIMODE)
3092 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3094 if (TARGET_UPPER_REGS_DF) /* DFmode */
3096 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
3097 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
3099 else
3100 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
3102 if (TARGET_UPPER_REGS_DI) /* DImode */
3103 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS;
3104 else
3105 rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS;
3108 /* Add conditional constraints based on various options, to allow us to
3109 collapse multiple insn patterns. */
3110 if (TARGET_ALTIVEC)
3111 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3113 if (TARGET_MFPGPR) /* DFmode */
3114 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3116 if (TARGET_LFIWAX)
3117 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3119 if (TARGET_DIRECT_MOVE)
3121 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3122 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3123 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3124 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3125 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3126 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3129 if (TARGET_POWERPC64)
3130 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3132 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF) /* SFmode */
3134 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3135 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3136 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3138 else if (TARGET_P8_VECTOR)
3140 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
3141 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3143 else if (TARGET_VSX)
3144 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3146 if (TARGET_STFIWX)
3147 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3149 if (TARGET_LFIWZX)
3150 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3152 if (TARGET_FLOAT128)
3154 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3155 if (FLOAT128_IEEE_P (TFmode))
3156 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3159 /* Support for new D-form instructions. */
3160 if (TARGET_P9_DFORM_SCALAR)
3161 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3163 /* Support for ISA 3.0 (power9) vectors. */
3164 if (TARGET_P9_VECTOR)
3165 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3167 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3168 if (TARGET_DIRECT_MOVE_128)
3169 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3171 /* Set up the reload helper and direct move functions. */
3172 if (TARGET_VSX || TARGET_ALTIVEC)
3174 if (TARGET_64BIT)
3176 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3177 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3178 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3179 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3180 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3181 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3182 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3183 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3184 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3185 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3186 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3187 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3188 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3189 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3190 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3191 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3192 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3193 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3194 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3195 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3197 if (FLOAT128_VECTOR_P (KFmode))
3199 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3200 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3203 if (FLOAT128_VECTOR_P (TFmode))
3205 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3206 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3209 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3210 available. */
3211 if (TARGET_NO_SDMODE_STACK)
3213 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3214 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3217 if (TARGET_VSX_TIMODE)
3219 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3220 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3223 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3225 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3226 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3227 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3228 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3229 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3230 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3231 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3232 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3233 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3235 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3236 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3237 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3238 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3239 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3240 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3241 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3242 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3243 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3245 if (FLOAT128_VECTOR_P (KFmode))
3247 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3248 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3251 if (FLOAT128_VECTOR_P (TFmode))
3253 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3254 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3258 else
3260 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3261 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3262 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3263 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3264 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3265 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3266 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3267 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3268 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3269 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3270 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3271 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3272 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3273 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3274 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3275 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3276 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3277 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3278 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3279 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3281 if (FLOAT128_VECTOR_P (KFmode))
3283 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3284 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3287 if (FLOAT128_IEEE_P (TFmode))
3289 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3290 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3293 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3294 available. */
3295 if (TARGET_NO_SDMODE_STACK)
3297 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3298 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3301 if (TARGET_VSX_TIMODE)
3303 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3304 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3307 if (TARGET_DIRECT_MOVE)
3309 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3310 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3311 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3315 if (TARGET_UPPER_REGS_DF)
3316 reg_addr[DFmode].scalar_in_vmx_p = true;
3318 if (TARGET_UPPER_REGS_DI)
3319 reg_addr[DImode].scalar_in_vmx_p = true;
3321 if (TARGET_UPPER_REGS_SF)
3322 reg_addr[SFmode].scalar_in_vmx_p = true;
3325 /* Set up the fusion operations. */
3326 if (TARGET_P8_FUSION)
3328 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3329 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3330 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3331 if (TARGET_64BIT)
3332 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3335 if (TARGET_P9_FUSION)
3337 struct fuse_insns {
3338 enum machine_mode mode; /* mode of the fused type. */
3339 enum machine_mode pmode; /* pointer mode. */
3340 enum rs6000_reload_reg_type rtype; /* register type. */
3341 enum insn_code load; /* load insn. */
3342 enum insn_code store; /* store insn. */
3345 static const struct fuse_insns addis_insns[] = {
3346 { SFmode, DImode, RELOAD_REG_FPR,
3347 CODE_FOR_fusion_fpr_di_sf_load,
3348 CODE_FOR_fusion_fpr_di_sf_store },
3350 { SFmode, SImode, RELOAD_REG_FPR,
3351 CODE_FOR_fusion_fpr_si_sf_load,
3352 CODE_FOR_fusion_fpr_si_sf_store },
3354 { DFmode, DImode, RELOAD_REG_FPR,
3355 CODE_FOR_fusion_fpr_di_df_load,
3356 CODE_FOR_fusion_fpr_di_df_store },
3358 { DFmode, SImode, RELOAD_REG_FPR,
3359 CODE_FOR_fusion_fpr_si_df_load,
3360 CODE_FOR_fusion_fpr_si_df_store },
3362 { DImode, DImode, RELOAD_REG_FPR,
3363 CODE_FOR_fusion_fpr_di_di_load,
3364 CODE_FOR_fusion_fpr_di_di_store },
3366 { DImode, SImode, RELOAD_REG_FPR,
3367 CODE_FOR_fusion_fpr_si_di_load,
3368 CODE_FOR_fusion_fpr_si_di_store },
3370 { QImode, DImode, RELOAD_REG_GPR,
3371 CODE_FOR_fusion_gpr_di_qi_load,
3372 CODE_FOR_fusion_gpr_di_qi_store },
3374 { QImode, SImode, RELOAD_REG_GPR,
3375 CODE_FOR_fusion_gpr_si_qi_load,
3376 CODE_FOR_fusion_gpr_si_qi_store },
3378 { HImode, DImode, RELOAD_REG_GPR,
3379 CODE_FOR_fusion_gpr_di_hi_load,
3380 CODE_FOR_fusion_gpr_di_hi_store },
3382 { HImode, SImode, RELOAD_REG_GPR,
3383 CODE_FOR_fusion_gpr_si_hi_load,
3384 CODE_FOR_fusion_gpr_si_hi_store },
3386 { SImode, DImode, RELOAD_REG_GPR,
3387 CODE_FOR_fusion_gpr_di_si_load,
3388 CODE_FOR_fusion_gpr_di_si_store },
3390 { SImode, SImode, RELOAD_REG_GPR,
3391 CODE_FOR_fusion_gpr_si_si_load,
3392 CODE_FOR_fusion_gpr_si_si_store },
3394 { SFmode, DImode, RELOAD_REG_GPR,
3395 CODE_FOR_fusion_gpr_di_sf_load,
3396 CODE_FOR_fusion_gpr_di_sf_store },
3398 { SFmode, SImode, RELOAD_REG_GPR,
3399 CODE_FOR_fusion_gpr_si_sf_load,
3400 CODE_FOR_fusion_gpr_si_sf_store },
3402 { DImode, DImode, RELOAD_REG_GPR,
3403 CODE_FOR_fusion_gpr_di_di_load,
3404 CODE_FOR_fusion_gpr_di_di_store },
3406 { DFmode, DImode, RELOAD_REG_GPR,
3407 CODE_FOR_fusion_gpr_di_df_load,
3408 CODE_FOR_fusion_gpr_di_df_store },
3411 enum machine_mode cur_pmode = Pmode;
3412 size_t i;
3414 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3416 enum machine_mode xmode = addis_insns[i].mode;
3417 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3419 if (addis_insns[i].pmode != cur_pmode)
3420 continue;
3422 if (rtype == RELOAD_REG_FPR
3423 && (!TARGET_HARD_FLOAT || !TARGET_FPRS))
3424 continue;
3426 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3427 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
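/* The addis fusion sequences pair an addis that forms the high 16 bits of
   an address with the dependent load or store, schematically:

	addis  rTMP, rBASE, offset@ha
	lwz    rDEST, offset@l(rTMP)

   (register names here are illustrative, not from this file); the power8
   and power9 dispatch hardware can treat such a pair as one fused op.  */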
3431 /* Note the types for which we support fusing a TOC setup with a memory
3432 insn. We only generate fused TOC sequences for medium/large code models. */
3433 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3434 && (TARGET_CMODEL != CMODEL_SMALL))
3436 reg_addr[QImode].fused_toc = true;
3437 reg_addr[HImode].fused_toc = true;
3438 reg_addr[SImode].fused_toc = true;
3439 reg_addr[DImode].fused_toc = true;
3440 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3442 if (TARGET_SINGLE_FLOAT)
3443 reg_addr[SFmode].fused_toc = true;
3444 if (TARGET_DOUBLE_FLOAT)
3445 reg_addr[DFmode].fused_toc = true;
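/* The fused-TOC case covers the medium/large code model TOC accesses,
   which schematically look like:

	addis  rTMP, r2, sym@toc@ha
	ld     rDEST, sym@toc@l(rTMP)

   so keeping the addis adjacent to the memory insn lets the pair fuse.  */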
3449 /* Precalculate HARD_REGNO_NREGS. */
3450 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3451 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3452 rs6000_hard_regno_nregs[m][r]
3453 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3455 /* Precalculate HARD_REGNO_MODE_OK. */
3456 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3457 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3458 if (rs6000_hard_regno_mode_ok (r, (machine_mode)m))
3459 rs6000_hard_regno_mode_ok_p[m][r] = true;
3461 /* Precalculate CLASS_MAX_NREGS sizes. */
3462 for (c = 0; c < LIM_REG_CLASSES; ++c)
3464 int reg_size;
3466 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3467 reg_size = UNITS_PER_VSX_WORD;
3469 else if (c == ALTIVEC_REGS)
3470 reg_size = UNITS_PER_ALTIVEC_WORD;
3472 else if (c == FLOAT_REGS)
3473 reg_size = UNITS_PER_FP_WORD;
3475 else
3476 reg_size = UNITS_PER_WORD;
3478 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3480 machine_mode m2 = (machine_mode)m;
3481 int reg_size2 = reg_size;
3483 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3484 in VSX. */
3485 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3486 reg_size2 = UNITS_PER_FP_WORD;
3488 rs6000_class_max_nregs[m][c]
3489 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
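/* The "+ reg_size2 - 1" above makes the division round up.  For example, a
   16-byte V2DFmode value needs (16 + 16 - 1) / 16 = 1 register in a VSX
   class (UNITS_PER_VSX_WORD is 16) but (16 + 8 - 1) / 8 = 2 registers in
   FLOAT_REGS (UNITS_PER_FP_WORD is 8).  */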
3493 if (TARGET_E500_DOUBLE)
3494 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
3496 /* Calculate the modes for which to automatically generate code that uses
3497 the reciprocal divide and square root instructions. In the future, possibly
3498 automatically generate the instructions even if the user did not specify
3499 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3500 not accurate enough. */
3501 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3502 if (TARGET_FRES)
3503 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3504 if (TARGET_FRE)
3505 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3506 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3507 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3508 if (VECTOR_UNIT_VSX_P (V2DFmode))
3509 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3511 if (TARGET_FRSQRTES)
3512 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3513 if (TARGET_FRSQRTE)
3514 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3515 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3516 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3517 if (VECTOR_UNIT_VSX_P (V2DFmode))
3518 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3520 if (rs6000_recip_control)
3522 if (!flag_finite_math_only)
3523 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
3524 if (flag_trapping_math)
3525 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
3526 if (!flag_reciprocal_math)
3527 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
3528 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3530 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3531 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3532 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3534 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3535 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3536 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3538 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3539 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3540 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3542 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3543 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3544 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3547 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3548 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3550 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3551 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3552 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3554 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3555 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3556 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3558 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3559 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3560 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
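/* Putting this together: assuming -ffast-math -mrecip on a cpu with fre
   and frsqrte, DFmode ends up with both the HAVE and AUTO bits set for RE
   and RSQRTE, which is what later lets division and square root be
   expanded as a hardware estimate refined by Newton-Raphson steps.  */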
3564 /* Update the addr mask bits in reg_addr to help secondary reload and the
3565 GO_IF_LEGITIMATE_ADDRESS support figure out the appropriate addressing to
3566 use. */
3567 rs6000_setup_reg_addr_masks ();
3569 if (global_init_p || TARGET_DEBUG_TARGET)
3571 if (TARGET_DEBUG_REG)
3572 rs6000_debug_reg_global ();
3574 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3575 fprintf (stderr,
3576 "SImode variable mult cost = %d\n"
3577 "SImode constant mult cost = %d\n"
3578 "SImode short constant mult cost = %d\n"
3579 "DImode multipliciation cost = %d\n"
3580 "SImode division cost = %d\n"
3581 "DImode division cost = %d\n"
3582 "Simple fp operation cost = %d\n"
3583 "DFmode multiplication cost = %d\n"
3584 "SFmode division cost = %d\n"
3585 "DFmode division cost = %d\n"
3586 "cache line size = %d\n"
3587 "l1 cache size = %d\n"
3588 "l2 cache size = %d\n"
3589 "simultaneous prefetches = %d\n"
3590 "\n",
3591 rs6000_cost->mulsi,
3592 rs6000_cost->mulsi_const,
3593 rs6000_cost->mulsi_const9,
3594 rs6000_cost->muldi,
3595 rs6000_cost->divsi,
3596 rs6000_cost->divdi,
3597 rs6000_cost->fp,
3598 rs6000_cost->dmul,
3599 rs6000_cost->sdiv,
3600 rs6000_cost->ddiv,
3601 rs6000_cost->cache_line_size,
3602 rs6000_cost->l1_cache_size,
3603 rs6000_cost->l2_cache_size,
3604 rs6000_cost->simultaneous_prefetches);
3608 #if TARGET_MACHO
3609 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3611 static void
3612 darwin_rs6000_override_options (void)
3614 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3615 off. */
3616 rs6000_altivec_abi = 1;
3617 TARGET_ALTIVEC_VRSAVE = 1;
3618 rs6000_current_abi = ABI_DARWIN;
3620 if (DEFAULT_ABI == ABI_DARWIN
3621 && TARGET_64BIT)
3622 darwin_one_byte_bool = 1;
3624 if (TARGET_64BIT && ! TARGET_POWERPC64)
3626 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3627 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3629 if (flag_mkernel)
3631 rs6000_default_long_calls = 1;
3632 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3635 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3636 Altivec. */
3637 if (!flag_mkernel && !flag_apple_kext
3638 && TARGET_64BIT
3639 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3640 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3642 /* Unless the user (not the configurer) has explicitly overridden
3643 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3644 G4 unless targeting the kernel. */
3645 if (!flag_mkernel
3646 && !flag_apple_kext
3647 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3648 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3649 && ! global_options_set.x_rs6000_cpu_index)
3651 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3654 #endif
3656 /* If not otherwise specified by a target, make 'long double' equivalent to
3657 'double'. */
3659 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3660 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3661 #endif
3663 /* Return the builtin mask reflecting the options in use that could affect
3664 which builtins are available. In the past we used target_flags, but we've
3665 run out of bits, and some options like SPE and PAIRED are no longer in
3666 target_flags. */
3668 HOST_WIDE_INT
3669 rs6000_builtin_mask_calculate (void)
3671 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3672 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3673 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3674 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3675 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3676 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3677 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3678 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3679 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3680 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3681 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3682 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3683 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3684 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3685 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3686 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3687 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3688 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3689 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3690 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3691 | ((TARGET_FLOAT128) ? RS6000_BTM_FLOAT128 : 0));
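/* Roughly speaking, a builtin that requires, say,
   RS6000_BTM_ALTIVEC | RS6000_BTM_64BIT is only usable when both bits are
   present in the mask computed above, so recomputing the mask after a
   target attribute or pragma changes which builtins the front end
   accepts.  */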
3694 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3695 to clobber the XER[CA] bit because clobbering that bit without telling
3696 the compiler worked just fine with versions of GCC before GCC 5, and
3697 breaking a lot of older code in ways that are hard to track down is
3698 not such a great idea. */
3700 static rtx_insn *
3701 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3702 vec<const char *> &/*constraints*/,
3703 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3705 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3706 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3707 return NULL;
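/* The effect is that a statement such as

	asm ("addic %0,%1,1" : "=r" (x) : "r" (y));

   is treated as if it also clobbered the carry bit, so the compiler never
   keeps a live CA value across a user asm.  */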
3710 /* Override command line options. Mostly we process the processor type and
3711 sometimes adjust other TARGET_ options. */
3713 static bool
3714 rs6000_option_override_internal (bool global_init_p)
3716 bool ret = true;
3717 bool have_cpu = false;
3719 /* The default cpu requested at configure time, if any. */
3720 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3722 HOST_WIDE_INT set_masks;
3723 int cpu_index;
3724 int tune_index;
3725 struct cl_target_option *main_target_opt
3726 = ((global_init_p || target_option_default_node == NULL)
3727 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3729 /* Print defaults. */
3730 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3731 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3733 /* Remember the explicit arguments. */
3734 if (global_init_p)
3735 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3737 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3738 library functions, so warn about it. The flag may be useful for
3739 performance studies from time to time though, so don't disable it
3740 entirely. */
3741 if (global_options_set.x_rs6000_alignment_flags
3742 && rs6000_alignment_flags == MASK_ALIGN_POWER
3743 && DEFAULT_ABI == ABI_DARWIN
3744 && TARGET_64BIT)
3745 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3746 " it is incompatible with the installed C and C++ libraries");
3748 /* Numerous experiments show that IRA-based loop pressure
3749 calculation works better for RTL loop-invariant motion on targets
3750 with enough (>= 32) registers. It is an expensive optimization,
3751 so it is enabled only for peak performance. */
3752 if (optimize >= 3 && global_init_p
3753 && !global_options_set.x_flag_ira_loop_pressure)
3754 flag_ira_loop_pressure = 1;
3756 /* Set the pointer size. */
3757 if (TARGET_64BIT)
3759 rs6000_pmode = (int)DImode;
3760 rs6000_pointer_size = 64;
3762 else
3764 rs6000_pmode = (int)SImode;
3765 rs6000_pointer_size = 32;
3768 /* Some OSs don't support saving the high part of 64-bit registers on context
3769 switch. Other OSs don't support saving Altivec registers. On those OSs,
3770 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3771 if the user wants either, the user must explicitly specify them and we
3772 won't interfere with the user's specification. */
3774 set_masks = POWERPC_MASKS;
3775 #ifdef OS_MISSING_POWERPC64
3776 if (OS_MISSING_POWERPC64)
3777 set_masks &= ~OPTION_MASK_POWERPC64;
3778 #endif
3779 #ifdef OS_MISSING_ALTIVEC
3780 if (OS_MISSING_ALTIVEC)
3781 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3782 #endif
3784 /* Don't let the processor default override options given explicitly. */
3785 set_masks &= ~rs6000_isa_flags_explicit;
3787 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3788 the cpu in a target attribute or pragma, but did not specify a tuning
3789 option, use the cpu for the tuning option rather than the option specified
3790 with -mtune on the command line. Process a '--with-cpu' configuration
3791 request as an implicit --cpu. */
3792 if (rs6000_cpu_index >= 0)
3794 cpu_index = rs6000_cpu_index;
3795 have_cpu = true;
3797 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3799 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3800 have_cpu = true;
3802 else if (implicit_cpu)
3804 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3805 have_cpu = true;
3807 else
3809 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3810 const char *default_cpu = ((!TARGET_POWERPC64)
3811 ? "powerpc"
3812 : ((BYTES_BIG_ENDIAN)
3813 ? "powerpc64"
3814 : "powerpc64le"));
3816 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3817 have_cpu = false;
3820 gcc_assert (cpu_index >= 0);
3822 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3823 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3824 with those from the cpu, except for options that were explicitly set. If
3825 we don't have a cpu, do not override the target bits set in
3826 TARGET_DEFAULT. */
3827 if (have_cpu)
3829 rs6000_isa_flags &= ~set_masks;
3830 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3831 & set_masks);
3833 else
3835 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3836 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3837 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Now that we have
3838 switched to using rs6000_isa_flags, we do the initialization here instead.
3840 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3841 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3842 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
3843 : processor_target_table[cpu_index].target_enable);
3844 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3847 if (rs6000_tune_index >= 0)
3848 tune_index = rs6000_tune_index;
3849 else if (have_cpu)
3850 rs6000_tune_index = tune_index = cpu_index;
3851 else
3853 size_t i;
3854 enum processor_type tune_proc
3855 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3857 tune_index = -1;
3858 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3859 if (processor_target_table[i].processor == tune_proc)
3861 rs6000_tune_index = tune_index = i;
3862 break;
3866 gcc_assert (tune_index >= 0);
3867 rs6000_cpu = processor_target_table[tune_index].processor;
3869 /* Pick defaults for SPE-related control flags. Do this early to make sure
3870 that the TARGET_ macros are representative ASAP. */
3872 int spe_capable_cpu =
3873 (rs6000_cpu == PROCESSOR_PPC8540
3874 || rs6000_cpu == PROCESSOR_PPC8548);
3876 if (!global_options_set.x_rs6000_spe_abi)
3877 rs6000_spe_abi = spe_capable_cpu;
3879 if (!global_options_set.x_rs6000_spe)
3880 rs6000_spe = spe_capable_cpu;
3882 if (!global_options_set.x_rs6000_float_gprs)
3883 rs6000_float_gprs =
3884 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
3885 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
3886 : 0);
3889 if (global_options_set.x_rs6000_spe_abi
3890 && rs6000_spe_abi
3891 && !TARGET_SPE_ABI)
3892 error ("not configured for SPE ABI");
3894 if (global_options_set.x_rs6000_spe
3895 && rs6000_spe
3896 && !TARGET_SPE)
3897 error ("not configured for SPE instruction set");
3899 if (main_target_opt != NULL
3900 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
3901 || (main_target_opt->x_rs6000_spe != rs6000_spe)
3902 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
3903 error ("target attribute or pragma changes SPE ABI");
3905 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3906 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3907 || rs6000_cpu == PROCESSOR_PPCE5500)
3909 if (TARGET_ALTIVEC)
3910 error ("AltiVec not supported in this target");
3911 if (TARGET_SPE)
3912 error ("SPE not supported in this target");
3914 if (rs6000_cpu == PROCESSOR_PPCE6500)
3916 if (TARGET_SPE)
3917 error ("SPE not supported in this target");
3920 /* Disable Cell microcode if we are optimizing for the Cell
3921 and not optimizing for size. */
3922 if (rs6000_gen_cell_microcode == -1)
3923 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
3924 && !optimize_size);
3926 /* If we are optimizing big endian systems for space and it's OK to
3927 use instructions that would be microcoded on the Cell, use the
3928 load/store multiple and string instructions. */
3929 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
3930 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
3931 | OPTION_MASK_STRING);
3933 /* Don't allow -mmultiple or -mstring on little endian systems
3934 unless the cpu is a 750, because the hardware doesn't support the
3935 instructions used in little endian mode, and they cause an alignment
3936 trap. The 750 does not cause an alignment trap (except when the
3937 target is unaligned). */
3939 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
3941 if (TARGET_MULTIPLE)
3943 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3944 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3945 warning (0, "-mmultiple is not supported on little endian systems");
3948 if (TARGET_STRING)
3950 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3951 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3952 warning (0, "-mstring is not supported on little endian systems");
3956 /* If little-endian, default to -mstrict-align on older processors.
3957 Testing for htm matches power8 and later. */
3958 if (!BYTES_BIG_ENDIAN
3959 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3960 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3962 /* -maltivec={le,be} implies -maltivec. */
3963 if (rs6000_altivec_element_order != 0)
3964 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3966 /* Disallow -maltivec=le in big endian mode for now. This is not
3967 known to be useful for anyone. */
3968 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
3970 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
3971 rs6000_altivec_element_order = 0;
3974 /* Add some warnings for VSX. */
3975 if (TARGET_VSX)
3977 const char *msg = NULL;
3978 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3979 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3981 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3982 msg = N_("-mvsx requires hardware floating point");
3983 else
3985 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3986 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3989 else if (TARGET_PAIRED_FLOAT)
3990 msg = N_("-mvsx and -mpaired are incompatible");
3991 else if (TARGET_AVOID_XFORM > 0)
3992 msg = N_("-mvsx needs indexed addressing");
3993 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3994 & OPTION_MASK_ALTIVEC))
3996 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3997 msg = N_("-mvsx and -mno-altivec are incompatible");
3998 else
3999 msg = N_("-mno-altivec disables vsx");
4002 if (msg)
4004 warning (0, msg);
4005 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4006 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4010 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4011 the -mcpu setting to enable options that conflict. */
4012 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4013 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4014 | OPTION_MASK_ALTIVEC
4015 | OPTION_MASK_VSX)) != 0)
4016 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4017 | OPTION_MASK_DIRECT_MOVE)
4018 & ~rs6000_isa_flags_explicit);
4020 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4021 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4023 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4024 unless the user explicitly used the -mno-<option> to disable the code. */
4025 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_DFORM_SCALAR
4026 || TARGET_P9_DFORM_VECTOR || TARGET_P9_DFORM_BOTH > 0 || TARGET_P9_MINMAX)
4027 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4028 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4029 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4030 else if (TARGET_VSX)
4031 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4032 else if (TARGET_POPCNTD)
4033 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
4034 else if (TARGET_DFP)
4035 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4036 else if (TARGET_CMPB)
4037 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
4038 else if (TARGET_FPRND)
4039 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
4040 else if (TARGET_POPCNTB)
4041 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
4042 else if (TARGET_ALTIVEC)
4043 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
4045 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4047 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4048 error ("-mcrypto requires -maltivec");
4049 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4052 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4054 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4055 error ("-mdirect-move requires -mvsx");
4056 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4059 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4061 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4062 error ("-mpower8-vector requires -maltivec");
4063 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4066 if (TARGET_P8_VECTOR && !TARGET_VSX)
4068 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4069 error ("-mpower8-vector requires -mvsx");
4070 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4073 if (TARGET_VSX_TIMODE && !TARGET_VSX)
4075 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
4076 error ("-mvsx-timode requires -mvsx");
4077 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4080 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4082 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4083 error ("-mhard-dfp requires -mhard-float");
4084 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4087 /* Allow an explicit -mupper-regs to set -mupper-regs-df, -mupper-regs-di,
4088 and -mupper-regs-sf, depending on the cpu, unless the user explicitly also
4089 set the individual option. */
4090 if (TARGET_UPPER_REGS > 0)
4092 if (TARGET_VSX
4093 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4095 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DF;
4096 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4098 if (TARGET_VSX
4099 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4101 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DI;
4102 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4104 if (TARGET_P8_VECTOR
4105 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4107 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_SF;
4108 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4111 else if (TARGET_UPPER_REGS == 0)
4113 if (TARGET_VSX
4114 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4116 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4117 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4119 if (TARGET_VSX
4120 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4122 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4123 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4125 if (TARGET_P8_VECTOR
4126 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4128 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4129 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4133 if (TARGET_UPPER_REGS_DF && !TARGET_VSX)
4135 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4136 error ("-mupper-regs-df requires -mvsx");
4137 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4140 if (TARGET_UPPER_REGS_DI && !TARGET_VSX)
4142 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI)
4143 error ("-mupper-regs-di requires -mvsx");
4144 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4147 if (TARGET_UPPER_REGS_SF && !TARGET_P8_VECTOR)
4149 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4150 error ("-mupper-regs-sf requires -mpower8-vector");
4151 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4154 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4155 silently turn off quad memory mode. */
4156 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4158 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4159 warning (0, N_("-mquad-memory requires 64-bit mode"));
4161 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4162 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4164 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4165 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4168 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4169 the words are reversed, but atomic operations can still be done by
4170 swapping the words. */
4171 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4173 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4174 warning (0, N_("-mquad-memory is not available in little endian mode"));
4176 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4179 /* Assume that if the user asked for normal quad memory instructions, they
4180 want the atomic versions as well, unless they explicitly told us not to
4181 use quad word atomic instructions. */
4182 if (TARGET_QUAD_MEMORY
4183 && !TARGET_QUAD_MEMORY_ATOMIC
4184 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4185 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4187 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4188 generating power8 instructions. */
4189 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4190 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4191 & OPTION_MASK_P8_FUSION);
4193 /* Setting additional fusion flags turns on base fusion. */
4194 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4196 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4198 if (TARGET_P8_FUSION_SIGN)
4199 error ("-mpower8-fusion-sign requires -mpower8-fusion");
4201 if (TARGET_TOC_FUSION)
4202 error ("-mtoc-fusion requires -mpower8-fusion");
4204 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4206 else
4207 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4210 /* Power9 fusion is a superset of power8 fusion. */
4211 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4213 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4215 /* We prefer to not mention undocumented options in
4216 error messages. However, if users have managed to select
4217 power9-fusion without selecting power8-fusion, they
4218 already know about undocumented flags. */
4219 error ("-mpower9-fusion requires -mpower8-fusion");
4220 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4222 else
4223 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4226 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4227 generating power9 instructions. */
4228 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4229 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4230 & OPTION_MASK_P9_FUSION);
4232 /* Power8 does not fuse sign-extended loads with the addis. If we are
4233 optimizing at high levels for speed, convert a sign-extended load into a
4234 zero-extending load and an explicit sign extension. */
4235 if (TARGET_P8_FUSION
4236 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4237 && optimize_function_for_speed_p (cfun)
4238 && optimize >= 3)
4239 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
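/* Concretely, a sign-extending load such as lha (load halfword algebraic)
   does not fuse on power8, so under these conditions it is emitted as the
   fusable zero-extending lhz followed by an explicit extsh.  */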
4241 /* TOC fusion requires 64-bit and medium/large code model. */
4242 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4244 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4245 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4246 warning (0, N_("-mtoc-fusion requires 64-bit"));
4249 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4251 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4252 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4253 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4256 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4257 model. */
4258 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4259 && (TARGET_CMODEL != CMODEL_SMALL)
4260 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4261 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4263 /* ISA 3.0 vector instructions include ISA 2.07. */
4264 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4266 /* We prefer to not mention undocumented options in
4267 error messages. However, if users have managed to select
4268 power9-vector without selecting power8-vector, they
4269 already know about undocumented flags. */
4270 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4271 error ("-mpower9-vector requires -mpower8-vector");
4272 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4275 /* -mpower9-dform turns on both -mpower9-dform-scalar and
4276 -mpower9-dform-vector. */
4277 if (TARGET_P9_DFORM_BOTH > 0)
4279 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4280 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
4282 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4283 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_SCALAR;
4285 else if (TARGET_P9_DFORM_BOTH == 0)
4287 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4288 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
4290 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4291 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4294 /* ISA 3.0 D-form instructions require p9-vector and upper-regs. */
4295 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR) && !TARGET_P9_VECTOR)
4297 /* We prefer to not mention undocumented options in
4298 error messages. However, if users have managed to select
4299 power9-dform without selecting power9-vector, they
4300 already know about undocumented flags. */
4301 if (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4302 error ("-mpower9-dform requires -mpower9-vector");
4303 rs6000_isa_flags &= ~(OPTION_MASK_P9_DFORM_SCALAR
4304 | OPTION_MASK_P9_DFORM_VECTOR);
4307 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_DF)
4309 /* We prefer to not mention undocumented options in
4310 error messages. However, if users have managed to select
4311 power9-dform without selecting upper-regs-df, they
4312 already know about undocumented flags. */
4313 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4314 error ("-mpower9-dform requires -mupper-regs-df");
4315 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4318 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_SF)
4320 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4321 error ("-mpower9-dform requires -mupper-regs-sf");
4322 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4325 /* There have been bugs with -mvsx-timode that don't show up with -mlra,
4326 but do show up with -mno-lra. Given -mlra will become the default once
4327 PR 69847 is fixed, turn off the options with problems by default if
4328 -mno-lra was used, and warn if the user explicitly asked for the option.
4330 Enable -mpower9-dform-vector by default if LRA and other power9 options
4331 are enabled. Enable -mvsx-timode by default if LRA and VSX are enabled. */
4332 if (!TARGET_LRA)
4334 if (TARGET_VSX_TIMODE)
4336 if ((rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) != 0)
4337 warning (0, "-mvsx-timode might need -mlra");
4339 else
4340 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4344 else
4346 if (TARGET_VSX && !TARGET_VSX_TIMODE
4347 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
4348 rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
4351 /* Set -mallow-movmisalign explicitly to on if we have full ISA 2.07
4352 support. If we only have ISA 2.06 support, and the user did not specify
4353 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4354 but we don't enable the full vectorization support. */
4355 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4356 TARGET_ALLOW_MOVMISALIGN = 1;
4358 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4360 if (TARGET_ALLOW_MOVMISALIGN > 0
4361 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4362 error ("-mallow-movmisalign requires -mvsx");
4364 TARGET_ALLOW_MOVMISALIGN = 0;
4367 /* Determine when unaligned vector accesses are permitted, and when
4368 they are preferred over masked Altivec loads. Note that if
4369 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4370 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4371 not true. */
4372 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4374 if (!TARGET_VSX)
4376 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4377 error ("-mefficient-unaligned-vsx requires -mvsx");
4379 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4382 else if (!TARGET_ALLOW_MOVMISALIGN)
4384 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4385 error ("-mefficient-unaligned-vsx requires -mallow-movmisalign");
4387 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4391 /* __float128 requires VSX support. */
4392 if (TARGET_FLOAT128 && !TARGET_VSX)
4394 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128) != 0)
4395 error ("-mfloat128 requires VSX support");
4397 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128 | OPTION_MASK_FLOAT128_HW);
4400 /* If we have -mfloat128 and full ISA 3.0 support, enable -mfloat128-hardware
4401 by default. */
4402 if (TARGET_FLOAT128 && !TARGET_FLOAT128_HW
4403 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4404 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4406 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4407 if ((rs6000_isa_flags & OPTION_MASK_FLOAT128) != 0)
4408 rs6000_isa_flags_explicit |= OPTION_MASK_FLOAT128_HW;
4411 /* IEEE 128-bit floating point hardware instructions imply enabling
4412 __float128. */
4413 if (TARGET_FLOAT128_HW
4414 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4416 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4417 error ("-mfloat128-hardware requires full ISA 3.0 support");
4419 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4422 if (TARGET_FLOAT128_HW
4423 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128) == 0)
4424 rs6000_isa_flags |= OPTION_MASK_FLOAT128;
4426 /* Print the options after updating the defaults. */
4427 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4428 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4430 /* E500mc does "better" if we inline more aggressively. Respect the
4431 user's opinion, though. */
4432 if (rs6000_block_move_inline_limit == 0
4433 && (rs6000_cpu == PROCESSOR_PPCE500MC
4434 || rs6000_cpu == PROCESSOR_PPCE500MC64
4435 || rs6000_cpu == PROCESSOR_PPCE5500
4436 || rs6000_cpu == PROCESSOR_PPCE6500))
4437 rs6000_block_move_inline_limit = 128;
4439 /* store_one_arg depends on expand_block_move to handle at least the
4440 size of reg_parm_stack_space. */
4441 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4442 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4444 if (global_init_p)
4446 /* If the appropriate debug option is enabled, replace the target hooks
4447 with debug versions that call the real version and then print
4448 debugging information. */
4449 if (TARGET_DEBUG_COST)
4451 targetm.rtx_costs = rs6000_debug_rtx_costs;
4452 targetm.address_cost = rs6000_debug_address_cost;
4453 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4456 if (TARGET_DEBUG_ADDR)
4458 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4459 targetm.legitimize_address = rs6000_debug_legitimize_address;
4460 rs6000_secondary_reload_class_ptr
4461 = rs6000_debug_secondary_reload_class;
4462 rs6000_secondary_memory_needed_ptr
4463 = rs6000_debug_secondary_memory_needed;
4464 rs6000_cannot_change_mode_class_ptr
4465 = rs6000_debug_cannot_change_mode_class;
4466 rs6000_preferred_reload_class_ptr
4467 = rs6000_debug_preferred_reload_class;
4468 rs6000_legitimize_reload_address_ptr
4469 = rs6000_debug_legitimize_reload_address;
4470 rs6000_mode_dependent_address_ptr
4471 = rs6000_debug_mode_dependent_address;
4474 if (rs6000_veclibabi_name)
4476 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4477 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4478 else
4480 error ("unknown vectorization library ABI type (%s) for "
4481 "-mveclibabi= switch", rs6000_veclibabi_name);
4482 ret = false;
4487 if (!global_options_set.x_rs6000_long_double_type_size)
4489 if (main_target_opt != NULL
4490 && (main_target_opt->x_rs6000_long_double_type_size
4491 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4492 error ("target attribute or pragma changes long double size");
4493 else
4494 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4497 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4498 if (!global_options_set.x_rs6000_ieeequad)
4499 rs6000_ieeequad = 1;
4500 #endif
4502 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4503 target attribute or pragma which automatically enables both options,
4504 unless the altivec ABI was set. This is set by default for 64-bit, but
4505 not for 32-bit. */
4506 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4507 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4508 | OPTION_MASK_FLOAT128)
4509 & ~rs6000_isa_flags_explicit);
4511 /* Enable Altivec ABI for AIX -maltivec. */
4512 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4514 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4515 error ("target attribute or pragma changes AltiVec ABI");
4516 else
4517 rs6000_altivec_abi = 1;
4520 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4521 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4522 be explicitly overridden in either case. */
4523 if (TARGET_ELF)
4525 if (!global_options_set.x_rs6000_altivec_abi
4526 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4528 if (main_target_opt != NULL &&
4529 !main_target_opt->x_rs6000_altivec_abi)
4530 error ("target attribute or pragma changes AltiVec ABI");
4531 else
4532 rs6000_altivec_abi = 1;
4536 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4537 So far, the only darwin64 targets are also MACH-O. */
4538 if (TARGET_MACHO
4539 && DEFAULT_ABI == ABI_DARWIN
4540 && TARGET_64BIT)
4542 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4543 error ("target attribute or pragma changes darwin64 ABI");
4544 else
4546 rs6000_darwin64_abi = 1;
4547 /* Default to natural alignment, for better performance. */
4548 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4552 /* Place FP constants in the constant pool instead of the TOC
4553 if section anchors are enabled. */
4554 if (flag_section_anchors
4555 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4556 TARGET_NO_FP_IN_TOC = 1;
4558 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4559 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4561 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4562 SUBTARGET_OVERRIDE_OPTIONS;
4563 #endif
4564 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4565 SUBSUBTARGET_OVERRIDE_OPTIONS;
4566 #endif
4567 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4568 SUB3TARGET_OVERRIDE_OPTIONS;
4569 #endif
4571 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4572 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4574 /* For the E500 family of cores, reset the single/double FP flags to let us
4575 check that they remain constant across attributes or pragmas. Also,
4576 clear a possible request for string instructions, which are not supported
4577 and which we might have silently enabled above for -Os.
4579 For other families, clear ISEL in case it was set implicitly.
4582 switch (rs6000_cpu)
4584 case PROCESSOR_PPC8540:
4585 case PROCESSOR_PPC8548:
4586 case PROCESSOR_PPCE500MC:
4587 case PROCESSOR_PPCE500MC64:
4588 case PROCESSOR_PPCE5500:
4589 case PROCESSOR_PPCE6500:
4591 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
4592 rs6000_double_float = TARGET_E500_DOUBLE;
4594 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4596 break;
4598 default:
4600 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4601 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4603 break;
4606 if (main_target_opt)
4608 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4609 error ("target attribute or pragma changes single precision floating "
4610 "point");
4611 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4612 error ("target attribute or pragma changes double precision floating "
4613 "point");
4616 /* Detect invalid option combinations with E500. */
4617 CHECK_E500_OPTIONS;
4619 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4620 && rs6000_cpu != PROCESSOR_POWER5
4621 && rs6000_cpu != PROCESSOR_POWER6
4622 && rs6000_cpu != PROCESSOR_POWER7
4623 && rs6000_cpu != PROCESSOR_POWER8
4624 && rs6000_cpu != PROCESSOR_POWER9
4625 && rs6000_cpu != PROCESSOR_PPCA2
4626 && rs6000_cpu != PROCESSOR_CELL
4627 && rs6000_cpu != PROCESSOR_PPC476);
4628 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4629 || rs6000_cpu == PROCESSOR_POWER5
4630 || rs6000_cpu == PROCESSOR_POWER7
4631 || rs6000_cpu == PROCESSOR_POWER8);
4632 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4633 || rs6000_cpu == PROCESSOR_POWER5
4634 || rs6000_cpu == PROCESSOR_POWER6
4635 || rs6000_cpu == PROCESSOR_POWER7
4636 || rs6000_cpu == PROCESSOR_POWER8
4637 || rs6000_cpu == PROCESSOR_POWER9
4638 || rs6000_cpu == PROCESSOR_PPCE500MC
4639 || rs6000_cpu == PROCESSOR_PPCE500MC64
4640 || rs6000_cpu == PROCESSOR_PPCE5500
4641 || rs6000_cpu == PROCESSOR_PPCE6500);
4643 /* Allow debug switches to override the above settings. These are set to -1
4644 in rs6000.opt to indicate the user hasn't directly set the switch. */
4645 if (TARGET_ALWAYS_HINT >= 0)
4646 rs6000_always_hint = TARGET_ALWAYS_HINT;
4648 if (TARGET_SCHED_GROUPS >= 0)
4649 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4651 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4652 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4654 rs6000_sched_restricted_insns_priority
4655 = (rs6000_sched_groups ? 1 : 0);
4657 /* Handle -msched-costly-dep option. */
4658 rs6000_sched_costly_dep
4659 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4661 if (rs6000_sched_costly_dep_str)
4663 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4664 rs6000_sched_costly_dep = no_dep_costly;
4665 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4666 rs6000_sched_costly_dep = all_deps_costly;
4667 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4668 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4669 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4670 rs6000_sched_costly_dep = store_to_load_dep_costly;
4671 else
4672 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4673 atoi (rs6000_sched_costly_dep_str));
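/* e.g. -msched-costly-dep=true_store_to_load selects one of the named
   heuristics above, while a plain number such as -msched-costly-dep=2
   falls through to atoi and is treated as a latency threshold.  */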
4676 /* Handle -minsert-sched-nops option. */
4677 rs6000_sched_insert_nops
4678 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4680 if (rs6000_sched_insert_nops_str)
4682 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4683 rs6000_sched_insert_nops = sched_finish_none;
4684 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4685 rs6000_sched_insert_nops = sched_finish_pad_groups;
4686 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4687 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4688 else
4689 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4690 atoi (rs6000_sched_insert_nops_str));
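/* e.g. -minsert-sched-nops=regroup_exact selects a named scheme, while a
   plain number falls through to atoi and selects one of the numeric
   padding schemes.  */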
4693 if (global_init_p)
4695 #ifdef TARGET_REGNAMES
4696 /* If the user desires alternate register names, copy in the
4697 alternate names now. */
4698 if (TARGET_REGNAMES)
4699 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4700 #endif
4702 /* Set aix_struct_return last, after the ABI is determined.
4703 If -maix-struct-return or -msvr4-struct-return was explicitly
4704 used, don't override with the ABI default. */
4705 if (!global_options_set.x_aix_struct_return)
4706 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4708 #if 0
4709 /* IBM XL compiler defaults to unsigned bitfields. */
4710 if (TARGET_XL_COMPAT)
4711 flag_signed_bitfields = 0;
4712 #endif
4714 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4715 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4717 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4719 /* We can only guarantee the availability of DI pseudo-ops when
4720 assembling for 64-bit targets. */
4721 if (!TARGET_64BIT)
4723 targetm.asm_out.aligned_op.di = NULL;
4724 targetm.asm_out.unaligned_op.di = NULL;
4728 /* Set branch target alignment, if not optimizing for size. */
4729 if (!optimize_size)
4731 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4732 8-byte aligned to avoid misprediction by the branch predictor. */
4733 if (rs6000_cpu == PROCESSOR_TITAN
4734 || rs6000_cpu == PROCESSOR_CELL)
4736 if (align_functions <= 0)
4737 align_functions = 8;
4738 if (align_jumps <= 0)
4739 align_jumps = 8;
4740 if (align_loops <= 0)
4741 align_loops = 8;
4743 if (rs6000_align_branch_targets)
4745 if (align_functions <= 0)
4746 align_functions = 16;
4747 if (align_jumps <= 0)
4748 align_jumps = 16;
4749 if (align_loops <= 0)
4751 can_override_loop_align = 1;
4752 align_loops = 16;
4755 if (align_jumps_max_skip <= 0)
4756 align_jumps_max_skip = 15;
4757 if (align_loops_max_skip <= 0)
4758 align_loops_max_skip = 15;
4761 /* Arrange to save and restore machine status around nested functions. */
4762 init_machine_status = rs6000_init_machine_status;
4764 /* We should always be splitting complex arguments, but we can't break
4765 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4766 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4767 targetm.calls.split_complex_arg = NULL;
4770 /* Initialize rs6000_cost with the appropriate target costs. */
4771 if (optimize_size)
4772 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4773 else
4774 switch (rs6000_cpu)
4776 case PROCESSOR_RS64A:
4777 rs6000_cost = &rs64a_cost;
4778 break;
4780 case PROCESSOR_MPCCORE:
4781 rs6000_cost = &mpccore_cost;
4782 break;
4784 case PROCESSOR_PPC403:
4785 rs6000_cost = &ppc403_cost;
4786 break;
4788 case PROCESSOR_PPC405:
4789 rs6000_cost = &ppc405_cost;
4790 break;
4792 case PROCESSOR_PPC440:
4793 rs6000_cost = &ppc440_cost;
4794 break;
4796 case PROCESSOR_PPC476:
4797 rs6000_cost = &ppc476_cost;
4798 break;
4800 case PROCESSOR_PPC601:
4801 rs6000_cost = &ppc601_cost;
4802 break;
4804 case PROCESSOR_PPC603:
4805 rs6000_cost = &ppc603_cost;
4806 break;
4808 case PROCESSOR_PPC604:
4809 rs6000_cost = &ppc604_cost;
4810 break;
4812 case PROCESSOR_PPC604e:
4813 rs6000_cost = &ppc604e_cost;
4814 break;
4816 case PROCESSOR_PPC620:
4817 rs6000_cost = &ppc620_cost;
4818 break;
4820 case PROCESSOR_PPC630:
4821 rs6000_cost = &ppc630_cost;
4822 break;
4824 case PROCESSOR_CELL:
4825 rs6000_cost = &ppccell_cost;
4826 break;
4828 case PROCESSOR_PPC750:
4829 case PROCESSOR_PPC7400:
4830 rs6000_cost = &ppc750_cost;
4831 break;
4833 case PROCESSOR_PPC7450:
4834 rs6000_cost = &ppc7450_cost;
4835 break;
4837 case PROCESSOR_PPC8540:
4838 case PROCESSOR_PPC8548:
4839 rs6000_cost = &ppc8540_cost;
4840 break;
4842 case PROCESSOR_PPCE300C2:
4843 case PROCESSOR_PPCE300C3:
4844 rs6000_cost = &ppce300c2c3_cost;
4845 break;
4847 case PROCESSOR_PPCE500MC:
4848 rs6000_cost = &ppce500mc_cost;
4849 break;
4851 case PROCESSOR_PPCE500MC64:
4852 rs6000_cost = &ppce500mc64_cost;
4853 break;
4855 case PROCESSOR_PPCE5500:
4856 rs6000_cost = &ppce5500_cost;
4857 break;
4859 case PROCESSOR_PPCE6500:
4860 rs6000_cost = &ppce6500_cost;
4861 break;
4863 case PROCESSOR_TITAN:
4864 rs6000_cost = &titan_cost;
4865 break;
4867 case PROCESSOR_POWER4:
4868 case PROCESSOR_POWER5:
4869 rs6000_cost = &power4_cost;
4870 break;
4872 case PROCESSOR_POWER6:
4873 rs6000_cost = &power6_cost;
4874 break;
4876 case PROCESSOR_POWER7:
4877 rs6000_cost = &power7_cost;
4878 break;
4880 case PROCESSOR_POWER8:
4881 rs6000_cost = &power8_cost;
4882 break;
4884 case PROCESSOR_POWER9:
4885 rs6000_cost = &power9_cost;
4886 break;
4888 case PROCESSOR_PPCA2:
4889 rs6000_cost = &ppca2_cost;
4890 break;
4892 default:
4893 gcc_unreachable ();
4896 if (global_init_p)
4898 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4899 rs6000_cost->simultaneous_prefetches,
4900 global_options.x_param_values,
4901 global_options_set.x_param_values);
4902 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4903 global_options.x_param_values,
4904 global_options_set.x_param_values);
4905 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4906 rs6000_cost->cache_line_size,
4907 global_options.x_param_values,
4908 global_options_set.x_param_values);
4909 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4910 global_options.x_param_values,
4911 global_options_set.x_param_values);
4913 /* Increase loop peeling limits based on performance analysis. */
4914 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4915 global_options.x_param_values,
4916 global_options_set.x_param_values);
4917 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4918 global_options.x_param_values,
4919 global_options_set.x_param_values);
4921 /* If using typedef char *va_list, signal that
4922 __builtin_va_start (&ap, 0) can be optimized to
4923 ap = __builtin_next_arg (0). */
4924 if (DEFAULT_ABI != ABI_V4)
4925 targetm.expand_builtin_va_start = NULL;
4928 /* Set up the single/double float flags.
4929 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
4930 then set both flags. */
4931 if (TARGET_HARD_FLOAT && TARGET_FPRS
4932 && rs6000_single_float == 0 && rs6000_double_float == 0)
4933 rs6000_single_float = rs6000_double_float = 1;
4935 /* If not explicitly specified via option, decide whether to generate indexed
4936 load/store instructions. */
4937 if (TARGET_AVOID_XFORM == -1)
4938 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4939 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4940 need indexed accesses and the type used is the scalar type of the element
4941 being loaded or stored. */
4942 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
4943 && !TARGET_ALTIVEC);
4945 /* Set the -mrecip options. */
4946 if (rs6000_recip_name)
4948 char *p = ASTRDUP (rs6000_recip_name);
4949 char *q;
4950 unsigned int mask, i;
4951 bool invert;
4953 while ((q = strtok (p, ",")) != NULL)
4955 p = NULL;
4956 if (*q == '!')
4958 invert = true;
4959 q++;
4961 else
4962 invert = false;
4964 if (!strcmp (q, "default"))
4965 mask = ((TARGET_RECIP_PRECISION)
4966 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4967 else
4969 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4970 if (!strcmp (q, recip_options[i].string))
4972 mask = recip_options[i].mask;
4973 break;
4976 if (i == ARRAY_SIZE (recip_options))
4978 error ("unknown option for -mrecip=%s", q);
4979 invert = false;
4980 mask = 0;
4981 ret = false;
4985 if (invert)
4986 rs6000_recip_control &= ~mask;
4987 else
4988 rs6000_recip_control |= mask;
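/* So, for example, -mrecip=all,!divd first sets every bit in the control
   mask and then the leading '!' clears the double-precision divide bit
   again, while -mrecip=default expands to the high- or low-precision mask
   chosen above.  */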
4992 /* Set the builtin mask reflecting the options in use that could affect
4993 which builtins are available. In the past we used target_flags, but we've
4994 run out of bits, and some options like SPE and PAIRED are no longer in
4995 target_flags. */
4996 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4997 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4998 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4999 rs6000_builtin_mask);
5001 /* Initialize all of the registers. */
5002 rs6000_init_hard_regno_mode_ok (global_init_p);
5004 /* Save the initial options in case the user uses function-specific options. */
5005 if (global_init_p)
5006 target_option_default_node = target_option_current_node
5007 = build_target_option_node (&global_options);
5009 /* If not explicitly specified via option, decide whether to generate the
5010 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
5011 if (TARGET_LINK_STACK == -1)
5012 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5014 return ret;
5017 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5018 define the target cpu type. */
5020 static void
5021 rs6000_option_override (void)
5023 (void) rs6000_option_override_internal (true);
5025 /* Register machine-specific passes. This needs to be done at start-up.
5026 It's convenient to do it here (like i386 does). */
5027 opt_pass *pass_analyze_swaps = make_pass_analyze_swaps (g);
5029 struct register_pass_info analyze_swaps_info
5030 = { pass_analyze_swaps, "cse1", 1, PASS_POS_INSERT_BEFORE };
5032 register_pass (&analyze_swaps_info);
5036 /* Implement targetm.vectorize.builtin_mask_for_load. */
5037 static tree
5038 rs6000_builtin_mask_for_load (void)
5040 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5041 if ((TARGET_ALTIVEC && !TARGET_VSX)
5042 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5043 return altivec_builtin_mask_for_load;
5044 else
5045 return 0;
5048 /* Implement LOOP_ALIGN. */
5049 int
5050 rs6000_loop_align (rtx label)
5052 basic_block bb;
5053 int ninsns;
5055 /* Don't override loop alignment if -falign-loops was specified. */
5056 if (!can_override_loop_align)
5057 return align_loops_log;
5059 bb = BLOCK_FOR_INSN (label);
5060 ninsns = num_loop_insns(bb->loop_father);
5062 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
5063 if (ninsns > 4 && ninsns <= 8
5064 && (rs6000_cpu == PROCESSOR_POWER4
5065 || rs6000_cpu == PROCESSOR_POWER5
5066 || rs6000_cpu == PROCESSOR_POWER6
5067 || rs6000_cpu == PROCESSOR_POWER7
5068 || rs6000_cpu == PROCESSOR_POWER8
5069 || rs6000_cpu == PROCESSOR_POWER9))
5070 return 5;
5071 else
5072 return align_loops_log;
5075 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5076 static int
5077 rs6000_loop_align_max_skip (rtx_insn *label)
5079 return (1 << rs6000_loop_align (label)) - 1;
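/* Illustrative note (not part of this file): the alignment values here
   are log2 amounts.  rs6000_loop_align returning 5 for a small loop on
   a Power4..Power9 target means "align to 1 << 5 = 32 bytes" (one
   icache sector), and the corresponding max skip is (1 << 5) - 1 = 31
   bytes of padding at most.  */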
5082 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5083 after applying N iterations. This routine does not determine
5084 how many iterations are required to reach the desired alignment. */
5086 static bool
5087 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5089 if (is_packed)
5090 return false;
5092 if (TARGET_32BIT)
5094 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5095 return true;
5097 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5098 return true;
5100 return false;
5102 else
5104 if (TARGET_MACHO)
5105 return false;
5107 /* Assume that all other types are naturally aligned. CHECKME! */
5108 return true;
5112 /* Return true if the vector misalignment factor is supported by the
5113 target. */
5114 static bool
5115 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5116 const_tree type,
5117 int misalignment,
5118 bool is_packed)
5120 if (TARGET_VSX)
5122 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5123 return true;
5125 /* Return false if the movmisalign pattern is not supported for this mode. */
5126 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5127 return false;
5129 if (misalignment == -1)
5131 /* Misalignment factor is unknown at compile time but we know
5132 it's word aligned. */
5133 if (rs6000_vector_alignment_reachable (type, is_packed))
5135 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5137 if (element_size == 64 || element_size == 32)
5138 return true;
5141 return false;
5144 /* VSX supports word-aligned vector. */
5145 if (misalignment % 4 == 0)
5146 return true;
5148 return false;
5151 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5152 static int
5153 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5154 tree vectype, int misalign)
5156 unsigned elements;
5157 tree elem_type;
5159 switch (type_of_cost)
5161 case scalar_stmt:
5162 case scalar_load:
5163 case scalar_store:
5164 case vector_stmt:
5165 case vector_load:
5166 case vector_store:
5167 case vec_to_scalar:
5168 case scalar_to_vec:
5169 case cond_branch_not_taken:
5170 return 1;
5172 case vec_perm:
5173 if (TARGET_VSX)
5174 return 3;
5175 else
5176 return 1;
5178 case vec_promote_demote:
5179 if (TARGET_VSX)
5180 return 4;
5181 else
5182 return 1;
5184 case cond_branch_taken:
5185 return 3;
5187 case unaligned_load:
5188 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5189 return 1;
5191 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5193 elements = TYPE_VECTOR_SUBPARTS (vectype);
5194 if (elements == 2)
5195 /* Double word aligned. */
5196 return 2;
5198 if (elements == 4)
5200 switch (misalign)
5202 case 8:
5203 /* Double word aligned. */
5204 return 2;
5206 case -1:
5207 /* Unknown misalignment. */
5208 case 4:
5209 case 12:
5210 /* Word aligned. */
5211 return 22;
5213 default:
5214 gcc_unreachable ();
5219 if (TARGET_ALTIVEC)
5220 /* Misaligned loads are not supported. */
5221 gcc_unreachable ();
5223 return 2;
5225 case unaligned_store:
5226 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5227 return 1;
5229 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5231 elements = TYPE_VECTOR_SUBPARTS (vectype);
5232 if (elements == 2)
5233 /* Double word aligned. */
5234 return 2;
5236 if (elements == 4)
5238 switch (misalign)
5240 case 8:
5241 /* Double word aligned. */
5242 return 2;
5244 case -1:
5245 /* Unknown misalignment. */
5246 case 4:
5247 case 12:
5248 /* Word aligned. */
5249 return 23;
5251 default:
5252 gcc_unreachable ();
5257 if (TARGET_ALTIVEC)
5258 /* Misaligned stores are not supported. */
5259 gcc_unreachable ();
5261 return 2;
5263 case vec_construct:
5264 elements = TYPE_VECTOR_SUBPARTS (vectype);
5265 elem_type = TREE_TYPE (vectype);
5266 /* 32-bit vectors loaded into registers are stored as double
5267 precision, so we need n/2 converts in addition to the usual
5268 n/2 merges to construct a vector of short floats from them. */
5269 if (SCALAR_FLOAT_TYPE_P (elem_type)
5270 && TYPE_PRECISION (elem_type) == 32)
5271 return elements + 1;
5272 else
5273 return elements / 2 + 1;
5275 default:
5276 gcc_unreachable ();
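/* Worked example (not part of this file): for a V4SFmode unaligned_load
   with misalign == 4 (word aligned), the switch above yields 22 on a
   VSX target that allows movmisalign, versus 2 when the access is known
   doubleword aligned (misalign == 8) and 1 when the target handles
   unaligned VSX accesses efficiently.  The large 22/23 values make
   word-aligned misaligned accesses look expensive, presumably to steer
   the vectorizer toward peeling for alignment instead.  */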
5280 /* Implement targetm.vectorize.preferred_simd_mode. */
5282 static machine_mode
5283 rs6000_preferred_simd_mode (machine_mode mode)
5285 if (TARGET_VSX)
5286 switch (mode)
5288 case DFmode:
5289 return V2DFmode;
5290 default:;
5292 if (TARGET_ALTIVEC || TARGET_VSX)
5293 switch (mode)
5295 case SFmode:
5296 return V4SFmode;
5297 case TImode:
5298 return V1TImode;
5299 case DImode:
5300 return V2DImode;
5301 case SImode:
5302 return V4SImode;
5303 case HImode:
5304 return V8HImode;
5305 case QImode:
5306 return V16QImode;
5307 default:;
5309 if (TARGET_SPE)
5310 switch (mode)
5312 case SFmode:
5313 return V2SFmode;
5314 case SImode:
5315 return V2SImode;
5316 default:;
5318 if (TARGET_PAIRED_FLOAT
5319 && mode == SFmode)
5320 return V2SFmode;
5321 return word_mode;
5324 typedef struct _rs6000_cost_data
5326 struct loop *loop_info;
5327 unsigned cost[3];
5328 } rs6000_cost_data;
5330 /* Test for likely overcommitment of vector hardware resources. If a
5331 loop iteration is relatively large, and too large a percentage of
5332 instructions in the loop are vectorized, the cost model may not
5333 adequately reflect delays from unavailable vector resources.
5334 Penalize the loop body cost for this case. */
5336 static void
5337 rs6000_density_test (rs6000_cost_data *data)
5339 const int DENSITY_PCT_THRESHOLD = 85;
5340 const int DENSITY_SIZE_THRESHOLD = 70;
5341 const int DENSITY_PENALTY = 10;
5342 struct loop *loop = data->loop_info;
5343 basic_block *bbs = get_loop_body (loop);
5344 int nbbs = loop->num_nodes;
5345 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5346 int i, density_pct;
5348 for (i = 0; i < nbbs; i++)
5350 basic_block bb = bbs[i];
5351 gimple_stmt_iterator gsi;
5353 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5355 gimple *stmt = gsi_stmt (gsi);
5356 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5358 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5359 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5360 not_vec_cost++;
5364 free (bbs);
5365 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5367 if (density_pct > DENSITY_PCT_THRESHOLD
5368 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5370 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5371 if (dump_enabled_p ())
5372 dump_printf_loc (MSG_NOTE, vect_location,
5373 "density %d%%, cost %d exceeds threshold, penalizing "
5374 "loop body cost by %d%%", density_pct,
5375 vec_cost + not_vec_cost, DENSITY_PENALTY);
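/* Worked example (not part of this file): with vec_cost == 90 and
   not_vec_cost == 10, density_pct is 90 * 100 / 100 == 90, which
   exceeds DENSITY_PCT_THRESHOLD (85); the loop size 90 + 10 == 100
   exceeds DENSITY_SIZE_THRESHOLD (70); so the body cost is scaled by
   (100 + DENSITY_PENALTY) / 100 and 90 becomes 99.  */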
5379 /* Implement targetm.vectorize.init_cost. */
5381 static void *
5382 rs6000_init_cost (struct loop *loop_info)
5384 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5385 data->loop_info = loop_info;
5386 data->cost[vect_prologue] = 0;
5387 data->cost[vect_body] = 0;
5388 data->cost[vect_epilogue] = 0;
5389 return data;
5392 /* Implement targetm.vectorize.add_stmt_cost. */
5394 static unsigned
5395 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5396 struct _stmt_vec_info *stmt_info, int misalign,
5397 enum vect_cost_model_location where)
5399 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5400 unsigned retval = 0;
5402 if (flag_vect_cost_model)
5404 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5405 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5406 misalign);
5407 /* Statements in an inner loop relative to the loop being
5408 vectorized are weighted more heavily. The value here is
5409 arbitrary and could potentially be improved with analysis. */
5410 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5411 count *= 50; /* FIXME. */
5413 retval = (unsigned) (count * stmt_cost);
5414 cost_data->cost[where] += retval;
5417 return retval;
5420 /* Implement targetm.vectorize.finish_cost. */
5422 static void
5423 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5424 unsigned *body_cost, unsigned *epilogue_cost)
5426 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5428 if (cost_data->loop_info)
5429 rs6000_density_test (cost_data);
5431 *prologue_cost = cost_data->cost[vect_prologue];
5432 *body_cost = cost_data->cost[vect_body];
5433 *epilogue_cost = cost_data->cost[vect_epilogue];
5436 /* Implement targetm.vectorize.destroy_cost_data. */
5438 static void
5439 rs6000_destroy_cost_data (void *data)
5441 free (data);
5444 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5445 library with vectorized intrinsics. */
5447 static tree
5448 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5449 tree type_in)
5451 char name[32];
5452 const char *suffix = NULL;
5453 tree fntype, new_fndecl, bdecl = NULL_TREE;
5454 int n_args = 1;
5455 const char *bname;
5456 machine_mode el_mode, in_mode;
5457 int n, in_n;
5459 /* Libmass is suitable for unsafe math only as it does not correctly support
5460 parts of IEEE with the required precision such as denormals. Only support
5461 it if we have VSX to use the simd d2 or f4 functions.
5462 XXX: Add variable length support. */
5463 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5464 return NULL_TREE;
5466 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5467 n = TYPE_VECTOR_SUBPARTS (type_out);
5468 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5469 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5470 if (el_mode != in_mode
5471 || n != in_n)
5472 return NULL_TREE;
5474 switch (fn)
5476 CASE_CFN_ATAN2:
5477 CASE_CFN_HYPOT:
5478 CASE_CFN_POW:
5479 n_args = 2;
5480 /* fall through */
5482 CASE_CFN_ACOS:
5483 CASE_CFN_ACOSH:
5484 CASE_CFN_ASIN:
5485 CASE_CFN_ASINH:
5486 CASE_CFN_ATAN:
5487 CASE_CFN_ATANH:
5488 CASE_CFN_CBRT:
5489 CASE_CFN_COS:
5490 CASE_CFN_COSH:
5491 CASE_CFN_ERF:
5492 CASE_CFN_ERFC:
5493 CASE_CFN_EXP2:
5494 CASE_CFN_EXP:
5495 CASE_CFN_EXPM1:
5496 CASE_CFN_LGAMMA:
5497 CASE_CFN_LOG10:
5498 CASE_CFN_LOG1P:
5499 CASE_CFN_LOG2:
5500 CASE_CFN_LOG:
5501 CASE_CFN_SIN:
5502 CASE_CFN_SINH:
5503 CASE_CFN_SQRT:
5504 CASE_CFN_TAN:
5505 CASE_CFN_TANH:
5506 if (el_mode == DFmode && n == 2)
5508 bdecl = mathfn_built_in (double_type_node, fn);
5509 suffix = "d2"; /* pow -> powd2 */
5511 else if (el_mode == SFmode && n == 4)
5513 bdecl = mathfn_built_in (float_type_node, fn);
5514 suffix = "4"; /* powf -> powf4 */
5516 else
5517 return NULL_TREE;
5518 if (!bdecl)
5519 return NULL_TREE;
5520 break;
5522 default:
5523 return NULL_TREE;
5526 gcc_assert (suffix != NULL);
5527 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5528 if (!bname)
5529 return NULL_TREE;
5531 strcpy (name, bname + sizeof ("__builtin_") - 1);
5532 strcat (name, suffix);
5534 if (n_args == 1)
5535 fntype = build_function_type_list (type_out, type_in, NULL);
5536 else if (n_args == 2)
5537 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5538 else
5539 gcc_unreachable ();
5541 /* Build a function declaration for the vectorized function. */
5542 new_fndecl = build_decl (BUILTINS_LOCATION,
5543 FUNCTION_DECL, get_identifier (name), fntype);
5544 TREE_PUBLIC (new_fndecl) = 1;
5545 DECL_EXTERNAL (new_fndecl) = 1;
5546 DECL_IS_NOVOPS (new_fndecl) = 1;
5547 TREE_READONLY (new_fndecl) = 1;
5549 return new_fndecl;
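/* Worked example (not part of this file): for a V2DFmode pow, bname is
   "__builtin_pow"; stripping the "__builtin_" prefix and appending the
   "d2" suffix yields the MASS routine name "powd2", and the decl built
   above is an external, const, no-VOPs function of type
   V2DF (*) (V2DF, V2DF).  */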
5552 /* Returns a function decl for a vectorized version of the builtin function
5553 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5554 if it is not available. */
5556 static tree
5557 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5558 tree type_in)
5560 machine_mode in_mode, out_mode;
5561 int in_n, out_n;
5563 if (TARGET_DEBUG_BUILTIN)
5564 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5565 combined_fn_name (combined_fn (fn)),
5566 GET_MODE_NAME (TYPE_MODE (type_out)),
5567 GET_MODE_NAME (TYPE_MODE (type_in)));
5569 if (TREE_CODE (type_out) != VECTOR_TYPE
5570 || TREE_CODE (type_in) != VECTOR_TYPE
5571 || !TARGET_VECTORIZE_BUILTINS)
5572 return NULL_TREE;
5574 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5575 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5576 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5577 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5579 switch (fn)
5581 CASE_CFN_COPYSIGN:
5582 if (VECTOR_UNIT_VSX_P (V2DFmode)
5583 && out_mode == DFmode && out_n == 2
5584 && in_mode == DFmode && in_n == 2)
5585 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5586 if (VECTOR_UNIT_VSX_P (V4SFmode)
5587 && out_mode == SFmode && out_n == 4
5588 && in_mode == SFmode && in_n == 4)
5589 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5590 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5591 && out_mode == SFmode && out_n == 4
5592 && in_mode == SFmode && in_n == 4)
5593 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5594 break;
5595 CASE_CFN_CEIL:
5596 if (VECTOR_UNIT_VSX_P (V2DFmode)
5597 && out_mode == DFmode && out_n == 2
5598 && in_mode == DFmode && in_n == 2)
5599 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5600 if (VECTOR_UNIT_VSX_P (V4SFmode)
5601 && out_mode == SFmode && out_n == 4
5602 && in_mode == SFmode && in_n == 4)
5603 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5604 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5605 && out_mode == SFmode && out_n == 4
5606 && in_mode == SFmode && in_n == 4)
5607 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5608 break;
5609 CASE_CFN_FLOOR:
5610 if (VECTOR_UNIT_VSX_P (V2DFmode)
5611 && out_mode == DFmode && out_n == 2
5612 && in_mode == DFmode && in_n == 2)
5613 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5614 if (VECTOR_UNIT_VSX_P (V4SFmode)
5615 && out_mode == SFmode && out_n == 4
5616 && in_mode == SFmode && in_n == 4)
5617 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5618 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5619 && out_mode == SFmode && out_n == 4
5620 && in_mode == SFmode && in_n == 4)
5621 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5622 break;
5623 CASE_CFN_FMA:
5624 if (VECTOR_UNIT_VSX_P (V2DFmode)
5625 && out_mode == DFmode && out_n == 2
5626 && in_mode == DFmode && in_n == 2)
5627 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5628 if (VECTOR_UNIT_VSX_P (V4SFmode)
5629 && out_mode == SFmode && out_n == 4
5630 && in_mode == SFmode && in_n == 4)
5631 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5632 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5633 && out_mode == SFmode && out_n == 4
5634 && in_mode == SFmode && in_n == 4)
5635 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5636 break;
5637 CASE_CFN_TRUNC:
5638 if (VECTOR_UNIT_VSX_P (V2DFmode)
5639 && out_mode == DFmode && out_n == 2
5640 && in_mode == DFmode && in_n == 2)
5641 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5642 if (VECTOR_UNIT_VSX_P (V4SFmode)
5643 && out_mode == SFmode && out_n == 4
5644 && in_mode == SFmode && in_n == 4)
5645 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5646 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5647 && out_mode == SFmode && out_n == 4
5648 && in_mode == SFmode && in_n == 4)
5649 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5650 break;
5651 CASE_CFN_NEARBYINT:
5652 if (VECTOR_UNIT_VSX_P (V2DFmode)
5653 && flag_unsafe_math_optimizations
5654 && out_mode == DFmode && out_n == 2
5655 && in_mode == DFmode && in_n == 2)
5656 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5657 if (VECTOR_UNIT_VSX_P (V4SFmode)
5658 && flag_unsafe_math_optimizations
5659 && out_mode == SFmode && out_n == 4
5660 && in_mode == SFmode && in_n == 4)
5661 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5662 break;
5663 CASE_CFN_RINT:
5664 if (VECTOR_UNIT_VSX_P (V2DFmode)
5665 && !flag_trapping_math
5666 && out_mode == DFmode && out_n == 2
5667 && in_mode == DFmode && in_n == 2)
5668 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5669 if (VECTOR_UNIT_VSX_P (V4SFmode)
5670 && !flag_trapping_math
5671 && out_mode == SFmode && out_n == 4
5672 && in_mode == SFmode && in_n == 4)
5673 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5674 break;
5675 default:
5676 break;
5679 /* Generate calls to libmass if appropriate. */
5680 if (rs6000_veclib_handler)
5681 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5683 return NULL_TREE;
5686 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5688 static tree
5689 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5690 tree type_in)
5692 machine_mode in_mode, out_mode;
5693 int in_n, out_n;
5695 if (TARGET_DEBUG_BUILTIN)
5696 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5697 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5698 GET_MODE_NAME (TYPE_MODE (type_out)),
5699 GET_MODE_NAME (TYPE_MODE (type_in)));
5701 if (TREE_CODE (type_out) != VECTOR_TYPE
5702 || TREE_CODE (type_in) != VECTOR_TYPE
5703 || !TARGET_VECTORIZE_BUILTINS)
5704 return NULL_TREE;
5706 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5707 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5708 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5709 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5711 enum rs6000_builtins fn
5712 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5713 switch (fn)
5715 case RS6000_BUILTIN_RSQRTF:
5716 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5717 && out_mode == SFmode && out_n == 4
5718 && in_mode == SFmode && in_n == 4)
5719 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5720 break;
5721 case RS6000_BUILTIN_RSQRT:
5722 if (VECTOR_UNIT_VSX_P (V2DFmode)
5723 && out_mode == DFmode && out_n == 2
5724 && in_mode == DFmode && in_n == 2)
5725 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5726 break;
5727 case RS6000_BUILTIN_RECIPF:
5728 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5729 && out_mode == SFmode && out_n == 4
5730 && in_mode == SFmode && in_n == 4)
5731 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5732 break;
5733 case RS6000_BUILTIN_RECIP:
5734 if (VECTOR_UNIT_VSX_P (V2DFmode)
5735 && out_mode == DFmode && out_n == 2
5736 && in_mode == DFmode && in_n == 2)
5737 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5738 break;
5739 default:
5740 break;
5742 return NULL_TREE;
5745 /* Default CPU string for rs6000*_file_start functions. */
5746 static const char *rs6000_default_cpu;
5748 /* Do anything needed at the start of the asm file. */
5750 static void
5751 rs6000_file_start (void)
5753 char buffer[80];
5754 const char *start = buffer;
5755 FILE *file = asm_out_file;
5757 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5759 default_file_start ();
5761 if (flag_verbose_asm)
5763 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5765 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5767 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5768 start = "";
5771 if (global_options_set.x_rs6000_cpu_index)
5773 fprintf (file, "%s -mcpu=%s", start,
5774 processor_target_table[rs6000_cpu_index].name);
5775 start = "";
5778 if (global_options_set.x_rs6000_tune_index)
5780 fprintf (file, "%s -mtune=%s", start,
5781 processor_target_table[rs6000_tune_index].name);
5782 start = "";
5785 if (PPC405_ERRATUM77)
5787 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5788 start = "";
5791 #ifdef USING_ELFOS_H
5792 switch (rs6000_sdata)
5794 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5795 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5796 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5797 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5800 if (rs6000_sdata && g_switch_value)
5802 fprintf (file, "%s -G %d", start,
5803 g_switch_value);
5804 start = "";
5806 #endif
5808 if (*start == '\0')
5809 putc ('\n', file);
5812 #ifdef USING_ELFOS_H
5813 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5814 && !global_options_set.x_rs6000_cpu_index)
5816 fputs ("\t.machine ", asm_out_file);
5817 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5818 fputs ("power9\n", asm_out_file);
5819 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5820 fputs ("power8\n", asm_out_file);
5821 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5822 fputs ("power7\n", asm_out_file);
5823 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5824 fputs ("power6\n", asm_out_file);
5825 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5826 fputs ("power5\n", asm_out_file);
5827 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5828 fputs ("power4\n", asm_out_file);
5829 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5830 fputs ("ppc64\n", asm_out_file);
5831 else
5832 fputs ("ppc\n", asm_out_file);
5834 #endif
5836 if (DEFAULT_ABI == ABI_ELFv2)
5837 fprintf (file, "\t.abiversion 2\n");
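/* Example output (not part of this file): on an ELF target configured
   without --with-cpu and invoked without an explicit -mcpu, the block
   above derives the .machine directive from the ISA flag bits, so
   default flags that include OPTION_MASK_DIRECT_MOVE produce

	.machine power8

   while -fverbose-asm additionally records an option summary comment
   such as "# rs6000/powerpc options: -mtune=power8" at the top.  */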
5841 /* Return nonzero if this function is known to have a null epilogue. */
5843 int
5844 direct_return (void)
5846 if (reload_completed)
5848 rs6000_stack_t *info = rs6000_stack_info ();
5850 if (info->first_gp_reg_save == 32
5851 && info->first_fp_reg_save == 64
5852 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5853 && ! info->lr_save_p
5854 && ! info->cr_save_p
5855 && info->vrsave_size == 0
5856 && ! info->push_p)
5857 return 1;
5860 return 0;
5863 /* Return the number of instructions it takes to form a constant in an
5864 integer register. */
5866 static int
5867 num_insns_constant_wide (HOST_WIDE_INT value)
5869 /* signed constant loadable with addi */
5870 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5871 return 1;
5873 /* constant loadable with addis */
5874 else if ((value & 0xffff) == 0
5875 && (value >> 31 == -1 || value >> 31 == 0))
5876 return 1;
5878 else if (TARGET_POWERPC64)
5880 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5881 HOST_WIDE_INT high = value >> 31;
5883 if (high == 0 || high == -1)
5884 return 2;
5886 high >>= 1;
5888 if (low == 0)
5889 return num_insns_constant_wide (high) + 1;
5890 else if (high == 0)
5891 return num_insns_constant_wide (low) + 1;
5892 else
5893 return (num_insns_constant_wide (high)
5894 + num_insns_constant_wide (low) + 1);
5897 else
5898 return 2;
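/* Standalone sketch (not part of this file) of the first two tests
   above, assuming a 64-bit HOST_WIDE_INT: a constant takes one
   instruction when it fits a signed 16-bit addi immediate, or when its
   low 16 bits are zero and the rest sign-extends from bit 31 (addis): */

#include <stdio.h>
#include <stdint.h>

static int
one_insn_constant (int64_t value)
{
  if ((uint64_t) value + 0x8000 < 0x10000)		/* addi (li) */
    return 1;
  if ((value & 0xffff) == 0
      && (value >> 31 == 0 || value >> 31 == -1))	/* addis (lis) */
    return 1;
  return 0;
}

int
main (void)
{
  printf ("%d %d %d\n",
	  one_insn_constant (-32768),		/* 1: li  r,-32768 */
	  one_insn_constant (0x12340000),	/* 1: lis r,0x1234 */
	  one_insn_constant (0x12345678));	/* 0: needs lis+ori */
  return 0;
}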
5901 int
5902 num_insns_constant (rtx op, machine_mode mode)
5904 HOST_WIDE_INT low, high;
5906 switch (GET_CODE (op))
5908 case CONST_INT:
5909 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5910 && rs6000_is_valid_and_mask (op, mode))
5911 return 2;
5912 else
5913 return num_insns_constant_wide (INTVAL (op));
5915 case CONST_WIDE_INT:
5917 int i;
5918 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5919 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5920 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5921 return ins;
5924 case CONST_DOUBLE:
5925 if (mode == SFmode || mode == SDmode)
5927 long l;
5929 if (DECIMAL_FLOAT_MODE_P (mode))
5930 REAL_VALUE_TO_TARGET_DECIMAL32
5931 (*CONST_DOUBLE_REAL_VALUE (op), l);
5932 else
5933 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5934 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5937 long l[2];
5938 if (DECIMAL_FLOAT_MODE_P (mode))
5939 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
5940 else
5941 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5942 high = l[WORDS_BIG_ENDIAN == 0];
5943 low = l[WORDS_BIG_ENDIAN != 0];
5945 if (TARGET_32BIT)
5946 return (num_insns_constant_wide (low)
5947 + num_insns_constant_wide (high));
5948 else
5950 if ((high == 0 && low >= 0)
5951 || (high == -1 && low < 0))
5952 return num_insns_constant_wide (low);
5954 else if (rs6000_is_valid_and_mask (op, mode))
5955 return 2;
5957 else if (low == 0)
5958 return num_insns_constant_wide (high) + 1;
5960 else
5961 return (num_insns_constant_wide (high)
5962 + num_insns_constant_wide (low) + 1);
5965 default:
5966 gcc_unreachable ();
5970 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5971 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5972 corresponding element of the vector, but for V4SFmode and V2SFmode,
5973 the corresponding "float" is interpreted as an SImode integer. */
5975 HOST_WIDE_INT
5976 const_vector_elt_as_int (rtx op, unsigned int elt)
5978 rtx tmp;
5980 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5981 gcc_assert (GET_MODE (op) != V2DImode
5982 && GET_MODE (op) != V2DFmode);
5984 tmp = CONST_VECTOR_ELT (op, elt);
5985 if (GET_MODE (op) == V4SFmode
5986 || GET_MODE (op) == V2SFmode)
5987 tmp = gen_lowpart (SImode, tmp);
5988 return INTVAL (tmp);
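/* Worked example (not part of this file): in a V4SFmode constant, the
   element 1.0f is reinterpreted through gen_lowpart as the SImode value
   0x3f800000 (its IEEE single-precision bit pattern), so the vspltis*
   checks below always compare integer bit patterns.  */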
5991 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5992 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5993 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5994 all items are set to the same value and contain COPIES replicas of the
5995 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5996 operand and the others are set to the value of the operand's msb. */
5998 static bool
5999 vspltis_constant (rtx op, unsigned step, unsigned copies)
6001 machine_mode mode = GET_MODE (op);
6002 machine_mode inner = GET_MODE_INNER (mode);
6004 unsigned i;
6005 unsigned nunits;
6006 unsigned bitsize;
6007 unsigned mask;
6009 HOST_WIDE_INT val;
6010 HOST_WIDE_INT splat_val;
6011 HOST_WIDE_INT msb_val;
6013 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6014 return false;
6016 nunits = GET_MODE_NUNITS (mode);
6017 bitsize = GET_MODE_BITSIZE (inner);
6018 mask = GET_MODE_MASK (inner);
6020 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6021 splat_val = val;
6022 msb_val = val >= 0 ? 0 : -1;
6024 /* Construct the value to be splatted, if possible. If not, return 0. */
6025 for (i = 2; i <= copies; i *= 2)
6027 HOST_WIDE_INT small_val;
6028 bitsize /= 2;
6029 small_val = splat_val >> bitsize;
6030 mask >>= bitsize;
6031 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
6032 return false;
6033 splat_val = small_val;
6036 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6037 if (EASY_VECTOR_15 (splat_val))
6040 /* Also check if we can splat, and then add the result to itself. Do so if
6041 the value is positive, or if the splat instruction is using OP's mode;
6042 for splat_val < 0, the splat and the add should use the same mode. */
6043 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6044 && (splat_val >= 0 || (step == 1 && copies == 1)))
6047 /* Also check if we are loading up the most significant bit, which can be done by
6048 loading up -1 and shifting the value left by -1. */
6049 else if (EASY_VECTOR_MSB (splat_val, inner))
6052 else
6053 return false;
6055 /* Check if VAL is present in every STEP-th element, and the
6056 other elements are filled with its most significant bit. */
6057 for (i = 1; i < nunits; ++i)
6059 HOST_WIDE_INT desired_val;
6060 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6061 if ((i & (step - 1)) == 0)
6062 desired_val = val;
6063 else
6064 desired_val = msb_val;
6066 if (desired_val != const_vector_elt_as_int (op, elt))
6067 return false;
6070 return true;
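/* Worked example (not part of this file): for a V8HImode vector whose
   elements are all 0x0101, calling vspltis_constant with step == 1 and
   copies == 2 halves bitsize from 16 to 8 and verifies that
   0x0101 == (0x01 << 8) | 0x01, leaving splat_val == 1; the constant is
   then generated as "vspltisb v,1" and reinterpreted as halfwords.  */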
6073 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6074 instruction, filling in the bottom elements with 0 or -1.
6076 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6077 for the number of zeroes to shift in, or negative for the number of 0xff
6078 bytes to shift in.
6080 OP is a CONST_VECTOR. */
6082 int
6083 vspltis_shifted (rtx op)
6085 machine_mode mode = GET_MODE (op);
6086 machine_mode inner = GET_MODE_INNER (mode);
6088 unsigned i, j;
6089 unsigned nunits;
6090 unsigned mask;
6092 HOST_WIDE_INT val;
6094 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6095 return 0;
6097 /* We need to create pseudo registers to do the shift, so don't recognize
6098 shift vector constants after reload. */
6099 if (!can_create_pseudo_p ())
6100 return 0;
6102 nunits = GET_MODE_NUNITS (mode);
6103 mask = GET_MODE_MASK (inner);
6105 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6107 /* Check if the value can really be the operand of a vspltis[bhw]. */
6108 if (EASY_VECTOR_15 (val))
6111 /* Also check if we are loading up the most significant bit which can be done
6112 by loading up -1 and shifting the value left by -1. */
6113 else if (EASY_VECTOR_MSB (val, inner))
6116 else
6117 return 0;
6119 /* Check if VAL is present in every STEP-th element until we find elements
6120 that are 0 or all 1 bits. */
6121 for (i = 1; i < nunits; ++i)
6123 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6124 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6126 /* If the value isn't the splat value, check for the remaining elements
6127 being 0/-1. */
6128 if (val != elt_val)
6130 if (elt_val == 0)
6132 for (j = i+1; j < nunits; ++j)
6134 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6135 if (const_vector_elt_as_int (op, elt2) != 0)
6136 return 0;
6139 return (nunits - i) * GET_MODE_SIZE (inner);
6142 else if ((elt_val & mask) == mask)
6144 for (j = i+1; j < nunits; ++j)
6146 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6147 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6148 return 0;
6151 return -((nunits - i) * GET_MODE_SIZE (inner));
6154 else
6155 return 0;
6159 /* If all elements are equal, we don't need to do VSLDOI. */
6160 return 0;
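/* Worked example (not part of this file): the big-endian V4SImode
   constant { 5, 5, 5, 0 } splats 5 but trails one zero word, so the
   routine returns (4 - 3) * 4 == 4: emit "vspltisw v,5", then shift
   four zero bytes in with VSLDOI.  */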
6164 /* Return true if OP is of the given MODE and can be synthesized
6165 with a vspltisb, vspltish or vspltisw. */
6167 bool
6168 easy_altivec_constant (rtx op, machine_mode mode)
6170 unsigned step, copies;
6172 if (mode == VOIDmode)
6173 mode = GET_MODE (op);
6174 else if (mode != GET_MODE (op))
6175 return false;
6177 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6178 constants. */
6179 if (mode == V2DFmode)
6180 return zero_constant (op, mode);
6182 else if (mode == V2DImode)
6184 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6185 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6186 return false;
6188 if (zero_constant (op, mode))
6189 return true;
6191 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6192 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6193 return true;
6195 return false;
6198 /* V1TImode is a special container for TImode. Ignore for now. */
6199 else if (mode == V1TImode)
6200 return false;
6202 /* Start with a vspltisw. */
6203 step = GET_MODE_NUNITS (mode) / 4;
6204 copies = 1;
6206 if (vspltis_constant (op, step, copies))
6207 return true;
6209 /* Then try with a vspltish. */
6210 if (step == 1)
6211 copies <<= 1;
6212 else
6213 step >>= 1;
6215 if (vspltis_constant (op, step, copies))
6216 return true;
6218 /* And finally a vspltisb. */
6219 if (step == 1)
6220 copies <<= 1;
6221 else
6222 step >>= 1;
6224 if (vspltis_constant (op, step, copies))
6225 return true;
6227 if (vspltis_shifted (op) != 0)
6228 return true;
6230 return false;
6233 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6234 result is OP. Abort if it is not possible. */
6236 rtx
6237 gen_easy_altivec_constant (rtx op)
6239 machine_mode mode = GET_MODE (op);
6240 int nunits = GET_MODE_NUNITS (mode);
6241 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6242 unsigned step = nunits / 4;
6243 unsigned copies = 1;
6245 /* Start with a vspltisw. */
6246 if (vspltis_constant (op, step, copies))
6247 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6249 /* Then try with a vspltish. */
6250 if (step == 1)
6251 copies <<= 1;
6252 else
6253 step >>= 1;
6255 if (vspltis_constant (op, step, copies))
6256 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6258 /* And finally a vspltisb. */
6259 if (step == 1)
6260 copies <<= 1;
6261 else
6262 step >>= 1;
6264 if (vspltis_constant (op, step, copies))
6265 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6267 gcc_unreachable ();
6270 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6271 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6273 Return the number of instructions needed (1 or 2) into the address pointed
6274 via NUM_INSNS_PTR.
6276 Return the constant that is being split via CONSTANT_PTR. */
6278 bool
6279 xxspltib_constant_p (rtx op,
6280 machine_mode mode,
6281 int *num_insns_ptr,
6282 int *constant_ptr)
6284 size_t nunits = GET_MODE_NUNITS (mode);
6285 size_t i;
6286 HOST_WIDE_INT value;
6287 rtx element;
6289 /* Set the returned values to out of bound values. */
6290 *num_insns_ptr = -1;
6291 *constant_ptr = 256;
6293 if (!TARGET_P9_VECTOR)
6294 return false;
6296 if (mode == VOIDmode)
6297 mode = GET_MODE (op);
6299 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6300 return false;
6302 /* Handle (vec_duplicate <constant>). */
6303 if (GET_CODE (op) == VEC_DUPLICATE)
6305 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6306 && mode != V2DImode)
6307 return false;
6309 element = XEXP (op, 0);
6310 if (!CONST_INT_P (element))
6311 return false;
6313 value = INTVAL (element);
6314 if (!IN_RANGE (value, -128, 127))
6315 return false;
6318 /* Handle (const_vector [...]). */
6319 else if (GET_CODE (op) == CONST_VECTOR)
6321 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6322 && mode != V2DImode)
6323 return false;
6325 element = CONST_VECTOR_ELT (op, 0);
6326 if (!CONST_INT_P (element))
6327 return false;
6329 value = INTVAL (element);
6330 if (!IN_RANGE (value, -128, 127))
6331 return false;
6333 for (i = 1; i < nunits; i++)
6335 element = CONST_VECTOR_ELT (op, i);
6336 if (!CONST_INT_P (element))
6337 return false;
6339 if (value != INTVAL (element))
6340 return false;
6344 /* Handle integer constants being loaded into the upper part of the VSX
6345 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6346 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6347 else if (CONST_INT_P (op))
6349 if (!SCALAR_INT_MODE_P (mode))
6350 return false;
6352 value = INTVAL (op);
6353 if (!IN_RANGE (value, -128, 127))
6354 return false;
6356 if (!IN_RANGE (value, -1, 0))
6358 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6359 return false;
6361 if (EASY_VECTOR_15 (value))
6362 return false;
6366 else
6367 return false;
6369 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6370 sign extend. Special case 0/-1 to allow getting any VSX register instead
6371 of an Altivec register. */
6372 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6373 && EASY_VECTOR_15 (value))
6374 return false;
6376 /* Return # of instructions and the constant byte for XXSPLTIB. */
6377 if (mode == V16QImode)
6378 *num_insns_ptr = 1;
6380 else if (IN_RANGE (value, -1, 0))
6381 *num_insns_ptr = 1;
6383 else
6384 *num_insns_ptr = 2;
6386 *constant_ptr = (int) value;
6387 return true;
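/* Worked examples (not part of this file): on a power9 target, a
   V16QImode vector of 20s is a single "xxspltib vd,20"; a V4SImode
   vector of 100s costs two instructions (xxspltib plus a vextsb2w
   sign extension); and a V4SImode vector of 5s is rejected here
   because the plain vspltisw path is preferred for it.  */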
6390 const char *
6391 output_vec_const_move (rtx *operands)
6393 int cst, cst2, shift;
6394 machine_mode mode;
6395 rtx dest, vec;
6397 dest = operands[0];
6398 vec = operands[1];
6399 mode = GET_MODE (dest);
6401 if (TARGET_VSX)
6403 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6404 int xxspltib_value = 256;
6405 int num_insns = -1;
6407 if (zero_constant (vec, mode))
6409 if (TARGET_P9_VECTOR)
6410 return "xxspltib %x0,0";
6412 else if (dest_vmx_p)
6413 return "vspltisw %0,0";
6415 else
6416 return "xxlxor %x0,%x0,%x0";
6419 if (all_ones_constant (vec, mode))
6421 if (TARGET_P9_VECTOR)
6422 return "xxspltib %x0,255";
6424 else if (dest_vmx_p)
6425 return "vspltisw %0,-1";
6427 else if (TARGET_P8_VECTOR)
6428 return "xxlorc %x0,%x0,%x0";
6430 else
6431 gcc_unreachable ();
6434 if (TARGET_P9_VECTOR
6435 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6437 if (num_insns == 1)
6439 operands[2] = GEN_INT (xxspltib_value & 0xff);
6440 return "xxspltib %x0,%2";
6443 return "#";
6447 if (TARGET_ALTIVEC)
6449 rtx splat_vec;
6451 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6452 if (zero_constant (vec, mode))
6453 return "vspltisw %0,0";
6455 if (all_ones_constant (vec, mode))
6456 return "vspltisw %0,-1";
6458 /* Do we need to construct a value using VSLDOI? */
6459 shift = vspltis_shifted (vec);
6460 if (shift != 0)
6461 return "#";
6463 splat_vec = gen_easy_altivec_constant (vec);
6464 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6465 operands[1] = XEXP (splat_vec, 0);
6466 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6467 return "#";
6469 switch (GET_MODE (splat_vec))
6471 case V4SImode:
6472 return "vspltisw %0,%1";
6474 case V8HImode:
6475 return "vspltish %0,%1";
6477 case V16QImode:
6478 return "vspltisb %0,%1";
6480 default:
6481 gcc_unreachable ();
6485 gcc_assert (TARGET_SPE);
6487 /* Vector constant 0 is handled as a splitter of V2SI, and in the
6488 pattern of V1DI, V4HI, and V2SF.
6490 FIXME: We should probably return # and add post reload
6491 splitters for these, but this way is so easy ;-). */
6492 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
6493 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
6494 operands[1] = CONST_VECTOR_ELT (vec, 0);
6495 operands[2] = CONST_VECTOR_ELT (vec, 1);
6496 if (cst == cst2)
6497 return "li %0,%1\n\tevmergelo %0,%0,%0";
6498 else if (WORDS_BIG_ENDIAN)
6499 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
6500 else
6501 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
6504 /* Initialize the PAIRED vector TARGET to VALS. */
6506 void
6507 paired_expand_vector_init (rtx target, rtx vals)
6509 machine_mode mode = GET_MODE (target);
6510 int n_elts = GET_MODE_NUNITS (mode);
6511 int n_var = 0;
6512 rtx x, new_rtx, tmp, constant_op, op1, op2;
6513 int i;
6515 for (i = 0; i < n_elts; ++i)
6517 x = XVECEXP (vals, 0, i);
6518 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6519 ++n_var;
6521 if (n_var == 0)
6523 /* Load from constant pool. */
6524 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6525 return;
6528 if (n_var == 2)
6530 /* The vector is initialized only with non-constants. */
6531 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6532 XVECEXP (vals, 0, 1));
6534 emit_move_insn (target, new_rtx);
6535 return;
6538 /* One field is non-constant and the other one is a constant. Load the
6539 constant from the constant pool and use the ps_merge instruction to
6540 construct the whole vector. */
6541 op1 = XVECEXP (vals, 0, 0);
6542 op2 = XVECEXP (vals, 0, 1);
6544 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6546 tmp = gen_reg_rtx (GET_MODE (constant_op));
6547 emit_move_insn (tmp, constant_op);
6549 if (CONSTANT_P (op1))
6550 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6551 else
6552 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6554 emit_move_insn (target, new_rtx);
6557 void
6558 paired_expand_vector_move (rtx operands[])
6560 rtx op0 = operands[0], op1 = operands[1];
6562 emit_move_insn (op0, op1);
6565 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6566 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6567 operands for the relation operation COND. This is a recursive
6568 function. */
6570 static void
6571 paired_emit_vector_compare (enum rtx_code rcode,
6572 rtx dest, rtx op0, rtx op1,
6573 rtx cc_op0, rtx cc_op1)
6575 rtx tmp = gen_reg_rtx (V2SFmode);
6576 rtx tmp1, max, min;
6578 gcc_assert (TARGET_PAIRED_FLOAT);
6579 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6581 switch (rcode)
6583 case LT:
6584 case LTU:
6585 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6586 return;
6587 case GE:
6588 case GEU:
6589 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6590 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6591 return;
6592 case LE:
6593 case LEU:
6594 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6595 return;
6596 case GT:
6597 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6598 return;
6599 case EQ:
6600 tmp1 = gen_reg_rtx (V2SFmode);
6601 max = gen_reg_rtx (V2SFmode);
6602 min = gen_reg_rtx (V2SFmode);
6603 gen_reg_rtx (V2SFmode);
6605 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6606 emit_insn (gen_selv2sf4
6607 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6608 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6609 emit_insn (gen_selv2sf4
6610 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6611 emit_insn (gen_subv2sf3 (tmp1, min, max));
6612 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6613 return;
6614 case NE:
6615 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6616 return;
6617 case UNLE:
6618 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6619 return;
6620 case UNLT:
6621 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6622 return;
6623 case UNGE:
6624 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6625 return;
6626 case UNGT:
6627 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6628 return;
6629 default:
6630 gcc_unreachable ();
6633 return;
6636 /* Emit vector conditional expression.
6637 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6638 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6640 int
6641 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6642 rtx cond, rtx cc_op0, rtx cc_op1)
6644 enum rtx_code rcode = GET_CODE (cond);
6646 if (!TARGET_PAIRED_FLOAT)
6647 return 0;
6649 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6651 return 1;
6654 /* Initialize vector TARGET to VALS. */
6656 void
6657 rs6000_expand_vector_init (rtx target, rtx vals)
6659 machine_mode mode = GET_MODE (target);
6660 machine_mode inner_mode = GET_MODE_INNER (mode);
6661 int n_elts = GET_MODE_NUNITS (mode);
6662 int n_var = 0, one_var = -1;
6663 bool all_same = true, all_const_zero = true;
6664 rtx x, mem;
6665 int i;
6667 for (i = 0; i < n_elts; ++i)
6669 x = XVECEXP (vals, 0, i);
6670 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6671 ++n_var, one_var = i;
6672 else if (x != CONST0_RTX (inner_mode))
6673 all_const_zero = false;
6675 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6676 all_same = false;
6679 if (n_var == 0)
6681 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6682 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6683 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6685 /* Zero register. */
6686 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (mode, target, target)));
6687 return;
6689 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6691 /* Splat immediate. */
6692 emit_insn (gen_rtx_SET (target, const_vec));
6693 return;
6695 else
6697 /* Load from constant pool. */
6698 emit_move_insn (target, const_vec);
6699 return;
6703 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6704 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6706 rtx op0 = XVECEXP (vals, 0, 0);
6707 rtx op1 = XVECEXP (vals, 0, 1);
6708 if (all_same)
6710 if (!MEM_P (op0) && !REG_P (op0))
6711 op0 = force_reg (inner_mode, op0);
6712 if (mode == V2DFmode)
6713 emit_insn (gen_vsx_splat_v2df (target, op0));
6714 else
6715 emit_insn (gen_vsx_splat_v2di (target, op0));
6717 else
6719 op0 = force_reg (inner_mode, op0);
6720 op1 = force_reg (inner_mode, op1);
6721 if (mode == V2DFmode)
6722 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
6723 else
6724 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
6726 return;
6729 /* Word values on ISA 3.0 can use mtvsrws, lxvwsx, or vspltisw. V4SF is
6730 complicated since scalars are stored as doubles in the registers. */
6731 if (TARGET_P9_VECTOR && mode == V4SImode && all_same
6732 && VECTOR_MEM_VSX_P (mode))
6734 emit_insn (gen_vsx_splat_v4si (target, XVECEXP (vals, 0, 0)));
6735 return;
6738 /* With single precision floating point on VSX, we know that internally single
6739 precision is actually represented as a double, so either make 2 V2DF
6740 vectors and convert these vectors to single precision, or do one
6741 conversion and splat the result to the other elements. */
6742 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
6744 if (all_same)
6746 rtx op0 = XVECEXP (vals, 0, 0);
6748 if (TARGET_P9_VECTOR)
6749 emit_insn (gen_vsx_splat_v4sf (target, op0));
6751 else
6753 rtx freg = gen_reg_rtx (V4SFmode);
6754 rtx sreg = force_reg (SFmode, op0);
6755 rtx cvt = (TARGET_XSCVDPSPN
6756 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6757 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6759 emit_insn (cvt);
6760 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6761 const0_rtx));
6764 else
6766 rtx dbl_even = gen_reg_rtx (V2DFmode);
6767 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6768 rtx flt_even = gen_reg_rtx (V4SFmode);
6769 rtx flt_odd = gen_reg_rtx (V4SFmode);
6770 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6771 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6772 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6773 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6775 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6776 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6777 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6778 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6779 rs6000_expand_extract_even (target, flt_even, flt_odd);
6781 return;
6784 /* Store value to stack temp. Load vector element. Splat. However, splat
6785 of 64-bit items is not supported on Altivec. */
6786 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6788 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6789 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6790 XVECEXP (vals, 0, 0));
6791 x = gen_rtx_UNSPEC (VOIDmode,
6792 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6793 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6794 gen_rtvec (2,
6795 gen_rtx_SET (target, mem),
6796 x)));
6797 x = gen_rtx_VEC_SELECT (inner_mode, target,
6798 gen_rtx_PARALLEL (VOIDmode,
6799 gen_rtvec (1, const0_rtx)));
6800 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6801 return;
6804 /* One field is non-constant. Load constant then overwrite
6805 varying field. */
6806 if (n_var == 1)
6808 rtx copy = copy_rtx (vals);
6810 /* Load constant part of vector, substitute neighboring value for
6811 varying element. */
6812 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6813 rs6000_expand_vector_init (target, copy);
6815 /* Insert variable. */
6816 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6817 return;
6820 /* Construct the vector in memory one field at a time
6821 and load the whole vector. */
6822 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6823 for (i = 0; i < n_elts; i++)
6824 emit_move_insn (adjust_address_nv (mem, inner_mode,
6825 i * GET_MODE_SIZE (inner_mode)),
6826 XVECEXP (vals, 0, i));
6827 emit_move_insn (target, mem);
6830 /* Set field ELT of TARGET to VAL. */
6832 void
6833 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6835 machine_mode mode = GET_MODE (target);
6836 machine_mode inner_mode = GET_MODE_INNER (mode);
6837 rtx reg = gen_reg_rtx (mode);
6838 rtx mask, mem, x;
6839 int width = GET_MODE_SIZE (inner_mode);
6840 int i;
6842 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6844 rtx (*set_func) (rtx, rtx, rtx, rtx)
6845 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
6846 emit_insn (set_func (target, target, val, GEN_INT (elt)));
6847 return;
6850 /* Simplify setting single element vectors like V1TImode. */
6851 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6853 emit_move_insn (target, gen_lowpart (mode, val));
6854 return;
6857 /* Load single variable value. */
6858 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6859 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6860 x = gen_rtx_UNSPEC (VOIDmode,
6861 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6862 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6863 gen_rtvec (2,
6864 gen_rtx_SET (reg, mem),
6865 x)));
6867 /* Linear sequence. */
6868 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6869 for (i = 0; i < 16; ++i)
6870 XVECEXP (mask, 0, i) = GEN_INT (i);
6872 /* Set permute mask to insert element into target. */
6873 for (i = 0; i < width; ++i)
6874 XVECEXP (mask, 0, elt*width + i)
6875 = GEN_INT (i + 0x10);
6876 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6878 if (BYTES_BIG_ENDIAN)
6879 x = gen_rtx_UNSPEC (mode,
6880 gen_rtvec (3, target, reg,
6881 force_reg (V16QImode, x)),
6882 UNSPEC_VPERM);
6883 else
6885 if (TARGET_P9_VECTOR)
6886 x = gen_rtx_UNSPEC (mode,
6887 gen_rtvec (3, target, reg,
6888 force_reg (V16QImode, x)),
6889 UNSPEC_VPERMR);
6890 else
6892 /* Invert selector. We prefer to generate VNAND on P8 so
6893 that future fusion opportunities can kick in, but must
6894 generate VNOR elsewhere. */
6895 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6896 rtx iorx = (TARGET_P8_VECTOR
6897 ? gen_rtx_IOR (V16QImode, notx, notx)
6898 : gen_rtx_AND (V16QImode, notx, notx));
6899 rtx tmp = gen_reg_rtx (V16QImode);
6900 emit_insn (gen_rtx_SET (tmp, iorx));
6902 /* Permute with operands reversed and adjusted selector. */
6903 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6904 UNSPEC_VPERM);
6908 emit_insn (gen_rtx_SET (target, x));
6911 /* Extract field ELT from VEC into TARGET. */
6913 void
6914 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
6916 machine_mode mode = GET_MODE (vec);
6917 machine_mode inner_mode = GET_MODE_INNER (mode);
6918 rtx mem;
6920 if (VECTOR_MEM_VSX_P (mode))
6922 switch (mode)
6924 default:
6925 break;
6926 case V1TImode:
6927 gcc_assert (elt == 0 && inner_mode == TImode);
6928 emit_move_insn (target, gen_lowpart (TImode, vec));
6929 break;
6930 case V2DFmode:
6931 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
6932 return;
6933 case V2DImode:
6934 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
6935 return;
6936 case V4SFmode:
6937 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
6938 return;
6939 case V16QImode:
6940 if (TARGET_VEXTRACTUB)
6942 emit_insn (gen_vsx_extract_v16qi (target, vec, GEN_INT (elt)));
6943 return;
6945 else
6946 break;
6947 case V8HImode:
6948 if (TARGET_VEXTRACTUB)
6950 emit_insn (gen_vsx_extract_v8hi (target, vec, GEN_INT (elt)));
6951 return;
6953 else
6954 break;
6955 case V4SImode:
6956 if (TARGET_VEXTRACTUB)
6958 emit_insn (gen_vsx_extract_v4si (target, vec, GEN_INT (elt)));
6959 return;
6961 else
6962 break;
6966 /* Allocate mode-sized buffer. */
6967 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6969 emit_move_insn (mem, vec);
6971 /* Add offset to field within buffer matching vector element. */
6972 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
6974 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6977 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
6979 bool
6980 invalid_e500_subreg (rtx op, machine_mode mode)
6982 if (TARGET_E500_DOUBLE)
6984 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
6985 subreg:TI and reg:TF. Decimal float modes are like integer
6986 modes (only low part of each register used) for this
6987 purpose. */
6988 if (GET_CODE (op) == SUBREG
6989 && (mode == SImode || mode == DImode || mode == TImode
6990 || mode == DDmode || mode == TDmode || mode == PTImode)
6991 && REG_P (SUBREG_REG (op))
6992 && (GET_MODE (SUBREG_REG (op)) == DFmode
6993 || GET_MODE (SUBREG_REG (op)) == TFmode
6994 || GET_MODE (SUBREG_REG (op)) == IFmode
6995 || GET_MODE (SUBREG_REG (op)) == KFmode))
6996 return true;
6998 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
6999 reg:TI. */
7000 if (GET_CODE (op) == SUBREG
7001 && (mode == DFmode || mode == TFmode || mode == IFmode
7002 || mode == KFmode)
7003 && REG_P (SUBREG_REG (op))
7004 && (GET_MODE (SUBREG_REG (op)) == DImode
7005 || GET_MODE (SUBREG_REG (op)) == TImode
7006 || GET_MODE (SUBREG_REG (op)) == PTImode
7007 || GET_MODE (SUBREG_REG (op)) == DDmode
7008 || GET_MODE (SUBREG_REG (op)) == TDmode))
7009 return true;
7012 if (TARGET_SPE
7013 && GET_CODE (op) == SUBREG
7014 && mode == SImode
7015 && REG_P (SUBREG_REG (op))
7016 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
7017 return true;
7019 return false;
7022 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7023 selects whether the alignment is abi mandated, optional, or
7024 both abi and optional alignment. */
7026 unsigned int
7027 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7029 if (how != align_opt)
7031 if (TREE_CODE (type) == VECTOR_TYPE)
7033 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
7034 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
7036 if (align < 64)
7037 align = 64;
7039 else if (align < 128)
7040 align = 128;
7042 else if (TARGET_E500_DOUBLE
7043 && TREE_CODE (type) == REAL_TYPE
7044 && TYPE_MODE (type) == DFmode)
7046 if (align < 64)
7047 align = 64;
7051 if (how != align_abi)
7053 if (TREE_CODE (type) == ARRAY_TYPE
7054 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7056 if (align < BITS_PER_WORD)
7057 align = BITS_PER_WORD;
7061 return align;
7064 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7066 bool
7067 rs6000_special_adjust_field_align_p (tree field, unsigned int computed)
7069 if (TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7071 if (computed != 128)
7073 static bool warned;
7074 if (!warned && warn_psabi)
7076 warned = true;
7077 inform (input_location,
7078 "the layout of aggregates containing vectors with"
7079 " %d-byte alignment has changed in GCC 5",
7080 computed / BITS_PER_UNIT);
7083 /* In current GCC there is no special case. */
7084 return false;
7087 return false;
7090 /* AIX increases natural record alignment to doubleword if the first
7091 field is an FP double while the FP fields remain word aligned. */
7093 unsigned int
7094 rs6000_special_round_type_align (tree type, unsigned int computed,
7095 unsigned int specified)
7097 unsigned int align = MAX (computed, specified);
7098 tree field = TYPE_FIELDS (type);
7100 /* Skip all non-field decls. */
7101 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7102 field = DECL_CHAIN (field);
7104 if (field != NULL && field != type)
7106 type = TREE_TYPE (field);
7107 while (TREE_CODE (type) == ARRAY_TYPE)
7108 type = TREE_TYPE (type);
7110 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7111 align = MAX (align, 64);
7114 return align;
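/* Illustration (not part of this file): under this AIX rule a struct
   such as

     struct s { double d; int i; };

   is rounded up to doubleword (64-bit) alignment because its first
   field is a DFmode double, while fields after the first keep their
   normal word alignment.  */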
7117 /* Darwin increases record alignment to the natural alignment of
7118 the first field. */
7120 unsigned int
7121 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7122 unsigned int specified)
7124 unsigned int align = MAX (computed, specified);
7126 if (TYPE_PACKED (type))
7127 return align;
7129 /* Find the first field, looking down into aggregates. */
7130 do {
7131 tree field = TYPE_FIELDS (type);
7132 /* Skip all non-field decls. */
7133 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7134 field = DECL_CHAIN (field);
7135 if (! field)
7136 break;
7137 /* A packed field does not contribute any extra alignment. */
7138 if (DECL_PACKED (field))
7139 return align;
7140 type = TREE_TYPE (field);
7141 while (TREE_CODE (type) == ARRAY_TYPE)
7142 type = TREE_TYPE (type);
7143 } while (AGGREGATE_TYPE_P (type));
7145 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7146 align = MAX (align, TYPE_ALIGN (type));
7148 return align;
7151 /* Return 1 for an operand in small memory on V.4/eabi. */
7153 int
7154 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7155 machine_mode mode ATTRIBUTE_UNUSED)
7157 #if TARGET_ELF
7158 rtx sym_ref;
7160 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7161 return 0;
7163 if (DEFAULT_ABI != ABI_V4)
7164 return 0;
7166 /* Vector and float memory instructions have a limited offset on the
7167 SPE, so using a vector or float variable directly as an operand is
7168 not useful. */
7169 if (TARGET_SPE
7170 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
7171 return 0;
7173 if (GET_CODE (op) == SYMBOL_REF)
7174 sym_ref = op;
7176 else if (GET_CODE (op) != CONST
7177 || GET_CODE (XEXP (op, 0)) != PLUS
7178 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7179 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7180 return 0;
7182 else
7184 rtx sum = XEXP (op, 0);
7185 HOST_WIDE_INT summand;
7187 /* We have to be careful here, because it is the referenced address
7188 that must be 32k from _SDA_BASE_, not just the symbol. */
7189 summand = INTVAL (XEXP (sum, 1));
7190 if (summand < 0 || summand > g_switch_value)
7191 return 0;
7193 sym_ref = XEXP (sum, 0);
7196 return SYMBOL_REF_SMALL_P (sym_ref);
7197 #else
7198 return 0;
7199 #endif
7202 /* Return true if either operand is a general purpose register. */
7204 bool
7205 gpr_or_gpr_p (rtx op0, rtx op1)
7207 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7208 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7211 /* Return true if this is a move direct operation between GPR registers and
7212 floating point/VSX registers. */
7214 bool
7215 direct_move_p (rtx op0, rtx op1)
7217 int regno0, regno1;
7219 if (!REG_P (op0) || !REG_P (op1))
7220 return false;
7222 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7223 return false;
7225 regno0 = REGNO (op0);
7226 regno1 = REGNO (op1);
7227 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7228 return false;
7230 if (INT_REGNO_P (regno0))
7231 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7233 else if (INT_REGNO_P (regno1))
7235 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7236 return true;
7238 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7239 return true;
7242 return false;
7245 /* Return true if the OFFSET is valid for the quad address instructions that
7246 use d-form (register + offset) addressing. */
7248 static inline bool
7249 quad_address_offset_p (HOST_WIDE_INT offset)
7251 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
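/* Editor's sketch (illustrative; the helper name is hypothetical): the
   predicate above accepts exactly the signed 16-bit offsets that are
   multiples of 16, matching the DQ-form displacement of lq/stq and
   lxv/stxv. */
#if 0
#include <assert.h>
static int dq_offset_ok (long off)
{ return off >= -32768 && off <= 32767 && (off & 0xf) == 0; }

int main (void)
{
  assert (dq_offset_ok (0) && dq_offset_ok (16) && dq_offset_ok (-32768));
  assert (dq_offset_ok (32752));   /* largest accepted offset          */
  assert (!dq_offset_ok (8));      /* not a multiple of 16             */
  assert (!dq_offset_ok (32768));  /* outside the signed 16-bit range  */
  return 0;
}
#endif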
7254 /* Return true if the ADDR is an acceptable address for a quad memory
7255 operation of mode MODE (either LQ/STQ for general purpose registers, or
7256 LXV/STXV for vector registers under ISA 3.0). STRICT, as in the other
7257 address predicates, requires any base register to be a hard register
7258 that is valid as a base. */
7260 bool
7261 quad_address_p (rtx addr, machine_mode mode, bool strict)
7263 rtx op0, op1;
7265 if (GET_MODE_SIZE (mode) != 16)
7266 return false;
7268 if (legitimate_indirect_address_p (addr, strict))
7269 return true;
7271 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
7272 return false;
7274 if (GET_CODE (addr) != PLUS)
7275 return false;
7277 op0 = XEXP (addr, 0);
7278 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7279 return false;
7281 op1 = XEXP (addr, 1);
7282 if (!CONST_INT_P (op1))
7283 return false;
7285 return quad_address_offset_p (INTVAL (op1));
7288 /* Return true if this is a load or store quad operation. This function does
7289 not handle the atomic quad memory instructions. */
7291 bool
7292 quad_load_store_p (rtx op0, rtx op1)
7294 bool ret;
7296 if (!TARGET_QUAD_MEMORY)
7297 ret = false;
7299 else if (REG_P (op0) && MEM_P (op1))
7300 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7301 && quad_memory_operand (op1, GET_MODE (op1))
7302 && !reg_overlap_mentioned_p (op0, op1));
7304 else if (MEM_P (op0) && REG_P (op1))
7305 ret = (quad_memory_operand (op0, GET_MODE (op0))
7306 && quad_int_reg_operand (op1, GET_MODE (op1)));
7308 else
7309 ret = false;
7311 if (TARGET_DEBUG_ADDR)
7313 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7314 ret ? "true" : "false");
7315 debug_rtx (gen_rtx_SET (op0, op1));
7318 return ret;
7321 /* Given an address, return a constant offset term if one exists. */
7323 static rtx
7324 address_offset (rtx op)
7326 if (GET_CODE (op) == PRE_INC
7327 || GET_CODE (op) == PRE_DEC)
7328 op = XEXP (op, 0);
7329 else if (GET_CODE (op) == PRE_MODIFY
7330 || GET_CODE (op) == LO_SUM)
7331 op = XEXP (op, 1);
7333 if (GET_CODE (op) == CONST)
7334 op = XEXP (op, 0);
7336 if (GET_CODE (op) == PLUS)
7337 op = XEXP (op, 1);
7339 if (CONST_INT_P (op))
7340 return op;
7342 return NULL_RTX;
7345 /* Return true if the MEM operand is a memory operand suitable for use
7346 with a (full width, possibly multiple) gpr load/store. On
7347 powerpc64 this means the offset must be divisible by 4.
7348 Implements 'Y' constraint.
7350 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7351 a constraint function we know the operand has satisfied a suitable
7352 memory predicate. Also accept some odd rtl generated by reload
7353 (see rs6000_legitimize_reload_address for various forms). It is
7354 important that reload rtl be accepted by appropriate constraints
7355 but not by the operand predicate.
7357 Offsetting a lo_sum should not be allowed, except where we know by
7358 alignment that a 32k boundary is not crossed, but see the ???
7359 comment in rs6000_legitimize_reload_address. Note that by
7360 "offsetting" here we mean a further offset to access parts of the
7361 MEM. It's fine to have a lo_sum where the inner address is offset
7362 from a sym, since the same sym+offset will appear in the high part
7363 of the address calculation. */
7365 bool
7366 mem_operand_gpr (rtx op, machine_mode mode)
7368 unsigned HOST_WIDE_INT offset;
7369 int extra;
7370 rtx addr = XEXP (op, 0);
7372 op = address_offset (addr);
7373 if (op == NULL_RTX)
7374 return true;
7376 offset = INTVAL (op);
7377 if (TARGET_POWERPC64 && (offset & 3) != 0)
7378 return false;
7380 if (mode_supports_vsx_dform_quad (mode)
7381 && !quad_address_offset_p (offset))
7382 return false;
7384 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7385 if (extra < 0)
7386 extra = 0;
7388 if (GET_CODE (addr) == LO_SUM)
7389 /* For lo_sum addresses, we must allow any offset except one that
7390 causes a wrap, so test only the low 16 bits. */
7391 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7393 return offset + 0x8000 < 0x10000u - extra;
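/* Editor's sketch (illustrative, standalone): the two bit tricks used
   above.  ((x & 0xffff) ^ 0x8000) - 0x8000 sign-extends the low 16 bits,
   which is all that matters for a lo_sum; and off + 0x8000 < 0x10000 - extra
   folds the range check -32768 <= off < 32768 - extra into one unsigned
   compare. */
#if 0
#include <assert.h>
int main (void)
{
  /* Sign-extension of the low halfword.  */
  assert ((((0x12345678L & 0xffff) ^ 0x8000) - 0x8000) == 0x5678);
  assert ((((0x1234ffffL & 0xffff) ^ 0x8000) - 0x8000) == -1);

  /* Window test for a 16-byte access on a 64-bit target: extra = 8,
     so the largest accepted offset is 32760 - 8 + 4 words below 32768,
     i.e. anything strictly below 32760.  */
  unsigned long extra = 8;
  assert ((unsigned long) (32756 + 0x8000) < 0x10000u - extra);     /* fits */
  assert (!((unsigned long) (32760 + 0x8000) < 0x10000u - extra));  /* first rejected offset */
  return 0;
}
#endif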
7396 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7398 static bool
7399 reg_offset_addressing_ok_p (machine_mode mode)
7401 switch (mode)
7403 case V16QImode:
7404 case V8HImode:
7405 case V4SFmode:
7406 case V4SImode:
7407 case V2DFmode:
7408 case V2DImode:
7409 case V1TImode:
7410 case TImode:
7411 case TFmode:
7412 case KFmode:
7413 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7414 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7415 a vector mode, if we want to use the VSX registers to move it around,
7416 we need to restrict ourselves to reg+reg addressing. Similarly for
7417 IEEE 128-bit floating point that is passed in a single vector
7418 register. */
7419 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7420 return mode_supports_vsx_dform_quad (mode);
7421 break;
7423 case V4HImode:
7424 case V2SImode:
7425 case V1DImode:
7426 case V2SFmode:
7427 /* Paired vector modes. Only reg+reg addressing is valid. */
7428 if (TARGET_PAIRED_FLOAT)
7429 return false;
7430 break;
7432 case SDmode:
7433 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7434 addressing for the LFIWZX and STFIWX instructions. */
7435 if (TARGET_NO_SDMODE_STACK)
7436 return false;
7437 break;
7439 default:
7440 break;
7443 return true;
7446 static bool
7447 virtual_stack_registers_memory_p (rtx op)
7449 int regnum;
7451 if (GET_CODE (op) == REG)
7452 regnum = REGNO (op);
7454 else if (GET_CODE (op) == PLUS
7455 && GET_CODE (XEXP (op, 0)) == REG
7456 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7457 regnum = REGNO (XEXP (op, 0));
7459 else
7460 return false;
7462 return (regnum >= FIRST_VIRTUAL_REGISTER
7463 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7466 /* Return true if a MODE sized memory accesses to OP plus OFFSET
7467 is known to not straddle a 32k boundary. This function is used
7468 to determine whether -mcmodel=medium code can use TOC pointer
7469 relative addressing for OP. This means the alignment of the TOC
7470 pointer must also be taken into account, and unfortunately that is
7471 only 8 bytes. */
7473 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7474 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7475 #endif
7477 static bool
7478 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7479 machine_mode mode)
7481 tree decl;
7482 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7484 if (GET_CODE (op) != SYMBOL_REF)
7485 return false;
7487 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7488 SYMBOL_REF. */
7489 if (mode_supports_vsx_dform_quad (mode))
7490 return false;
7492 dsize = GET_MODE_SIZE (mode);
7493 decl = SYMBOL_REF_DECL (op);
7494 if (!decl)
7496 if (dsize == 0)
7497 return false;
7499 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7500 replacing memory addresses with an anchor plus offset. We
7501 could find the decl by rummaging around in the block->objects
7502 VEC for the given offset but that seems like too much work. */
7503 dalign = BITS_PER_UNIT;
7504 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7505 && SYMBOL_REF_ANCHOR_P (op)
7506 && SYMBOL_REF_BLOCK (op) != NULL)
7508 struct object_block *block = SYMBOL_REF_BLOCK (op);
7510 dalign = block->alignment;
7511 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7513 else if (CONSTANT_POOL_ADDRESS_P (op))
7515 /* It would be nice to have get_pool_align ()... */
7516 machine_mode cmode = get_pool_mode (op);
7518 dalign = GET_MODE_ALIGNMENT (cmode);
7521 else if (DECL_P (decl))
7523 dalign = DECL_ALIGN (decl);
7525 if (dsize == 0)
7527 /* Allow BLKmode when the entire object is known to not
7528 cross a 32k boundary. */
7529 if (!DECL_SIZE_UNIT (decl))
7530 return false;
7532 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7533 return false;
7535 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7536 if (dsize > 32768)
7537 return false;
7539 dalign /= BITS_PER_UNIT;
7540 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7541 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7542 return dalign >= dsize;
7545 else
7546 gcc_unreachable ();
7548 /* Find how many bits of the alignment we know for this access. */
7549 dalign /= BITS_PER_UNIT;
7550 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7551 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7552 mask = dalign - 1;
7553 lsb = offset & -offset;
7554 mask &= lsb - 1;
7555 dalign = mask + 1;
7557 return dalign >= dsize;
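/* Editor's sketch (illustrative; known_align is a hypothetical helper):
   the lsb/mask computation above derives the guaranteed alignment of
   base + offset.  offset & -offset isolates the lowest set bit of the
   offset, and masking with lsb - 1 clamps the known alignment to
   min (dalign, lsb); a zero offset leaves dalign unchanged because
   lsb - 1 wraps to all-ones. */
#if 0
#include <assert.h>
static unsigned long known_align (unsigned long dalign, unsigned long offset)
{
  unsigned long mask = dalign - 1;
  unsigned long lsb = offset & -offset;  /* lowest set bit; 0 if offset is 0 */
  mask &= lsb - 1;                       /* lsb == 0 wraps: mask kept intact */
  return mask + 1;
}

int main (void)
{
  assert (known_align (8, 0) == 8);    /* no offset: full alignment kept  */
  assert (known_align (8, 16) == 8);   /* 16 preserves 8-byte alignment   */
  assert (known_align (8, 4) == 4);    /* drops the alignment to 4 bytes  */
  assert (known_align (8, 6) == 2);    /* lowest set bit of 6 is 2        */
  return 0;
}
#endif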
7560 static bool
7561 constant_pool_expr_p (rtx op)
7563 rtx base, offset;
7565 split_const (op, &base, &offset);
7566 return (GET_CODE (base) == SYMBOL_REF
7567 && CONSTANT_POOL_ADDRESS_P (base)
7568 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7571 static const_rtx tocrel_base, tocrel_offset;
7573 /* Return true if OP is a toc pointer relative address (the output
7574 of create_TOC_reference). If STRICT, do not match high part or
7575 non-split -mcmodel=large/medium toc pointer relative addresses. */
7577 bool
7578 toc_relative_expr_p (const_rtx op, bool strict)
7580 if (!TARGET_TOC)
7581 return false;
7583 if (TARGET_CMODEL != CMODEL_SMALL)
7585 /* Only match the low part. */
7586 if (GET_CODE (op) == LO_SUM
7587 && REG_P (XEXP (op, 0))
7588 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
7589 op = XEXP (op, 1);
7590 else if (strict)
7591 return false;
7594 tocrel_base = op;
7595 tocrel_offset = const0_rtx;
7596 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7598 tocrel_base = XEXP (op, 0);
7599 tocrel_offset = XEXP (op, 1);
7602 return (GET_CODE (tocrel_base) == UNSPEC
7603 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
7606 /* Return true if X is a constant pool address, and also for cmodel=medium
7607 if X is a toc-relative address known to be offsettable within MODE. */
7609 bool
7610 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7611 bool strict)
7613 return (toc_relative_expr_p (x, strict)
7614 && (TARGET_CMODEL != CMODEL_MEDIUM
7615 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7616 || mode == QImode
7617 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7618 INTVAL (tocrel_offset), mode)));
7621 static bool
7622 legitimate_small_data_p (machine_mode mode, rtx x)
7624 return (DEFAULT_ABI == ABI_V4
7625 && !flag_pic && !TARGET_TOC
7626 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7627 && small_data_operand (x, mode));
7630 /* SPE offset addressing is limited to 5 bits' worth of doublewords (offsets 0, 8, ..., 248). */
7631 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
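/* Editor's note (illustrative): the mask above accepts exactly the
   offsets 0, 8, 16, ..., 248 -- a 5-bit doubleword count scaled by 8.
   Any bit outside 0xf8, whether a low alignment bit or bit 8 and up,
   makes the test fail. */
#if 0
#include <assert.h>
int main (void)
{
  assert ((0 & ~0xf8) == 0 && (248 & ~0xf8) == 0);  /* in range          */
  assert ((4 & ~0xf8) != 0);     /* not doubleword aligned               */
  assert ((256 & ~0xf8) != 0);   /* would need a sixth offset bit        */
  return 0;
}
#endif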
7633 bool
7634 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7635 bool strict, bool worst_case)
7637 unsigned HOST_WIDE_INT offset;
7638 unsigned int extra;
7640 if (GET_CODE (x) != PLUS)
7641 return false;
7642 if (!REG_P (XEXP (x, 0)))
7643 return false;
7644 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7645 return false;
7646 if (mode_supports_vsx_dform_quad (mode))
7647 return quad_address_p (x, mode, strict);
7648 if (!reg_offset_addressing_ok_p (mode))
7649 return virtual_stack_registers_memory_p (x);
7650 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7651 return true;
7652 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
7653 return false;
7655 offset = INTVAL (XEXP (x, 1));
7656 extra = 0;
7657 switch (mode)
7659 case V4HImode:
7660 case V2SImode:
7661 case V1DImode:
7662 case V2SFmode:
7663 /* SPE vector modes. */
7664 return SPE_CONST_OFFSET_OK (offset);
7666 case DFmode:
7667 case DDmode:
7668 case DImode:
7669 /* On e500v2, we may have:
7671 (subreg:DF (mem:DI (plus (reg) (const_int))) 0),
7673 which gets addressed with evldd instructions. */
7674 if (TARGET_E500_DOUBLE)
7675 return SPE_CONST_OFFSET_OK (offset);
7677 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7678 addressing. */
7679 if (VECTOR_MEM_VSX_P (mode))
7680 return false;
7682 if (!worst_case)
7683 break;
7684 if (!TARGET_POWERPC64)
7685 extra = 4;
7686 else if (offset & 3)
7687 return false;
7688 break;
7690 case TFmode:
7691 case IFmode:
7692 case KFmode:
7693 if (TARGET_E500_DOUBLE)
7694 return (SPE_CONST_OFFSET_OK (offset)
7695 && SPE_CONST_OFFSET_OK (offset + 8));
7696 /* fall through */
7698 case TDmode:
7699 case TImode:
7700 case PTImode:
7701 extra = 8;
7702 if (!worst_case)
7703 break;
7704 if (!TARGET_POWERPC64)
7705 extra = 12;
7706 else if (offset & 3)
7707 return false;
7708 break;
7710 default:
7711 break;
7714 offset += 0x8000;
7715 return offset < 0x10000 - extra;
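/* Editor's sketch (illustrative; offset_ok is a hypothetical helper):
   the final biased compare above, where `extra' reserves room for the
   trailing words of a multi-word access.  For the TImode worst case on
   32-bit, extra = 12, so the last word at offset + 12 must still be
   encodable in the 16-bit displacement field. */
#if 0
#include <assert.h>
static int offset_ok (unsigned long offset, unsigned int extra)
{
  offset += 0x8000;                 /* bias: signed range becomes 0-based  */
  return offset < 0x10000 - extra;  /* i.e. -32768 <= off < 32768 - extra  */
}

int main (void)
{
  assert (offset_ok (32752, 12));    /* 32752 + 12 = 32764, encodable      */
  assert (!offset_ok (32756, 12));   /* 32756 + 12 = 32768, not encodable  */
  assert (offset_ok (-32768ul, 0));  /* most negative displacement         */
  return 0;
}
#endif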
7718 bool
7719 legitimate_indexed_address_p (rtx x, int strict)
7721 rtx op0, op1;
7723 if (GET_CODE (x) != PLUS)
7724 return false;
7726 op0 = XEXP (x, 0);
7727 op1 = XEXP (x, 1);
7729 /* Recognize the rtl generated by reload which we know will later be
7730 replaced with proper base and index regs. */
7731 if (!strict
7732 && reload_in_progress
7733 && (REG_P (op0) || GET_CODE (op0) == PLUS)
7734 && REG_P (op1))
7735 return true;
7737 return (REG_P (op0) && REG_P (op1)
7738 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
7739 && INT_REG_OK_FOR_INDEX_P (op1, strict))
7740 || (INT_REG_OK_FOR_BASE_P (op1, strict)
7741 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
7744 bool
7745 avoiding_indexed_address_p (machine_mode mode)
7747 /* Avoid indexed addressing for modes that have non-indexed
7748 load/store instruction forms. */
7749 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
7752 bool
7753 legitimate_indirect_address_p (rtx x, int strict)
7755 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
7758 bool
7759 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
7761 if (!TARGET_MACHO || !flag_pic
7762 || mode != SImode || GET_CODE (x) != MEM)
7763 return false;
7764 x = XEXP (x, 0);
7766 if (GET_CODE (x) != LO_SUM)
7767 return false;
7768 if (GET_CODE (XEXP (x, 0)) != REG)
7769 return false;
7770 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
7771 return false;
7772 x = XEXP (x, 1);
7774 return CONSTANT_P (x);
7777 static bool
7778 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
7780 if (GET_CODE (x) != LO_SUM)
7781 return false;
7782 if (GET_CODE (XEXP (x, 0)) != REG)
7783 return false;
7784 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7785 return false;
7786 /* Quad word addresses are restricted; we can't use LO_SUM. */
7787 if (mode_supports_vsx_dform_quad (mode))
7788 return false;
7789 /* Restrict addressing for DI because of our SUBREG hackery. */
7790 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
7791 return false;
7792 x = XEXP (x, 1);
7794 if (TARGET_ELF || TARGET_MACHO)
7796 bool large_toc_ok;
7798 if (DEFAULT_ABI == ABI_V4 && flag_pic)
7799 return false;
7800 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as that macro usually
7801 calls push_reload from the reload pass code. LEGITIMIZE_RELOAD_ADDRESS
7802 recognizes some LO_SUM addresses as valid although this
7803 function says the opposite. In most cases LRA can, through various
7804 transformations, generate correct code for address reloads; it fails
7805 only for certain LO_SUM cases. So we need to add code here, analogous
7806 to the LO_SUM handling in rs6000_legitimize_reload_address, saying
7807 that some addresses are still valid. */
7808 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
7809 && small_toc_ref (x, VOIDmode));
7810 if (TARGET_TOC && ! large_toc_ok)
7811 return false;
7812 if (GET_MODE_NUNITS (mode) != 1)
7813 return false;
7814 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
7815 && !(/* ??? Assume floating point reg based on mode? */
7816 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
7817 && (mode == DFmode || mode == DDmode)))
7818 return false;
7820 return CONSTANT_P (x) || large_toc_ok;
7823 return false;
7827 /* Try machine-dependent ways of modifying an illegitimate address
7828 to be legitimate. If we find one, return the new, valid address.
7829 This is used from only one place: `memory_address' in explow.c.
7831 OLDX is the address as it was before break_out_memory_refs was
7832 called. In some cases it is useful to look at this to decide what
7833 needs to be done.
7835 It is always safe for this function to do nothing. It exists to
7836 recognize opportunities to optimize the output.
7838 On RS/6000, first check for the sum of a register with a constant
7839 integer that is out of range. If so, generate code to add the
7840 constant with the low-order 16 bits masked to the register and force
7841 this result into another register (this can be done with `cau').
7842 Then generate an address of REG+(CONST&0xffff), allowing for the
7843 possibility of bit 16 being a one.
7845 Then check for the sum of a register and something not constant, try to
7846 load the other things into a register and return the sum. */
7848 static rtx
7849 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
7850 machine_mode mode)
7852 unsigned int extra;
7854 if (!reg_offset_addressing_ok_p (mode)
7855 || mode_supports_vsx_dform_quad (mode))
7857 if (virtual_stack_registers_memory_p (x))
7858 return x;
7860 /* In theory we should not be seeing addresses of the form reg+0,
7861 but just in case it is generated, optimize it away. */
7862 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
7863 return force_reg (Pmode, XEXP (x, 0));
7865 /* For TImode with load/store quad, restrict addresses to just a single
7866 pointer, so it works with both GPRs and VSX registers. */
7867 /* Make sure both operands are registers. */
7868 else if (GET_CODE (x) == PLUS
7869 && (mode != TImode || !TARGET_QUAD_MEMORY))
7870 return gen_rtx_PLUS (Pmode,
7871 force_reg (Pmode, XEXP (x, 0)),
7872 force_reg (Pmode, XEXP (x, 1)));
7873 else
7874 return force_reg (Pmode, x);
7876 if (GET_CODE (x) == SYMBOL_REF)
7878 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
7879 if (model != 0)
7880 return rs6000_legitimize_tls_address (x, model);
7883 extra = 0;
7884 switch (mode)
7886 case TFmode:
7887 case TDmode:
7888 case TImode:
7889 case PTImode:
7890 case IFmode:
7891 case KFmode:
7892 /* As in legitimate_offset_address_p we do not assume
7893 worst-case. The mode here is just a hint as to the registers
7894 used. A TImode is usually in gprs, but may actually be in
7895 fprs. Leave worst-case scenario for reload to handle via
7896 insn constraints. PTImode is only GPRs. */
7897 extra = 8;
7898 break;
7899 default:
7900 break;
7903 if (GET_CODE (x) == PLUS
7904 && GET_CODE (XEXP (x, 0)) == REG
7905 && GET_CODE (XEXP (x, 1)) == CONST_INT
7906 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
7907 >= 0x10000 - extra)
7908 && !(SPE_VECTOR_MODE (mode)
7909 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
7911 HOST_WIDE_INT high_int, low_int;
7912 rtx sum;
7913 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
7914 if (low_int >= 0x8000 - extra)
7915 low_int = 0;
7916 high_int = INTVAL (XEXP (x, 1)) - low_int;
7917 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
7918 GEN_INT (high_int)), 0);
7919 return plus_constant (Pmode, sum, low_int);
7921 else if (GET_CODE (x) == PLUS
7922 && GET_CODE (XEXP (x, 0)) == REG
7923 && GET_CODE (XEXP (x, 1)) != CONST_INT
7924 && GET_MODE_NUNITS (mode) == 1
7925 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
7926 || (/* ??? Assume floating point reg based on mode? */
7927 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7928 && (mode == DFmode || mode == DDmode)))
7929 && !avoiding_indexed_address_p (mode))
7931 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
7932 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
7934 else if (SPE_VECTOR_MODE (mode)
7935 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
7937 if (mode == DImode)
7938 return x;
7939 /* We accept [reg + reg] and [reg + OFFSET]. */
7941 if (GET_CODE (x) == PLUS)
7943 rtx op1 = XEXP (x, 0);
7944 rtx op2 = XEXP (x, 1);
7945 rtx y;
7947 op1 = force_reg (Pmode, op1);
7949 if (GET_CODE (op2) != REG
7950 && (GET_CODE (op2) != CONST_INT
7951 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
7952 || (GET_MODE_SIZE (mode) > 8
7953 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
7954 op2 = force_reg (Pmode, op2);
7956 /* We can't always do [reg + reg] for these, because [reg +
7957 reg + offset] is not a legitimate addressing mode. */
7958 y = gen_rtx_PLUS (Pmode, op1, op2);
7960 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
7961 return force_reg (Pmode, y);
7962 else
7963 return y;
7966 return force_reg (Pmode, x);
7968 else if ((TARGET_ELF
7969 #if TARGET_MACHO
7970 || !MACHO_DYNAMIC_NO_PIC_P
7971 #endif
7973 && TARGET_32BIT
7974 && TARGET_NO_TOC
7975 && ! flag_pic
7976 && GET_CODE (x) != CONST_INT
7977 && GET_CODE (x) != CONST_WIDE_INT
7978 && GET_CODE (x) != CONST_DOUBLE
7979 && CONSTANT_P (x)
7980 && GET_MODE_NUNITS (mode) == 1
7981 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
7982 || (/* ??? Assume floating point reg based on mode? */
7983 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7984 && (mode == DFmode || mode == DDmode))))
7986 rtx reg = gen_reg_rtx (Pmode);
7987 if (TARGET_ELF)
7988 emit_insn (gen_elf_high (reg, x));
7989 else
7990 emit_insn (gen_macho_high (reg, x));
7991 return gen_rtx_LO_SUM (Pmode, reg, x);
7993 else if (TARGET_TOC
7994 && GET_CODE (x) == SYMBOL_REF
7995 && constant_pool_expr_p (x)
7996 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
7997 return create_TOC_reference (x, NULL_RTX);
7998 else
7999 return x;
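/* Editor's sketch (illustrative, standalone): the high_int/low_int split
   above rewrites reg + LARGE as (reg + high) + low, where low is the
   sign-extended bottom 16 bits and high = LARGE - low goes into an
   addis.  (The code additionally zeroes low when it would collide with
   the multi-word `extra' slack.) */
#if 0
#include <assert.h>
int main (void)
{
  long val = 0x12348000;  /* does not fit a signed 16-bit displacement */
  long low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  long high = val - low;

  assert (low == -32768 && high == 0x12350000);
  assert (high + low == val);     /* addis picks up high, the mem insn low */
  assert ((high & 0xffff) == 0);  /* high half is a clean addis operand    */
  return 0;
}
#endif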
8002 /* Debug version of rs6000_legitimize_address. */
8003 static rtx
8004 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8006 rtx ret;
8007 rtx_insn *insns;
8009 start_sequence ();
8010 ret = rs6000_legitimize_address (x, oldx, mode);
8011 insns = get_insns ();
8012 end_sequence ();
8014 if (ret != x)
8016 fprintf (stderr,
8017 "\nrs6000_legitimize_address: mode %s, old code %s, "
8018 "new code %s, modified\n",
8019 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8020 GET_RTX_NAME (GET_CODE (ret)));
8022 fprintf (stderr, "Original address:\n");
8023 debug_rtx (x);
8025 fprintf (stderr, "oldx:\n");
8026 debug_rtx (oldx);
8028 fprintf (stderr, "New address:\n");
8029 debug_rtx (ret);
8031 if (insns)
8033 fprintf (stderr, "Insns added:\n");
8034 debug_rtx_list (insns, 20);
8037 else
8039 fprintf (stderr,
8040 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8041 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8043 debug_rtx (x);
8046 if (insns)
8047 emit_insn (insns);
8049 return ret;
8052 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8053 We need to emit DTP-relative relocations. */
8055 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8056 static void
8057 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8059 switch (size)
8061 case 4:
8062 fputs ("\t.long\t", file);
8063 break;
8064 case 8:
8065 fputs (DOUBLE_INT_ASM_OP, file);
8066 break;
8067 default:
8068 gcc_unreachable ();
8070 output_addr_const (file, x);
8071 if (TARGET_ELF)
8072 fputs ("@dtprel+0x8000", file);
8073 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8075 switch (SYMBOL_REF_TLS_MODEL (x))
8077 case 0:
8078 break;
8079 case TLS_MODEL_LOCAL_EXEC:
8080 fputs ("@le", file);
8081 break;
8082 case TLS_MODEL_INITIAL_EXEC:
8083 fputs ("@ie", file);
8084 break;
8085 case TLS_MODEL_GLOBAL_DYNAMIC:
8086 case TLS_MODEL_LOCAL_DYNAMIC:
8087 fputs ("@m", file);
8088 break;
8089 default:
8090 gcc_unreachable ();
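/* Editor's note (illustrative): for ELF, the function above emits a
   directive of the form
       .long   sym@dtprel+0x8000
   (or the 8-byte variant via DOUBLE_INT_ASM_OP), while on XCOFF the
   symbol is suffixed @le, @ie or @m according to its TLS model. */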
8095 /* Return true if X is a symbol that refers to real (rather than emulated)
8096 TLS. */
8098 static bool
8099 rs6000_real_tls_symbol_ref_p (rtx x)
8101 return (GET_CODE (x) == SYMBOL_REF
8102 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8105 /* In the name of slightly smaller debug output, and to cater to
8106 general assembler lossage, recognize various UNSPEC sequences
8107 and turn them back into a direct symbol reference. */
8109 static rtx
8110 rs6000_delegitimize_address (rtx orig_x)
8112 rtx x, y, offset;
8114 orig_x = delegitimize_mem_from_attrs (orig_x);
8115 x = orig_x;
8116 if (MEM_P (x))
8117 x = XEXP (x, 0);
8119 y = x;
8120 if (TARGET_CMODEL != CMODEL_SMALL
8121 && GET_CODE (y) == LO_SUM)
8122 y = XEXP (y, 1);
8124 offset = NULL_RTX;
8125 if (GET_CODE (y) == PLUS
8126 && GET_MODE (y) == Pmode
8127 && CONST_INT_P (XEXP (y, 1)))
8129 offset = XEXP (y, 1);
8130 y = XEXP (y, 0);
8133 if (GET_CODE (y) == UNSPEC
8134 && XINT (y, 1) == UNSPEC_TOCREL)
8136 y = XVECEXP (y, 0, 0);
8138 #ifdef HAVE_AS_TLS
8139 /* Do not associate thread-local symbols with the original
8140 constant pool symbol. */
8141 if (TARGET_XCOFF
8142 && GET_CODE (y) == SYMBOL_REF
8143 && CONSTANT_POOL_ADDRESS_P (y)
8144 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8145 return orig_x;
8146 #endif
8148 if (offset != NULL_RTX)
8149 y = gen_rtx_PLUS (Pmode, y, offset);
8150 if (!MEM_P (orig_x))
8151 return y;
8152 else
8153 return replace_equiv_address_nv (orig_x, y);
8156 if (TARGET_MACHO
8157 && GET_CODE (orig_x) == LO_SUM
8158 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8160 y = XEXP (XEXP (orig_x, 1), 0);
8161 if (GET_CODE (y) == UNSPEC
8162 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8163 return XVECEXP (y, 0, 0);
8166 return orig_x;
8169 /* Return true if X shouldn't be emitted into the debug info.
8170 The linker doesn't like .toc section references from
8171 .debug_* sections, so reject .toc section symbols. */
8173 static bool
8174 rs6000_const_not_ok_for_debug_p (rtx x)
8176 if (GET_CODE (x) == SYMBOL_REF
8177 && CONSTANT_POOL_ADDRESS_P (x))
8179 rtx c = get_pool_constant (x);
8180 machine_mode cmode = get_pool_mode (x);
8181 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8182 return true;
8185 return false;
8188 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8190 static GTY(()) rtx rs6000_tls_symbol;
8191 static rtx
8192 rs6000_tls_get_addr (void)
8194 if (!rs6000_tls_symbol)
8195 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8197 return rs6000_tls_symbol;
8200 /* Construct the SYMBOL_REF for TLS GOT references. */
8202 static GTY(()) rtx rs6000_got_symbol;
8203 static rtx
8204 rs6000_got_sym (void)
8206 if (!rs6000_got_symbol)
8208 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8209 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8210 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8213 return rs6000_got_symbol;
8216 /* AIX Thread-Local Address support. */
8218 static rtx
8219 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8221 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8222 const char *name;
8223 char *tlsname;
8225 name = XSTR (addr, 0);
8226 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
8227 or will be placed in the TLS private data section. */
8228 if (name[strlen (name) - 1] != ']'
8229 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8230 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8232 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8233 strcpy (tlsname, name);
8234 strcat (tlsname,
8235 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8236 tlsaddr = copy_rtx (addr);
8237 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8239 else
8240 tlsaddr = addr;
8242 /* Place addr into TOC constant pool. */
8243 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8245 /* Output the TOC entry and create the MEM referencing the value. */
8246 if (constant_pool_expr_p (XEXP (sym, 0))
8247 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8249 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8250 mem = gen_const_mem (Pmode, tocref);
8251 set_mem_alias_set (mem, get_TOC_alias_set ());
8253 else
8254 return sym;
8256 /* Use global-dynamic for local-dynamic. */
8257 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8258 || model == TLS_MODEL_LOCAL_DYNAMIC)
8260 /* Create new TOC reference for @m symbol. */
8261 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8262 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8263 strcpy (tlsname, "*LCM");
8264 strcat (tlsname, name + 3);
8265 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8266 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8267 tocref = create_TOC_reference (modaddr, NULL_RTX);
8268 rtx modmem = gen_const_mem (Pmode, tocref);
8269 set_mem_alias_set (modmem, get_TOC_alias_set ());
8271 rtx modreg = gen_reg_rtx (Pmode);
8272 emit_insn (gen_rtx_SET (modreg, modmem));
8274 tmpreg = gen_reg_rtx (Pmode);
8275 emit_insn (gen_rtx_SET (tmpreg, mem));
8277 dest = gen_reg_rtx (Pmode);
8278 if (TARGET_32BIT)
8279 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8280 else
8281 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8282 return dest;
8284 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8285 else if (TARGET_32BIT)
8287 tlsreg = gen_reg_rtx (SImode);
8288 emit_insn (gen_tls_get_tpointer (tlsreg));
8290 else
8291 tlsreg = gen_rtx_REG (DImode, 13);
8293 /* Load the TOC value into temporary register. */
8294 tmpreg = gen_reg_rtx (Pmode);
8295 emit_insn (gen_rtx_SET (tmpreg, mem));
8296 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8297 gen_rtx_MINUS (Pmode, addr, tlsreg));
8299 /* Add TOC symbol value to TLS pointer. */
8300 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8302 return dest;
8305 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8306 this (thread-local) address. */
8308 static rtx
8309 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8311 rtx dest, insn;
8313 if (TARGET_XCOFF)
8314 return rs6000_legitimize_tls_address_aix (addr, model);
8316 dest = gen_reg_rtx (Pmode);
8317 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8319 rtx tlsreg;
8321 if (TARGET_64BIT)
8323 tlsreg = gen_rtx_REG (Pmode, 13);
8324 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8326 else
8328 tlsreg = gen_rtx_REG (Pmode, 2);
8329 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8331 emit_insn (insn);
8333 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8335 rtx tlsreg, tmp;
8337 tmp = gen_reg_rtx (Pmode);
8338 if (TARGET_64BIT)
8340 tlsreg = gen_rtx_REG (Pmode, 13);
8341 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8343 else
8345 tlsreg = gen_rtx_REG (Pmode, 2);
8346 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8348 emit_insn (insn);
8349 if (TARGET_64BIT)
8350 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8351 else
8352 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8353 emit_insn (insn);
8355 else
8357 rtx r3, got, tga, tmp1, tmp2, call_insn;
8359 /* We currently use relocations like @got@tlsgd for tls, which
8360 means the linker will handle allocation of tls entries, placing
8361 them in the .got section. So use a pointer to the .got section,
8362 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8363 or to secondary GOT sections used by 32-bit -fPIC. */
8364 if (TARGET_64BIT)
8365 got = gen_rtx_REG (Pmode, 2);
8366 else
8368 if (flag_pic == 1)
8369 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8370 else
8372 rtx gsym = rs6000_got_sym ();
8373 got = gen_reg_rtx (Pmode);
8374 if (flag_pic == 0)
8375 rs6000_emit_move (got, gsym, Pmode);
8376 else
8378 rtx mem, lab, last;
8380 tmp1 = gen_reg_rtx (Pmode);
8381 tmp2 = gen_reg_rtx (Pmode);
8382 mem = gen_const_mem (Pmode, tmp1);
8383 lab = gen_label_rtx ();
8384 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8385 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8386 if (TARGET_LINK_STACK)
8387 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8388 emit_move_insn (tmp2, mem);
8389 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8390 set_unique_reg_note (last, REG_EQUAL, gsym);
8395 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8397 tga = rs6000_tls_get_addr ();
8398 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8399 1, const0_rtx, Pmode);
8401 r3 = gen_rtx_REG (Pmode, 3);
8402 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8404 if (TARGET_64BIT)
8405 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
8406 else
8407 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
8409 else if (DEFAULT_ABI == ABI_V4)
8410 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
8411 else
8412 gcc_unreachable ();
8413 call_insn = last_call_insn ();
8414 PATTERN (call_insn) = insn;
8415 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8416 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8417 pic_offset_table_rtx);
8419 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8421 tga = rs6000_tls_get_addr ();
8422 tmp1 = gen_reg_rtx (Pmode);
8423 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8424 1, const0_rtx, Pmode);
8426 r3 = gen_rtx_REG (Pmode, 3);
8427 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8429 if (TARGET_64BIT)
8430 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
8431 else
8432 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
8434 else if (DEFAULT_ABI == ABI_V4)
8435 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
8436 else
8437 gcc_unreachable ();
8438 call_insn = last_call_insn ();
8439 PATTERN (call_insn) = insn;
8440 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8441 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8442 pic_offset_table_rtx);
8444 if (rs6000_tls_size == 16)
8446 if (TARGET_64BIT)
8447 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8448 else
8449 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8451 else if (rs6000_tls_size == 32)
8453 tmp2 = gen_reg_rtx (Pmode);
8454 if (TARGET_64BIT)
8455 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8456 else
8457 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8458 emit_insn (insn);
8459 if (TARGET_64BIT)
8460 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8461 else
8462 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8464 else
8466 tmp2 = gen_reg_rtx (Pmode);
8467 if (TARGET_64BIT)
8468 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8469 else
8470 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8471 emit_insn (insn);
8472 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8474 emit_insn (insn);
8476 else
8478 /* IE, or 64-bit offset LE. */
8479 tmp2 = gen_reg_rtx (Pmode);
8480 if (TARGET_64BIT)
8481 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8482 else
8483 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8484 emit_insn (insn);
8485 if (TARGET_64BIT)
8486 insn = gen_tls_tls_64 (dest, tmp2, addr);
8487 else
8488 insn = gen_tls_tls_32 (dest, tmp2, addr);
8489 emit_insn (insn);
8493 return dest;
8496 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8498 static bool
8499 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8501 if (GET_CODE (x) == HIGH
8502 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8503 return true;
8505 /* A TLS symbol in the TOC cannot contain a sum. */
8506 if (GET_CODE (x) == CONST
8507 && GET_CODE (XEXP (x, 0)) == PLUS
8508 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8509 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8510 return true;
8512 /* Do not place an ELF TLS symbol in the constant pool. */
8513 return TARGET_ELF && tls_referenced_p (x);
8516 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8517 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8518 can be addressed relative to the toc pointer. */
8520 static bool
8521 use_toc_relative_ref (rtx sym, machine_mode mode)
8523 return ((constant_pool_expr_p (sym)
8524 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8525 get_pool_mode (sym)))
8526 || (TARGET_CMODEL == CMODEL_MEDIUM
8527 && SYMBOL_REF_LOCAL_P (sym)
8528 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8531 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8532 replace the input X, or the original X if no replacement is called for.
8533 The output parameter *WIN is 1 if the calling macro should goto WIN,
8534 0 if it should not.
8536 For RS/6000, we wish to handle large displacements off a base
8537 register by splitting the addend across an addi/addis pair and the
8538 mem insn. This cuts the number of extra insns needed from 3 to 1.
8540 On Darwin, we use this to generate code for floating point constants.
8541 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8542 The Darwin code is inside #if TARGET_MACHO because only then are the
8543 machopic_* functions defined. */
8544 static rtx
8545 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8546 int opnum, int type,
8547 int ind_levels ATTRIBUTE_UNUSED, int *win)
8549 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8550 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
8552 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8553 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8554 if (reg_offset_p
8555 && opnum == 1
8556 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8557 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8558 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8559 && TARGET_P9_VECTOR)
8560 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8561 && TARGET_P9_VECTOR)))
8562 reg_offset_p = false;
8564 /* We must recognize output that we have already generated ourselves. */
8565 if (GET_CODE (x) == PLUS
8566 && GET_CODE (XEXP (x, 0)) == PLUS
8567 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8568 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8569 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8571 if (TARGET_DEBUG_ADDR)
8573 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8574 debug_rtx (x);
8576 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8577 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8578 opnum, (enum reload_type) type);
8579 *win = 1;
8580 return x;
8583 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8584 if (GET_CODE (x) == LO_SUM
8585 && GET_CODE (XEXP (x, 0)) == HIGH)
8587 if (TARGET_DEBUG_ADDR)
8589 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8590 debug_rtx (x);
8592 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8593 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8594 opnum, (enum reload_type) type);
8595 *win = 1;
8596 return x;
8599 #if TARGET_MACHO
8600 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8601 && GET_CODE (x) == LO_SUM
8602 && GET_CODE (XEXP (x, 0)) == PLUS
8603 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8604 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8605 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8606 && machopic_operand_p (XEXP (x, 1)))
8608 /* Result of previous invocation of this function on Darwin
8609 floating point constant. */
8610 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8611 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8612 opnum, (enum reload_type) type);
8613 *win = 1;
8614 return x;
8616 #endif
8618 if (TARGET_CMODEL != CMODEL_SMALL
8619 && reg_offset_p
8620 && !quad_offset_p
8621 && small_toc_ref (x, VOIDmode))
8623 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8624 x = gen_rtx_LO_SUM (Pmode, hi, x);
8625 if (TARGET_DEBUG_ADDR)
8627 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8628 debug_rtx (x);
8630 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8631 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8632 opnum, (enum reload_type) type);
8633 *win = 1;
8634 return x;
8637 if (GET_CODE (x) == PLUS
8638 && REG_P (XEXP (x, 0))
8639 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
8640 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8641 && CONST_INT_P (XEXP (x, 1))
8642 && reg_offset_p
8643 && !SPE_VECTOR_MODE (mode)
8644 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
8645 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8647 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
8648 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
8649 HOST_WIDE_INT high
8650 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8652 /* Check for 32-bit overflow or quad addresses with one of the
8653 four least significant bits set. */
8654 if (high + low != val
8655 || (quad_offset_p && (low & 0xf)))
8657 *win = 0;
8658 return x;
8661 /* Reload the high part into a base reg; leave the low part
8662 in the mem directly. */
8664 x = gen_rtx_PLUS (GET_MODE (x),
8665 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8666 GEN_INT (high)),
8667 GEN_INT (low));
8669 if (TARGET_DEBUG_ADDR)
8671 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
8672 debug_rtx (x);
8674 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8675 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8676 opnum, (enum reload_type) type);
8677 *win = 1;
8678 return x;
8681 if (GET_CODE (x) == SYMBOL_REF
8682 && reg_offset_p
8683 && !quad_offset_p
8684 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
8685 && !SPE_VECTOR_MODE (mode)
8686 #if TARGET_MACHO
8687 && DEFAULT_ABI == ABI_DARWIN
8688 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
8689 && machopic_symbol_defined_p (x)
8690 #else
8691 && DEFAULT_ABI == ABI_V4
8692 && !flag_pic
8693 #endif
8694 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
8695 The same goes for DImode without 64-bit gprs and DFmode and DDmode
8696 without fprs.
8697 ??? Assume floating point reg based on mode? This assumption is
8698 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
8699 where reload ends up doing a DFmode load of a constant from
8700 mem using two gprs. Unfortunately, at this point reload
8701 hasn't yet selected regs so poking around in reload data
8702 won't help and even if we could figure out the regs reliably,
8703 we'd still want to allow this transformation when the mem is
8704 naturally aligned. Since we say the address is good here, we
8705 can't disable offsets from LO_SUMs in mem_operand_gpr.
8706 FIXME: Allow offset from lo_sum for other modes too, when
8707 mem is sufficiently aligned.
8709 Also disallow this if the type can go in VMX/Altivec registers, since
8710 those registers do not have d-form (reg+offset) address modes. */
8711 && !reg_addr[mode].scalar_in_vmx_p
8712 && mode != TFmode
8713 && mode != TDmode
8714 && mode != IFmode
8715 && mode != KFmode
8716 && (mode != TImode || !TARGET_VSX_TIMODE)
8717 && mode != PTImode
8718 && (mode != DImode || TARGET_POWERPC64)
8719 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
8720 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
8722 #if TARGET_MACHO
8723 if (flag_pic)
8725 rtx offset = machopic_gen_offset (x);
8726 x = gen_rtx_LO_SUM (GET_MODE (x),
8727 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
8728 gen_rtx_HIGH (Pmode, offset)), offset);
8730 else
8731 #endif
8732 x = gen_rtx_LO_SUM (GET_MODE (x),
8733 gen_rtx_HIGH (Pmode, x), x);
8735 if (TARGET_DEBUG_ADDR)
8737 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
8738 debug_rtx (x);
8740 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8741 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8742 opnum, (enum reload_type) type);
8743 *win = 1;
8744 return x;
8747 /* Reload an offset address wrapped by an AND that represents the
8748 masking of the lower bits. Strip the outer AND and let reload
8749 convert the offset address into an indirect address. For VSX,
8750 force reload to create the address with an AND in a separate
8751 register, because we can't guarantee an altivec register will
8752 be used. */
8753 if (VECTOR_MEM_ALTIVEC_P (mode)
8754 && GET_CODE (x) == AND
8755 && GET_CODE (XEXP (x, 0)) == PLUS
8756 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8757 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8758 && GET_CODE (XEXP (x, 1)) == CONST_INT
8759 && INTVAL (XEXP (x, 1)) == -16)
8761 x = XEXP (x, 0);
8762 *win = 1;
8763 return x;
8766 if (TARGET_TOC
8767 && reg_offset_p
8768 && !quad_offset_p
8769 && GET_CODE (x) == SYMBOL_REF
8770 && use_toc_relative_ref (x, mode))
8772 x = create_TOC_reference (x, NULL_RTX);
8773 if (TARGET_CMODEL != CMODEL_SMALL)
8775 if (TARGET_DEBUG_ADDR)
8777 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
8778 debug_rtx (x);
8780 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8781 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8782 opnum, (enum reload_type) type);
8784 *win = 1;
8785 return x;
8787 *win = 0;
8788 return x;
8791 /* Debug version of rs6000_legitimize_reload_address. */
8792 static rtx
8793 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
8794 int opnum, int type,
8795 int ind_levels, int *win)
8797 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
8798 ind_levels, win);
8799 fprintf (stderr,
8800 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
8801 "type = %d, ind_levels = %d, win = %d, original addr:\n",
8802 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
8803 debug_rtx (x);
8805 if (x == ret)
8806 fprintf (stderr, "Same address returned\n");
8807 else if (!ret)
8808 fprintf (stderr, "NULL returned\n");
8809 else
8811 fprintf (stderr, "New address:\n");
8812 debug_rtx (ret);
8815 return ret;
8818 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
8819 that is a valid memory address for an instruction.
8820 The MODE argument is the machine mode for the MEM expression
8821 that wants to use this address.
8823 On the RS/6000, there are four valid kinds of address: a SYMBOL_REF
8824 that refers to a constant pool entry of an address (or the sum of it
8825 plus a constant), a short (16-bit signed) constant plus a register,
8826 the sum of two registers, or a register indirect, possibly with an
8827 auto-increment. For DFmode, DDmode and DImode with a constant plus
8828 register, we must ensure that both words are addressable, or on
8829 PowerPC64 that the offset is word aligned.
8831 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
8832 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
8833 because adjacent memory cells are accessed by adding word-sized offsets
8834 during assembly output. */
8835 static bool
8836 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
8838 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8839 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
8841 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
8842 if (VECTOR_MEM_ALTIVEC_P (mode)
8843 && GET_CODE (x) == AND
8844 && GET_CODE (XEXP (x, 1)) == CONST_INT
8845 && INTVAL (XEXP (x, 1)) == -16)
8846 x = XEXP (x, 0);
8848 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
8849 return 0;
8850 if (legitimate_indirect_address_p (x, reg_ok_strict))
8851 return 1;
8852 if (TARGET_UPDATE
8853 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
8854 && mode_supports_pre_incdec_p (mode)
8855 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
8856 return 1;
8857 /* Handle restricted vector d-form offsets in ISA 3.0. */
8858 if (quad_offset_p)
8860 if (quad_address_p (x, mode, reg_ok_strict))
8861 return 1;
8863 else if (virtual_stack_registers_memory_p (x))
8864 return 1;
8866 else if (reg_offset_p)
8868 if (legitimate_small_data_p (mode, x))
8869 return 1;
8870 if (legitimate_constant_pool_address_p (x, mode,
8871 reg_ok_strict || lra_in_progress))
8872 return 1;
8873 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
8874 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
8875 return 1;
8878 /* For TImode, if we have load/store quad and TImode in VSX registers, only
8879 allow register indirect addresses. This will allow the values to go in
8880 either GPRs or VSX registers without reloading. The vector types would
8881 tend to go into VSX registers, so we allow REG+REG, while TImode seems
8882 somewhat split, in that some uses are GPR based, and some VSX based. */
8883 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
8884 return 0;
8885 /* If not REG_OK_STRICT (i.e. before reload), let any stack offset pass. */
8886 if (! reg_ok_strict
8887 && reg_offset_p
8888 && GET_CODE (x) == PLUS
8889 && GET_CODE (XEXP (x, 0)) == REG
8890 && (XEXP (x, 0) == virtual_stack_vars_rtx
8891 || XEXP (x, 0) == arg_pointer_rtx)
8892 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8893 return 1;
8894 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
8895 return 1;
8896 if (!FLOAT128_2REG_P (mode)
8897 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
8898 || TARGET_POWERPC64
8899 || (mode != DFmode && mode != DDmode)
8900 || (TARGET_E500_DOUBLE && mode != DDmode))
8901 && (TARGET_POWERPC64 || mode != DImode)
8902 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
8903 && mode != PTImode
8904 && !avoiding_indexed_address_p (mode)
8905 && legitimate_indexed_address_p (x, reg_ok_strict))
8906 return 1;
8907 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
8908 && mode_supports_pre_modify_p (mode)
8909 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
8910 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
8911 reg_ok_strict, false)
8912 || (!avoiding_indexed_address_p (mode)
8913 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
8914 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
8915 return 1;
8916 if (reg_offset_p && !quad_offset_p
8917 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
8918 return 1;
8919 return 0;
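/* Editor's note (illustrative): RTL shapes the function above can accept,
   subject to the mode and target checks it applies:
     (reg r9)                              register indirect
     (plus (reg r9) (const_int 64))        reg + 16-bit signed offset
     (plus (reg r9) (reg r10))             indexed (X-form)
     (pre_inc (reg r9)), (pre_modify ...)  update forms, TARGET_UPDATE
     (lo_sum (reg r9) (symbol_ref "x"))    low part of a high/lo_sum pair
   Each shape is admitted only when the corresponding predicate above
   returns true for the mode in question. */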
8922 /* Debug version of rs6000_legitimate_address_p. */
8923 static bool
8924 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
8925 bool reg_ok_strict)
8927 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
8928 fprintf (stderr,
8929 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
8930 "strict = %d, reload = %s, code = %s\n",
8931 ret ? "true" : "false",
8932 GET_MODE_NAME (mode),
8933 reg_ok_strict,
8934 (reload_completed
8935 ? "after"
8936 : (reload_in_progress ? "progress" : "before")),
8937 GET_RTX_NAME (GET_CODE (x)));
8938 debug_rtx (x);
8940 return ret;
8943 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
8945 static bool
8946 rs6000_mode_dependent_address_p (const_rtx addr,
8947 addr_space_t as ATTRIBUTE_UNUSED)
8949 return rs6000_mode_dependent_address_ptr (addr);
8952 /* Go to LABEL if ADDR (a legitimate address expression)
8953 has an effect that depends on the machine mode it is used for.
8955 On the RS/6000 this is true of all integral offsets (since AltiVec
8956 and VSX modes don't allow them) and of pre-increment and pre-decrement.
8958 ??? Except that due to conceptual problems in offsettable_address_p
8959 we can't really report the problems of integral offsets. So leave
8960 this assuming that the adjustable offset must be valid for the
8961 sub-words of a TFmode operand, which is what we had before. */
8963 static bool
8964 rs6000_mode_dependent_address (const_rtx addr)
8966 switch (GET_CODE (addr))
8968 case PLUS:
8969 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
8970 is considered a legitimate address before reload, so there
8971 are no offset restrictions in that case. Note that this
8972 condition is safe in strict mode because any address involving
8973 virtual_stack_vars_rtx or arg_pointer_rtx would already have
8974 been rejected as illegitimate. */
8975 if (XEXP (addr, 0) != virtual_stack_vars_rtx
8976 && XEXP (addr, 0) != arg_pointer_rtx
8977 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
8979 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
8980 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
8982 break;
8984 case LO_SUM:
8985 /* Anything in the constant pool is sufficiently aligned that
8986 all bytes have the same high part address. */
8987 return !legitimate_constant_pool_address_p (addr, QImode, false);
8989 /* Auto-increment cases are now treated generically in recog.c. */
8990 case PRE_MODIFY:
8991 return TARGET_UPDATE;
8993 /* AND is only allowed in Altivec loads. */
8994 case AND:
8995 return true;
8997 default:
8998 break;
9001 return false;
9004 /* Debug version of rs6000_mode_dependent_address. */
9005 static bool
9006 rs6000_debug_mode_dependent_address (const_rtx addr)
9008 bool ret = rs6000_mode_dependent_address (addr);
9010 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9011 ret ? "true" : "false");
9012 debug_rtx (addr);
9014 return ret;
9017 /* Implement FIND_BASE_TERM. */
9019 rtx
9020 rs6000_find_base_term (rtx op)
9022 rtx base;
9024 base = op;
9025 if (GET_CODE (base) == CONST)
9026 base = XEXP (base, 0);
9027 if (GET_CODE (base) == PLUS)
9028 base = XEXP (base, 0);
9029 if (GET_CODE (base) == UNSPEC)
9030 switch (XINT (base, 1))
9032 case UNSPEC_TOCREL:
9033 case UNSPEC_MACHOPIC_OFFSET:
9034 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9035 for aliasing purposes. */
9036 return XVECEXP (base, 0, 0);
9039 return op;
9042 /* More elaborate version of recog's offsettable_memref_p predicate
9043 that works around the ??? note of rs6000_mode_dependent_address.
9044 In particular it accepts
9046 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9048 in 32-bit mode, which the recog predicate rejects. */
9050 static bool
9051 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9053 bool worst_case;
9055 if (!MEM_P (op))
9056 return false;
9058 /* First mimic offsettable_memref_p. */
9059 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9060 return true;
9062 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9063 the latter predicate knows nothing about the mode of the memory
9064 reference and, therefore, assumes that it is the largest supported
9065 mode (TFmode). As a consequence, legitimate offsettable memory
9066 references are rejected. rs6000_legitimate_offset_address_p contains
9067 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9068 at least with a little bit of help here given that we know the
9069 actual registers used. */
9070 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9071 || GET_MODE_SIZE (reg_mode) == 4);
9072 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9073 true, worst_case);
9076 /* Determine the reassociation width to be used in reassociate_bb.
9077 This takes into account how many parallel operations we
9078 can actually do of a given type, and also the latency.
9080 int add/sub 6/cycle
9081 mul 2/cycle
9082 vect add/sub/mul 2/cycle
9083 fp add/sub/mul 2/cycle
9084 dfp 1/cycle
9087 static int
9088 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9089 enum machine_mode mode)
9091 switch (rs6000_cpu)
9093 case PROCESSOR_POWER8:
9094 case PROCESSOR_POWER9:
9095 if (DECIMAL_FLOAT_MODE_P (mode))
9096 return 1;
9097 if (VECTOR_MODE_P (mode))
9098 return 4;
9099 if (INTEGRAL_MODE_P (mode))
9100 return opc == MULT_EXPR ? 4 : 6;
9101 if (FLOAT_MODE_P (mode))
9102 return 4;
9103 break;
9104 default:
9105 break;
9107 return 1;
9110 /* Change register usage conditional on target flags. */
9111 static void
9112 rs6000_conditional_register_usage (void)
9114 int i;
9116 if (TARGET_DEBUG_TARGET)
9117 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9119 /* Set MQ register fixed (already call_used) so that it will not be
9120 allocated. */
9121 fixed_regs[64] = 1;
9123 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9124 if (TARGET_64BIT)
9125 fixed_regs[13] = call_used_regs[13]
9126 = call_really_used_regs[13] = 1;
9128 /* Conditionally disable FPRs. */
9129 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
9130 for (i = 32; i < 64; i++)
9131 fixed_regs[i] = call_used_regs[i]
9132 = call_really_used_regs[i] = 1;
9134 /* The TOC register is not killed across calls in a way that is
9135 visible to the compiler. */
9136 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9137 call_really_used_regs[2] = 0;
9139 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9140 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9142 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9143 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9144 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9145 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9147 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9148 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9149 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9150 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9152 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9153 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9154 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9156 if (TARGET_SPE)
9158 global_regs[SPEFSCR_REGNO] = 1;
9159 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
9160 registers in prologues and epilogues. We no longer use r14
9161 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
9162 pool for link-compatibility with older versions of GCC. Once
9163 "old" code has died out, we can return r14 to the allocation
9164 pool. */
9165 fixed_regs[14]
9166 = call_used_regs[14]
9167 = call_really_used_regs[14] = 1;
9170 if (!TARGET_ALTIVEC && !TARGET_VSX)
9172 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9173 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9174 call_really_used_regs[VRSAVE_REGNO] = 1;
9177 if (TARGET_ALTIVEC || TARGET_VSX)
9178 global_regs[VSCR_REGNO] = 1;
9180 if (TARGET_ALTIVEC_ABI)
9182 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9183 call_used_regs[i] = call_really_used_regs[i] = 1;
9185 /* AIX reserves VR20:31 in non-extended ABI mode. */
9186 if (TARGET_XCOFF)
9187 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9188 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9193 /* Output insns to set DEST equal to the constant SOURCE as a series of
9194 lis, ori and shl instructions and return TRUE. */
9196 bool
9197 rs6000_emit_set_const (rtx dest, rtx source)
9199 machine_mode mode = GET_MODE (dest);
9200 rtx temp, set;
9201 rtx_insn *insn;
9202 HOST_WIDE_INT c;
9204 gcc_checking_assert (CONST_INT_P (source));
9205 c = INTVAL (source);
9206 switch (mode)
9208 case QImode:
9209 case HImode:
9210 emit_insn (gen_rtx_SET (dest, source));
9211 return true;
9213 case SImode:
9214 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9216 emit_insn (gen_rtx_SET (copy_rtx (temp),
9217 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9218 emit_insn (gen_rtx_SET (dest,
9219 gen_rtx_IOR (SImode, copy_rtx (temp),
9220 GEN_INT (c & 0xffff))));
9221 break;
9223 case DImode:
9224 if (!TARGET_POWERPC64)
9226 rtx hi, lo;
9228 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9229 DImode);
9230 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9231 DImode);
9232 emit_move_insn (hi, GEN_INT (c >> 32));
9233 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9234 emit_move_insn (lo, GEN_INT (c));
9236 else
9237 rs6000_emit_set_long_const (dest, c);
9238 break;
9240 default:
9241 gcc_unreachable ();
9244 insn = get_last_insn ();
9245 set = single_set (insn);
9246 if (! CONSTANT_P (SET_SRC (set)))
9247 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9249 return true;
9252 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9253 Output insns to set DEST equal to the constant C as a series of
9254 lis, ori and shl instructions. */
9256 static void
9257 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9259 rtx temp;
9260 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9262 ud1 = c & 0xffff;
9263 c = c >> 16;
9264 ud2 = c & 0xffff;
9265 c = c >> 16;
9266 ud3 = c & 0xffff;
9267 c = c >> 16;
9268 ud4 = c & 0xffff;
9270 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9271 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9272 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9274 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9275 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9277 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9279 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9280 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9281 if (ud1 != 0)
9282 emit_move_insn (dest,
9283 gen_rtx_IOR (DImode, copy_rtx (temp),
9284 GEN_INT (ud1)));
9286 else if (ud3 == 0 && ud4 == 0)
9288 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9290 gcc_assert (ud2 & 0x8000);
9291 emit_move_insn (copy_rtx (temp),
9292 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9293 if (ud1 != 0)
9294 emit_move_insn (copy_rtx (temp),
9295 gen_rtx_IOR (DImode, copy_rtx (temp),
9296 GEN_INT (ud1)));
9297 emit_move_insn (dest,
9298 gen_rtx_ZERO_EXTEND (DImode,
9299 gen_lowpart (SImode,
9300 copy_rtx (temp))));
9302 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9303 || (ud4 == 0 && ! (ud3 & 0x8000)))
9305 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9307 emit_move_insn (copy_rtx (temp),
9308 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9309 if (ud2 != 0)
9310 emit_move_insn (copy_rtx (temp),
9311 gen_rtx_IOR (DImode, copy_rtx (temp),
9312 GEN_INT (ud2)));
9313 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9314 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9315 GEN_INT (16)));
9316 if (ud1 != 0)
9317 emit_move_insn (dest,
9318 gen_rtx_IOR (DImode, copy_rtx (temp),
9319 GEN_INT (ud1)));
9321 else
9323 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9325 emit_move_insn (copy_rtx (temp),
9326 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9327 if (ud3 != 0)
9328 emit_move_insn (copy_rtx (temp),
9329 gen_rtx_IOR (DImode, copy_rtx (temp),
9330 GEN_INT (ud3)));
9332 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9333 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9334 GEN_INT (32)));
9335 if (ud2 != 0)
9336 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9337 gen_rtx_IOR (DImode, copy_rtx (temp),
9338 GEN_INT (ud2 << 16)));
9339 if (ud1 != 0)
9340 emit_move_insn (dest,
9341 gen_rtx_IOR (DImode, copy_rtx (temp),
9342 GEN_INT (ud1)));
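/* Illustrative sketch (an addition for exposition, not part of the
   original file): the cases above materialize a 64-bit constant from
   at most four 16-bit groups ud1..ud4, using lis/ori to build each
   32-bit half and a shift to position it.  A minimal standalone check
   of the decomposition, using a hypothetical helper: */
#if 0
#include <assert.h>
static void
check_split (unsigned long long c)
{
  unsigned long long ud1 = c & 0xffff, ud2 = (c >> 16) & 0xffff;
  unsigned long long ud3 = (c >> 32) & 0xffff, ud4 = (c >> 48) & 0xffff;
  /* The four groups reassemble exactly.  */
  assert (((ud4 << 48) | (ud3 << 32) | (ud2 << 16) | ud1) == c);
  /* ((x ^ 0x8000) - 0x8000) sign-extends a 16-bit group, matching
     what lis/addi produce for a negative immediate: 0xffff -> -1.  */
  assert (((0xffff ^ 0x8000) - 0x8000) == -1);
}
#endif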
9346 /* Helper for the following. Get rid of [r+r] memory refs
9347 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9349 static void
9350 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9352 if (reload_in_progress)
9353 return;
9355 if (GET_CODE (operands[0]) == MEM
9356 && GET_CODE (XEXP (operands[0], 0)) != REG
9357 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9358 GET_MODE (operands[0]), false))
9359 operands[0]
9360 = replace_equiv_address (operands[0],
9361 copy_addr_to_reg (XEXP (operands[0], 0)));
9363 if (GET_CODE (operands[1]) == MEM
9364 && GET_CODE (XEXP (operands[1], 0)) != REG
9365 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9366 GET_MODE (operands[1]), false))
9367 operands[1]
9368 = replace_equiv_address (operands[1],
9369 copy_addr_to_reg (XEXP (operands[1], 0)));
9372 /* Generate a vector of constants to permute MODE for a little-endian
9373 storage operation by swapping the two halves of a vector. */
9374 static rtvec
9375 rs6000_const_vec (machine_mode mode)
9377 int i, subparts;
9378 rtvec v;
9380 switch (mode)
9382 case V1TImode:
9383 subparts = 1;
9384 break;
9385 case V2DFmode:
9386 case V2DImode:
9387 subparts = 2;
9388 break;
9389 case V4SFmode:
9390 case V4SImode:
9391 subparts = 4;
9392 break;
9393 case V8HImode:
9394 subparts = 8;
9395 break;
9396 case V16QImode:
9397 subparts = 16;
9398 break;
9399 default:
9400 gcc_unreachable();
9403 v = rtvec_alloc (subparts);
9405 for (i = 0; i < subparts / 2; ++i)
9406 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9407 for (i = subparts / 2; i < subparts; ++i)
9408 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9410 return v;
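/* Illustrative sketch (not part of the original file): the selector
   built above swaps the two doubleword halves of the vector.  For
   V4SImode it is {2, 3, 0, 1}; applying it twice yields the identity,
   which is what allows paired permuting loads and stores to cancel.
   The same permutation in plain C (hypothetical helper): */
#if 0
static void
example_selector (int subparts, int *sel)
{
  int i;
  for (i = 0; i < subparts; i++)
    sel[i] = (i + subparts / 2) % subparts; /* {2,3,0,1} for subparts 4 */
}
#endif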
9413 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
9414 for a VSX load or store operation. */
9415 rtx
9416 rs6000_gen_le_vsx_permute (rtx source, machine_mode mode)
9418 /* Use ROTATE instead of VEC_SELECT on IEEE 128-bit floating point, and
9419 128-bit integers if they are allowed in VSX registers. */
9420 if (FLOAT128_VECTOR_P (mode) || mode == TImode)
9421 return gen_rtx_ROTATE (mode, source, GEN_INT (64));
9422 else
9424 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9425 return gen_rtx_VEC_SELECT (mode, source, par);
9429 /* Emit a little-endian load from vector memory location SOURCE to VSX
9430 register DEST in mode MODE. The load is done with two permuting
9431 insns that represent an lxvd2x and xxpermdi. */
9432 void
9433 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9435 rtx tmp, permute_mem, permute_reg;
9437 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9438 V1TImode). */
9439 if (mode == TImode || mode == V1TImode)
9441 mode = V2DImode;
9442 dest = gen_lowpart (V2DImode, dest);
9443 source = adjust_address (source, V2DImode, 0);
9446 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9447 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
9448 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
9449 emit_insn (gen_rtx_SET (tmp, permute_mem));
9450 emit_insn (gen_rtx_SET (dest, permute_reg));
9453 /* Emit a little-endian store to vector memory location DEST from VSX
9454 register SOURCE in mode MODE. The store is done with two permuting
9455 insns that represent an xxpermdi and an stxvd2x. */
9456 void
9457 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9459 rtx tmp, permute_src, permute_tmp;
9461 /* This should never be called during or after reload, because it does
9462 not re-permute the source register. It is intended only for use
9463 during expand. */
9464 gcc_assert (!reload_in_progress && !lra_in_progress && !reload_completed);
9466 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9467 V1TImode). */
9468 if (mode == TImode || mode == V1TImode)
9470 mode = V2DImode;
9471 dest = adjust_address (dest, V2DImode, 0);
9472 source = gen_lowpart (V2DImode, source);
9475 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9476 permute_src = rs6000_gen_le_vsx_permute (source, mode);
9477 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
9478 emit_insn (gen_rtx_SET (tmp, permute_src));
9479 emit_insn (gen_rtx_SET (dest, permute_tmp));
9482 /* Emit a sequence representing a little-endian VSX load or store,
9483 moving data from SOURCE to DEST in mode MODE. This is done
9484 separately from rs6000_emit_move to ensure it is called only
9485 during expand. LE VSX loads and stores introduced later are
9486 handled with a split. The expand-time RTL generation allows
9487 us to optimize away redundant pairs of register-permutes. */
9488 void
9489 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9491 gcc_assert (!BYTES_BIG_ENDIAN
9492 && VECTOR_MEM_VSX_P (mode)
9493 && !TARGET_P9_VECTOR
9494 && !gpr_or_gpr_p (dest, source)
9495 && (MEM_P (source) ^ MEM_P (dest)));
9497 if (MEM_P (source))
9499 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9500 rs6000_emit_le_vsx_load (dest, source, mode);
9502 else
9504 if (!REG_P (source))
9505 source = force_reg (mode, source);
9506 rs6000_emit_le_vsx_store (dest, source, mode);
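/* Illustrative model (added for exposition): on little-endian, lxvd2x
   loads the two doublewords in the opposite element order and the
   following xxpermdi swaps them back; stores mirror this.  Modelling
   the half-swap in scalar C shows why a swapped load feeding a swapped
   store is a no-op that later passes can delete (hypothetical helper): */
#if 0
static void
swap_halves (unsigned long long v[2])  /* models the xxpermdi swap */
{
  unsigned long long t = v[0]; v[0] = v[1]; v[1] = t;
}
/* load  = lxvd2x  then swap_halves;
   store = swap_halves then stxvd2x;
   so store (load (x)) applies swap_halves twice, i.e. the identity,
   and the two inner permutes can be optimized away.  */
#endif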
9510 /* Emit a move from SOURCE to DEST in mode MODE. */
9511 void
9512 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9514 rtx operands[2];
9515 operands[0] = dest;
9516 operands[1] = source;
9518 if (TARGET_DEBUG_ADDR)
9520 fprintf (stderr,
9521 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
9522 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9523 GET_MODE_NAME (mode),
9524 reload_in_progress,
9525 reload_completed,
9526 can_create_pseudo_p ());
9527 debug_rtx (dest);
9528 fprintf (stderr, "source:\n");
9529 debug_rtx (source);
9532 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
9533 if (CONST_WIDE_INT_P (operands[1])
9534 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9536 /* This should be fixed with the introduction of CONST_WIDE_INT. */
9537 gcc_unreachable ();
9540 /* Check if GCC is setting up a block move that will end up using FP
9541 registers as temporaries. We must make sure this is acceptable. */
9542 if (GET_CODE (operands[0]) == MEM
9543 && GET_CODE (operands[1]) == MEM
9544 && mode == DImode
9545 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
9546 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
9547 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
9548 ? 32 : MEM_ALIGN (operands[0])))
9549 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
9550 ? 32
9551 : MEM_ALIGN (operands[1]))))
9552 && ! MEM_VOLATILE_P (operands [0])
9553 && ! MEM_VOLATILE_P (operands [1]))
9555 emit_move_insn (adjust_address (operands[0], SImode, 0),
9556 adjust_address (operands[1], SImode, 0));
9557 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9558 adjust_address (copy_rtx (operands[1]), SImode, 4));
9559 return;
9562 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9563 && !gpc_reg_operand (operands[1], mode))
9564 operands[1] = force_reg (mode, operands[1]);
9566 /* Recognize the case where operand[1] is a reference to thread-local
9567 data and load its address to a register. */
9568 if (tls_referenced_p (operands[1]))
9570 enum tls_model model;
9571 rtx tmp = operands[1];
9572 rtx addend = NULL;
9574 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9576 addend = XEXP (XEXP (tmp, 0), 1);
9577 tmp = XEXP (XEXP (tmp, 0), 0);
9580 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
9581 model = SYMBOL_REF_TLS_MODEL (tmp);
9582 gcc_assert (model != 0);
9584 tmp = rs6000_legitimize_tls_address (tmp, model);
9585 if (addend)
9587 tmp = gen_rtx_PLUS (mode, tmp, addend);
9588 tmp = force_operand (tmp, operands[0]);
9590 operands[1] = tmp;
9593 /* Handle the case where reload calls us with an invalid address. */
9594 if (reload_in_progress && mode == Pmode
9595 && (! general_operand (operands[1], mode)
9596 || ! nonimmediate_operand (operands[0], mode)))
9597 goto emit_set;
9599 /* 128-bit constant floating-point values on Darwin should really be loaded
9600 as two parts. However, this premature splitting is a problem when DFmode
9601 values can go into Altivec registers. */
9602 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
9603 && GET_CODE (operands[1]) == CONST_DOUBLE)
9605 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9606 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9607 DFmode);
9608 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9609 GET_MODE_SIZE (DFmode)),
9610 simplify_gen_subreg (DFmode, operands[1], mode,
9611 GET_MODE_SIZE (DFmode)),
9612 DFmode);
9613 return;
9616 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
9617 cfun->machine->sdmode_stack_slot =
9618 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
9621 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9622 p1:SD) if p1 is not of floating point class and p0 is spilled as
9623 we have no analogous movsd_store for this case. */
9624 if (lra_in_progress && mode == DDmode
9625 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9626 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9627 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
9628 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9630 enum reg_class cl;
9631 int regno = REGNO (SUBREG_REG (operands[1]));
9633 if (regno >= FIRST_PSEUDO_REGISTER)
9635 cl = reg_preferred_class (regno);
9636 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9638 if (regno >= 0 && ! FP_REGNO_P (regno))
9640 mode = SDmode;
9641 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9642 operands[1] = SUBREG_REG (operands[1]);
9645 if (lra_in_progress
9646 && mode == SDmode
9647 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9648 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9649 && (REG_P (operands[1])
9650 || (GET_CODE (operands[1]) == SUBREG
9651 && REG_P (SUBREG_REG (operands[1])))))
9653 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
9654 ? SUBREG_REG (operands[1]) : operands[1]);
9655 enum reg_class cl;
9657 if (regno >= FIRST_PSEUDO_REGISTER)
9659 cl = reg_preferred_class (regno);
9660 gcc_assert (cl != NO_REGS);
9661 regno = ira_class_hard_regs[cl][0];
9663 if (FP_REGNO_P (regno))
9665 if (GET_MODE (operands[0]) != DDmode)
9666 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
9667 emit_insn (gen_movsd_store (operands[0], operands[1]));
9669 else if (INT_REGNO_P (regno))
9670 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9671 else
9672 gcc_unreachable();
9673 return;
9675 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
9676 p1:DD)) if p0 is not of floating point class and p1 is spilled as
9677 we have no analogous movsd_load for this case. */
9678 if (lra_in_progress && mode == DDmode
9679 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
9680 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
9681 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
9682 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9684 enum reg_class cl;
9685 int regno = REGNO (SUBREG_REG (operands[0]));
9687 if (regno >= FIRST_PSEUDO_REGISTER)
9689 cl = reg_preferred_class (regno);
9690 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
9692 if (regno >= 0 && ! FP_REGNO_P (regno))
9694 mode = SDmode;
9695 operands[0] = SUBREG_REG (operands[0]);
9696 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
9699 if (lra_in_progress
9700 && mode == SDmode
9701 && (REG_P (operands[0])
9702 || (GET_CODE (operands[0]) == SUBREG
9703 && REG_P (SUBREG_REG (operands[0]))))
9704 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
9705 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9707 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
9708 ? SUBREG_REG (operands[0]) : operands[0]);
9709 enum reg_class cl;
9711 if (regno >= FIRST_PSEUDO_REGISTER)
9713 cl = reg_preferred_class (regno);
9714 gcc_assert (cl != NO_REGS);
9715 regno = ira_class_hard_regs[cl][0];
9717 if (FP_REGNO_P (regno))
9719 if (GET_MODE (operands[1]) != DDmode)
9720 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
9721 emit_insn (gen_movsd_load (operands[0], operands[1]));
9723 else if (INT_REGNO_P (regno))
9724 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9725 else
9726 gcc_unreachable();
9727 return;
9730 if (reload_in_progress
9731 && mode == SDmode
9732 && cfun->machine->sdmode_stack_slot != NULL_RTX
9733 && MEM_P (operands[0])
9734 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
9735 && REG_P (operands[1]))
9737 if (FP_REGNO_P (REGNO (operands[1])))
9739 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
9740 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9741 emit_insn (gen_movsd_store (mem, operands[1]));
9743 else if (INT_REGNO_P (REGNO (operands[1])))
9745 rtx mem = operands[0];
9746 if (BYTES_BIG_ENDIAN)
9747 mem = adjust_address_nv (mem, mode, 4);
9748 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9749 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
9751 else
9752 gcc_unreachable();
9753 return;
9755 if (reload_in_progress
9756 && mode == SDmode
9757 && REG_P (operands[0])
9758 && MEM_P (operands[1])
9759 && cfun->machine->sdmode_stack_slot != NULL_RTX
9760 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
9762 if (FP_REGNO_P (REGNO (operands[0])))
9764 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
9765 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9766 emit_insn (gen_movsd_load (operands[0], mem));
9768 else if (INT_REGNO_P (REGNO (operands[0])))
9770 rtx mem = operands[1];
9771 if (BYTES_BIG_ENDIAN)
9772 mem = adjust_address_nv (mem, mode, 4);
9773 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9774 emit_insn (gen_movsd_hardfloat (operands[0], mem));
9776 else
9777 gcc_unreachable();
9778 return;
9781 /* FIXME: In the long term, this switch statement should go away
9782 and be replaced by a sequence of tests based on things like
9783 mode == Pmode. */
9784 switch (mode)
9786 case HImode:
9787 case QImode:
9788 if (CONSTANT_P (operands[1])
9789 && GET_CODE (operands[1]) != CONST_INT)
9790 operands[1] = force_const_mem (mode, operands[1]);
9791 break;
9793 case TFmode:
9794 case TDmode:
9795 case IFmode:
9796 case KFmode:
9797 if (FLOAT128_2REG_P (mode))
9798 rs6000_eliminate_indexed_memrefs (operands);
9799 /* fall through */
9801 case DFmode:
9802 case DDmode:
9803 case SFmode:
9804 case SDmode:
9805 if (CONSTANT_P (operands[1])
9806 && ! easy_fp_constant (operands[1], mode))
9807 operands[1] = force_const_mem (mode, operands[1]);
9808 break;
9810 case V16QImode:
9811 case V8HImode:
9812 case V4SFmode:
9813 case V4SImode:
9814 case V4HImode:
9815 case V2SFmode:
9816 case V2SImode:
9817 case V1DImode:
9818 case V2DFmode:
9819 case V2DImode:
9820 case V1TImode:
9821 if (CONSTANT_P (operands[1])
9822 && !easy_vector_constant (operands[1], mode))
9823 operands[1] = force_const_mem (mode, operands[1]);
9824 break;
9826 case SImode:
9827 case DImode:
9828 /* Use the default pattern for the address of ELF small data. */
9829 if (TARGET_ELF
9830 && mode == Pmode
9831 && DEFAULT_ABI == ABI_V4
9832 && (GET_CODE (operands[1]) == SYMBOL_REF
9833 || GET_CODE (operands[1]) == CONST)
9834 && small_data_operand (operands[1], mode))
9836 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9837 return;
9840 if (DEFAULT_ABI == ABI_V4
9841 && mode == Pmode && mode == SImode
9842 && flag_pic == 1 && got_operand (operands[1], mode))
9844 emit_insn (gen_movsi_got (operands[0], operands[1]));
9845 return;
9848 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
9849 && TARGET_NO_TOC
9850 && ! flag_pic
9851 && mode == Pmode
9852 && CONSTANT_P (operands[1])
9853 && GET_CODE (operands[1]) != HIGH
9854 && GET_CODE (operands[1]) != CONST_INT)
9856 rtx target = (!can_create_pseudo_p ()
9857 ? operands[0]
9858 : gen_reg_rtx (mode));
9860 /* If this is a function address on -mcall-aixdesc,
9861 convert it to the address of the descriptor. */
9862 if (DEFAULT_ABI == ABI_AIX
9863 && GET_CODE (operands[1]) == SYMBOL_REF
9864 && XSTR (operands[1], 0)[0] == '.')
9866 const char *name = XSTR (operands[1], 0);
9867 rtx new_ref;
9868 while (*name == '.')
9869 name++;
9870 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
9871 CONSTANT_POOL_ADDRESS_P (new_ref)
9872 = CONSTANT_POOL_ADDRESS_P (operands[1]);
9873 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
9874 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
9875 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
9876 operands[1] = new_ref;
9879 if (DEFAULT_ABI == ABI_DARWIN)
9881 #if TARGET_MACHO
9882 if (MACHO_DYNAMIC_NO_PIC_P)
9884 /* Take care of any required data indirection. */
9885 operands[1] = rs6000_machopic_legitimize_pic_address (
9886 operands[1], mode, operands[0]);
9887 if (operands[0] != operands[1])
9888 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9889 return;
9891 #endif
9892 emit_insn (gen_macho_high (target, operands[1]));
9893 emit_insn (gen_macho_low (operands[0], target, operands[1]));
9894 return;
9897 emit_insn (gen_elf_high (target, operands[1]));
9898 emit_insn (gen_elf_low (operands[0], target, operands[1]));
9899 return;
9902 /* If this is a SYMBOL_REF that refers to a constant pool entry,
9903 and we have put it in the TOC, we just need to make a TOC-relative
9904 reference to it. */
9905 if (TARGET_TOC
9906 && GET_CODE (operands[1]) == SYMBOL_REF
9907 && use_toc_relative_ref (operands[1], mode))
9908 operands[1] = create_TOC_reference (operands[1], operands[0]);
9909 else if (mode == Pmode
9910 && CONSTANT_P (operands[1])
9911 && GET_CODE (operands[1]) != HIGH
9912 && ((GET_CODE (operands[1]) != CONST_INT
9913 && ! easy_fp_constant (operands[1], mode))
9914 || (GET_CODE (operands[1]) == CONST_INT
9915 && (num_insns_constant (operands[1], mode)
9916 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
9917 || (GET_CODE (operands[0]) == REG
9918 && FP_REGNO_P (REGNO (operands[0]))))
9919 && !toc_relative_expr_p (operands[1], false)
9920 && (TARGET_CMODEL == CMODEL_SMALL
9921 || can_create_pseudo_p ()
9922 || (REG_P (operands[0])
9923 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
9926 #if TARGET_MACHO
9927 /* Darwin uses a special PIC legitimizer. */
9928 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
9930 operands[1] =
9931 rs6000_machopic_legitimize_pic_address (operands[1], mode,
9932 operands[0]);
9933 if (operands[0] != operands[1])
9934 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9935 return;
9937 #endif
9939 /* If we are to limit the number of things we put in the TOC and
9940 this is a symbol plus a constant we can add in one insn,
9941 just put the symbol in the TOC and add the constant. Don't do
9942 this if reload is in progress. */
9943 if (GET_CODE (operands[1]) == CONST
9944 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
9945 && GET_CODE (XEXP (operands[1], 0)) == PLUS
9946 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
9947 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
9948 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
9949 && ! side_effects_p (operands[0]))
9951 rtx sym =
9952 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
9953 rtx other = XEXP (XEXP (operands[1], 0), 1);
9955 sym = force_reg (mode, sym);
9956 emit_insn (gen_add3_insn (operands[0], sym, other));
9957 return;
9960 operands[1] = force_const_mem (mode, operands[1]);
9962 if (TARGET_TOC
9963 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
9964 && constant_pool_expr_p (XEXP (operands[1], 0))
9965 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
9966 get_pool_constant (XEXP (operands[1], 0)),
9967 get_pool_mode (XEXP (operands[1], 0))))
9969 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
9970 operands[0]);
9971 operands[1] = gen_const_mem (mode, tocref);
9972 set_mem_alias_set (operands[1], get_TOC_alias_set ());
9975 break;
9977 case TImode:
9978 if (!VECTOR_MEM_VSX_P (TImode))
9979 rs6000_eliminate_indexed_memrefs (operands);
9980 break;
9982 case PTImode:
9983 rs6000_eliminate_indexed_memrefs (operands);
9984 break;
9986 default:
9987 fatal_insn ("bad move", gen_rtx_SET (dest, source));
9990 /* Above, we may have called force_const_mem which may have returned
9991 an invalid address. If we can, fix this up; otherwise, reload will
9992 have to deal with it. */
9993 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
9994 operands[1] = validize_mem (operands[1]);
9996 emit_set:
9997 emit_insn (gen_rtx_SET (operands[0], operands[1]));
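/* Example (added for exposition, not part of the original file): the
   DImode mem-to-mem case near the top of this function splits a
   misaligned 8-byte copy into two word-aligned SImode moves at
   offsets 0 and 4 when doubleword accesses are slow-unaligned but
   word accesses are not, avoiding either a slow unaligned access or
   an FPR temporary.  */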
10000 /* Return true if a structure, union or array containing FIELD should be
10001 accessed using `BLKMODE'.
10003 For the SPE, simd types are V2SI, and gcc can be tempted to put the
10004 entire thing in a DI and use subregs to access the internals.
10005 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
10006 back-end. Because a single GPR can hold a V2SI, but not a DI, the
10007 best thing to do is set structs to BLKmode and avoid Severe Tire
10008 Damage.
10010 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
10011 fit into one register, whereas DI still needs two. */
10013 static bool
10014 rs6000_member_type_forces_blk (const_tree field, machine_mode mode)
10016 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
10017 || (TARGET_E500_DOUBLE && mode == DFmode));
10020 /* Nonzero if we can use a floating-point register to pass this arg. */
10021 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10022 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10023 && (CUM)->fregno <= FP_ARG_MAX_REG \
10024 && TARGET_HARD_FLOAT && TARGET_FPRS)
10026 /* Nonzero if we can use an AltiVec register to pass this arg. */
10027 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10028 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10029 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10030 && TARGET_ALTIVEC_ABI \
10031 && (NAMED))
10033 /* Walk down the type tree of TYPE counting consecutive base elements.
10034 If *MODEP is VOIDmode, then set it to the first valid floating point
10035 or vector type. If a non-floating point or vector type is found, or
10036 if a floating point or vector type that doesn't match a non-VOIDmode
10037 *MODEP is found, then return -1, otherwise return the count in the
10038 sub-tree. */
10040 static int
10041 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10043 machine_mode mode;
10044 HOST_WIDE_INT size;
10046 switch (TREE_CODE (type))
10048 case REAL_TYPE:
10049 mode = TYPE_MODE (type);
10050 if (!SCALAR_FLOAT_MODE_P (mode))
10051 return -1;
10053 if (*modep == VOIDmode)
10054 *modep = mode;
10056 if (*modep == mode)
10057 return 1;
10059 break;
10061 case COMPLEX_TYPE:
10062 mode = TYPE_MODE (TREE_TYPE (type));
10063 if (!SCALAR_FLOAT_MODE_P (mode))
10064 return -1;
10066 if (*modep == VOIDmode)
10067 *modep = mode;
10069 if (*modep == mode)
10070 return 2;
10072 break;
10074 case VECTOR_TYPE:
10075 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10076 return -1;
10078 /* Use V4SImode as representative of all 128-bit vector types. */
10079 size = int_size_in_bytes (type);
10080 switch (size)
10082 case 16:
10083 mode = V4SImode;
10084 break;
10085 default:
10086 return -1;
10089 if (*modep == VOIDmode)
10090 *modep = mode;
10092 /* Vector modes are considered to be opaque: two vectors are
10093 equivalent for the purposes of being homogeneous aggregates
10094 if they are the same size. */
10095 if (*modep == mode)
10096 return 1;
10098 break;
10100 case ARRAY_TYPE:
10102 int count;
10103 tree index = TYPE_DOMAIN (type);
10105 /* Can't handle incomplete types or sizes that are not
10106 fixed. */
10107 if (!COMPLETE_TYPE_P (type)
10108 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10109 return -1;
10111 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10112 if (count == -1
10113 || !index
10114 || !TYPE_MAX_VALUE (index)
10115 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10116 || !TYPE_MIN_VALUE (index)
10117 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10118 || count < 0)
10119 return -1;
10121 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10122 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10124 /* There must be no padding. */
10125 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10126 return -1;
10128 return count;
10131 case RECORD_TYPE:
10133 int count = 0;
10134 int sub_count;
10135 tree field;
10137 /* Can't handle incomplete types or sizes that are not
10138 fixed. */
10139 if (!COMPLETE_TYPE_P (type)
10140 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10141 return -1;
10143 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10145 if (TREE_CODE (field) != FIELD_DECL)
10146 continue;
10148 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10149 if (sub_count < 0)
10150 return -1;
10151 count += sub_count;
10154 /* There must be no padding. */
10155 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10156 return -1;
10158 return count;
10161 case UNION_TYPE:
10162 case QUAL_UNION_TYPE:
10164 /* These aren't very interesting except in a degenerate case. */
10165 int count = 0;
10166 int sub_count;
10167 tree field;
10169 /* Can't handle incomplete types or sizes that are not
10170 fixed. */
10171 if (!COMPLETE_TYPE_P (type)
10172 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10173 return -1;
10175 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10177 if (TREE_CODE (field) != FIELD_DECL)
10178 continue;
10180 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10181 if (sub_count < 0)
10182 return -1;
10183 count = count > sub_count ? count : sub_count;
10186 /* There must be no padding. */
10187 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10188 return -1;
10190 return count;
10193 default:
10194 break;
10197 return -1;
10200 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10201 float or vector aggregate that shall be passed in FP/vector registers
10202 according to the ELFv2 ABI, return the homogeneous element mode in
10203 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10205 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10207 static bool
10208 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10209 machine_mode *elt_mode,
10210 int *n_elts)
10212 /* Note that we do not accept complex types at the top level as
10213 homogeneous aggregates; these types are handled via the
10214 targetm.calls.split_complex_arg mechanism. Complex types
10215 can be elements of homogeneous aggregates, however. */
10216 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
10218 machine_mode field_mode = VOIDmode;
10219 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10221 if (field_count > 0)
10223 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
10224 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
10226 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10227 up to AGGR_ARG_NUM_REG registers. */
10228 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
10230 if (elt_mode)
10231 *elt_mode = field_mode;
10232 if (n_elts)
10233 *n_elts = field_count;
10234 return true;
10239 if (elt_mode)
10240 *elt_mode = mode;
10241 if (n_elts)
10242 *n_elts = 1;
10243 return false;
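/* Illustrative examples (added for exposition), assuming the ELFv2
   ABI:

       struct hfa { double r, i; struct { double x, y; } p; };

   is discovered as a homogeneous aggregate with elt_mode DFmode and
   n_elts 4 (one register per element, within AGGR_ARG_NUM_REG), so it
   travels in four FPRs; whereas

       struct mixed { double d; int i; };

   makes rs6000_aggregate_candidate return -1, and the struct is passed
   by the ordinary GPR/stack rules.  */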
10246 /* Return a nonzero value to say to return the function value in
10247 memory, just as large structures are always returned. TYPE will be
10248 the data type of the value, and FNTYPE will be the type of the
10249 function doing the returning, or @code{NULL} for libcalls.
10251 The AIX ABI for the RS/6000 specifies that all structures are
10252 returned in memory. The Darwin ABI does the same.
10254 For the Darwin 64 Bit ABI, a function result can be returned in
10255 registers or in memory, depending on the size of the return data
10256 type. If it is returned in registers, the value occupies the same
10257 registers as it would if it were the first and only function
10258 argument. Otherwise, the function places its result in memory at
10259 the location pointed to by GPR3.
10261 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10262 but a draft put them in memory, and GCC used to implement the draft
10263 instead of the final standard. Therefore, aix_struct_return
10264 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10265 compatibility can change DRAFT_V4_STRUCT_RET to override the
10266 default, and -m switches get the final word. See
10267 rs6000_option_override_internal for more details.
10269 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10270 long double support is enabled. These values are returned in memory.
10272 int_size_in_bytes returns -1 for variable size objects, which go in
10273 memory always. The cast to unsigned makes -1 > 8. */
10275 static bool
10276 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10278 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10279 if (TARGET_MACHO
10280 && rs6000_darwin64_abi
10281 && TREE_CODE (type) == RECORD_TYPE
10282 && int_size_in_bytes (type) > 0)
10284 CUMULATIVE_ARGS valcum;
10285 rtx valret;
10287 valcum.words = 0;
10288 valcum.fregno = FP_ARG_MIN_REG;
10289 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10290 /* Do a trial code generation as if this were going to be passed
10291 as an argument; if any part goes in memory, we return NULL. */
10292 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10293 if (valret)
10294 return false;
10295 /* Otherwise fall through to more conventional ABI rules. */
10298 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10299 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10300 NULL, NULL))
10301 return false;
10303 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10304 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10305 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10306 return false;
10308 if (AGGREGATE_TYPE_P (type)
10309 && (aix_struct_return
10310 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10311 return true;
10313 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10314 modes only exist for GCC vector types if -maltivec. */
10315 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10316 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10317 return false;
10319 /* Return synthetic vectors in memory. */
10320 if (TREE_CODE (type) == VECTOR_TYPE
10321 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10323 static bool warned_for_return_big_vectors = false;
10324 if (!warned_for_return_big_vectors)
10326 warning (0, "GCC vector returned by reference: "
10327 "non-standard ABI extension with no compatibility guarantee");
10328 warned_for_return_big_vectors = true;
10330 return true;
10333 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10334 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10335 return true;
10337 return false;
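/* Illustrative examples (added for exposition), assuming ELFv2:
   struct { double a, b; } is a homogeneous FP aggregate and is
   returned in FPRs; struct { char c[16]; } fits the 16-byte window
   and is returned in GPRs; struct { char c[24]; } is returned in
   memory.  */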
10340 /* Specify whether values returned in registers should be at the most
10341 significant end of a register. We want aggregates returned by
10342 value to match the way aggregates are passed to functions. */
10344 static bool
10345 rs6000_return_in_msb (const_tree valtype)
10347 return (DEFAULT_ABI == ABI_ELFv2
10348 && BYTES_BIG_ENDIAN
10349 && AGGREGATE_TYPE_P (valtype)
10350 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
10353 #ifdef HAVE_AS_GNU_ATTRIBUTE
10354 /* Return TRUE if a call to function FNDECL may be one that
10355 potentially affects the function calling ABI of the object file. */
10357 static bool
10358 call_ABI_of_interest (tree fndecl)
10360 if (symtab->state == EXPANSION)
10362 struct cgraph_node *c_node;
10364 /* Libcalls are always interesting. */
10365 if (fndecl == NULL_TREE)
10366 return true;
10368 /* Any call to an external function is interesting. */
10369 if (DECL_EXTERNAL (fndecl))
10370 return true;
10372 /* Interesting functions that we are emitting in this object file. */
10373 c_node = cgraph_node::get (fndecl);
10374 c_node = c_node->ultimate_alias_target ();
10375 return !c_node->only_called_directly_p ();
10377 return false;
10379 #endif
10381 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10382 for a call to a function whose data type is FNTYPE.
10383 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10385 For incoming args we set the number of arguments in the prototype large
10386 so we never return a PARALLEL. */
10388 void
10389 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10390 rtx libname ATTRIBUTE_UNUSED, int incoming,
10391 int libcall, int n_named_args,
10392 tree fndecl ATTRIBUTE_UNUSED,
10393 machine_mode return_mode ATTRIBUTE_UNUSED)
10395 static CUMULATIVE_ARGS zero_cumulative;
10397 *cum = zero_cumulative;
10398 cum->words = 0;
10399 cum->fregno = FP_ARG_MIN_REG;
10400 cum->vregno = ALTIVEC_ARG_MIN_REG;
10401 cum->prototype = (fntype && prototype_p (fntype));
10402 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10403 ? CALL_LIBCALL : CALL_NORMAL);
10404 cum->sysv_gregno = GP_ARG_MIN_REG;
10405 cum->stdarg = stdarg_p (fntype);
10406 cum->libcall = libcall;
10408 cum->nargs_prototype = 0;
10409 if (incoming || cum->prototype)
10410 cum->nargs_prototype = n_named_args;
10412 /* Check for a longcall attribute. */
10413 if ((!fntype && rs6000_default_long_calls)
10414 || (fntype
10415 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10416 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10417 cum->call_cookie |= CALL_LONG;
10419 if (TARGET_DEBUG_ARG)
10421 fprintf (stderr, "\ninit_cumulative_args:");
10422 if (fntype)
10424 tree ret_type = TREE_TYPE (fntype);
10425 fprintf (stderr, " ret code = %s,",
10426 get_tree_code_name (TREE_CODE (ret_type)));
10429 if (cum->call_cookie & CALL_LONG)
10430 fprintf (stderr, " longcall,");
10432 fprintf (stderr, " proto = %d, nargs = %d\n",
10433 cum->prototype, cum->nargs_prototype);
10436 #ifdef HAVE_AS_GNU_ATTRIBUTE
10437 if (DEFAULT_ABI == ABI_V4)
10439 cum->escapes = call_ABI_of_interest (fndecl);
10440 if (cum->escapes)
10442 tree return_type;
10444 if (fntype)
10446 return_type = TREE_TYPE (fntype);
10447 return_mode = TYPE_MODE (return_type);
10449 else
10450 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10452 if (return_type != NULL)
10454 if (TREE_CODE (return_type) == RECORD_TYPE
10455 && TYPE_TRANSPARENT_AGGR (return_type))
10457 return_type = TREE_TYPE (first_field (return_type));
10458 return_mode = TYPE_MODE (return_type);
10460 if (AGGREGATE_TYPE_P (return_type)
10461 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10462 <= 8))
10463 rs6000_returns_struct = true;
10465 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (return_mode))
10466 rs6000_passes_float = true;
10467 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
10468 || SPE_VECTOR_MODE (return_mode))
10469 rs6000_passes_vector = true;
10472 #endif
10474 if (fntype
10475 && !TARGET_ALTIVEC
10476 && TARGET_ALTIVEC_ABI
10477 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10479 error ("cannot return value in vector register because"
10480 " altivec instructions are disabled, use -maltivec"
10481 " to enable them");
10485 /* The mode the ABI uses for a word. This is not the same as word_mode
10486 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10488 static machine_mode
10489 rs6000_abi_word_mode (void)
10491 return TARGET_32BIT ? SImode : DImode;
10494 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10495 static char *
10496 rs6000_offload_options (void)
10498 if (TARGET_64BIT)
10499 return xstrdup ("-foffload-abi=lp64");
10500 else
10501 return xstrdup ("-foffload-abi=ilp32");
10504 /* On rs6000, function arguments are promoted, as are function return
10505 values. */
10507 static machine_mode
10508 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10509 machine_mode mode,
10510 int *punsignedp ATTRIBUTE_UNUSED,
10511 const_tree, int)
10513 PROMOTE_MODE (mode, *punsignedp, type);
10515 return mode;
10518 /* Return true if TYPE must be passed on the stack and not in registers. */
10520 static bool
10521 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10523 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10524 return must_pass_in_stack_var_size (mode, type);
10525 else
10526 return must_pass_in_stack_var_size_or_pad (mode, type);
10529 static inline bool
10530 is_complex_IBM_long_double (machine_mode mode)
10532 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
10535 /* Whether ABI_V4 passes MODE args to a function in floating point
10536 registers. */
10538 static bool
10539 abi_v4_pass_in_fpr (machine_mode mode)
10541 if (!TARGET_FPRS || !TARGET_HARD_FLOAT)
10542 return false;
10543 if (TARGET_SINGLE_FLOAT && mode == SFmode)
10544 return true;
10545 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
10546 return true;
10547 /* ABI_V4 passes complex IBM long double in 8 gprs.
10548 Stupid, but we can't change the ABI now. */
10549 if (is_complex_IBM_long_double (mode))
10550 return false;
10551 if (FLOAT128_2REG_P (mode))
10552 return true;
10553 if (DECIMAL_FLOAT_MODE_P (mode))
10554 return true;
10555 return false;
10558 /* If defined, a C expression which determines whether, and in which
10559 direction, to pad out an argument with extra space. The value
10560 should be of type `enum direction': either `upward' to pad above
10561 the argument, `downward' to pad below, or `none' to inhibit
10562 padding.
10564 For the AIX ABI structs are always stored left shifted in their
10565 argument slot. */
10567 enum direction
10568 function_arg_padding (machine_mode mode, const_tree type)
10570 #ifndef AGGREGATE_PADDING_FIXED
10571 #define AGGREGATE_PADDING_FIXED 0
10572 #endif
10573 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10574 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10575 #endif
10577 if (!AGGREGATE_PADDING_FIXED)
10579 /* GCC used to pass structures of the same size as integer types as
10580 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
10581 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10582 passed padded downward, except that -mstrict-align further
10583 muddied the water in that multi-component structures of 2 and 4
10584 bytes in size were passed padded upward.
10586 The following arranges for best compatibility with previous
10587 versions of gcc, but removes the -mstrict-align dependency. */
10588 if (BYTES_BIG_ENDIAN)
10590 HOST_WIDE_INT size = 0;
10592 if (mode == BLKmode)
10594 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10595 size = int_size_in_bytes (type);
10597 else
10598 size = GET_MODE_SIZE (mode);
10600 if (size == 1 || size == 2 || size == 4)
10601 return downward;
10603 return upward;
10606 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10608 if (type != 0 && AGGREGATE_TYPE_P (type))
10609 return upward;
10612 /* Fall back to the default. */
10613 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
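/* Example (added for exposition): on a big-endian target, a 2-byte
   struct is padded downward -- it occupies the low-order end of its
   slot, the way a short would -- while a 3-byte struct is padded
   upward and starts at the first byte of its slot.  */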
10616 /* If defined, a C expression that gives the alignment boundary, in bits,
10617 of an argument with the specified mode and type. If it is not defined,
10618 PARM_BOUNDARY is used for all arguments.
10620 V.4 wants long longs and doubles to be double word aligned. Just
10621 testing the mode size is a boneheaded way to do this as it means
10622 that other types such as complex int are also double word aligned.
10623 However, we're stuck with this because changing the ABI might break
10624 existing library interfaces.
10626 Doubleword align SPE vectors.
10627 Quadword align Altivec/VSX vectors.
10628 Quadword align large synthetic vector types. */
10630 static unsigned int
10631 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10633 machine_mode elt_mode;
10634 int n_elts;
10636 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10638 if (DEFAULT_ABI == ABI_V4
10639 && (GET_MODE_SIZE (mode) == 8
10640 || (TARGET_HARD_FLOAT
10641 && TARGET_FPRS
10642 && !is_complex_IBM_long_double (mode)
10643 && FLOAT128_2REG_P (mode))))
10644 return 64;
10645 else if (FLOAT128_VECTOR_P (mode))
10646 return 128;
10647 else if (SPE_VECTOR_MODE (mode)
10648 || (type && TREE_CODE (type) == VECTOR_TYPE
10649 && int_size_in_bytes (type) >= 8
10650 && int_size_in_bytes (type) < 16))
10651 return 64;
10652 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10653 || (type && TREE_CODE (type) == VECTOR_TYPE
10654 && int_size_in_bytes (type) >= 16))
10655 return 128;
10657 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10658 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10659 -mcompat-align-parm is used. */
10660 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10661 || DEFAULT_ABI == ABI_ELFv2)
10662 && type && TYPE_ALIGN (type) > 64)
10664 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10665 or homogeneous float/vector aggregates here. We already handled
10666 vector aggregates above, but still need to check for float here. */
10667 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10668 && !SCALAR_FLOAT_MODE_P (elt_mode));
10670 /* We used to check for BLKmode instead of the above aggregate type
10671 check. Warn when this results in any difference to the ABI. */
10672 if (aggregate_p != (mode == BLKmode))
10674 static bool warned;
10675 if (!warned && warn_psabi)
10677 warned = true;
10678 inform (input_location,
10679 "the ABI of passing aggregates with %d-byte alignment"
10680 " has changed in GCC 5",
10681 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10685 if (aggregate_p)
10686 return 128;
10689 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10690 implement the "aggregate type" check as a BLKmode check here; this
10691 means certain aggregate types are in fact not aligned. */
10692 if (TARGET_MACHO && rs6000_darwin64_abi
10693 && mode == BLKmode
10694 && type && TYPE_ALIGN (type) > 64)
10695 return 128;
10697 return PARM_BOUNDARY;
10700 /* The offset in words to the start of the parameter save area. */
10702 static unsigned int
10703 rs6000_parm_offset (void)
10705 return (DEFAULT_ABI == ABI_V4 ? 2
10706 : DEFAULT_ABI == ABI_ELFv2 ? 4
10707 : 6);
10710 /* For a function parm of MODE and TYPE, return the starting word in
10711 the parameter area. NWORDS of the parameter area are already used. */
10713 static unsigned int
10714 rs6000_parm_start (machine_mode mode, const_tree type,
10715 unsigned int nwords)
10717 unsigned int align;
10719 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
10720 return nwords + (-(rs6000_parm_offset () + nwords) & align);
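/* Worked example (added for exposition): under ELFv2,
   rs6000_parm_offset () is 4 words.  For a 16-byte-aligned vector
   argument in 64-bit mode (PARM_BOUNDARY 64) with nwords == 3 already
   used:

       align = 128 / 64 - 1       = 1
       start = 3 + (-(4 + 3) & 1) = 3 + 1 = 4

   so one padding word is skipped and the argument begins on an even,
   16-byte-aligned word of the save area.  */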
10723 /* Compute the size (in words) of a function argument. */
10725 static unsigned long
10726 rs6000_arg_size (machine_mode mode, const_tree type)
10728 unsigned long size;
10730 if (mode != BLKmode)
10731 size = GET_MODE_SIZE (mode);
10732 else
10733 size = int_size_in_bytes (type);
10735 if (TARGET_32BIT)
10736 return (size + 3) >> 2;
10737 else
10738 return (size + 7) >> 3;
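/* Example (added for exposition): a 9-byte BLKmode struct occupies
   (9 + 3) >> 2 == 3 words under -m32 and (9 + 7) >> 3 == 2
   doublewords under -m64; sizes always round up to whole words.  */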
10741 /* Use this to flush pending int fields. */
10743 static void
10744 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
10745 HOST_WIDE_INT bitpos, int final)
10747 unsigned int startbit, endbit;
10748 int intregs, intoffset;
10749 machine_mode mode;
10751 /* Handle the situations where a float is taking up the first half
10752 of the GPR, and the other half is empty (typically due to
10753 alignment restrictions). We can detect this by an 8-byte-aligned
10754 int field, or by seeing that this is the final flush for this
10755 argument. Count the word and continue on. */
10756 if (cum->floats_in_gpr == 1
10757 && (cum->intoffset % 64 == 0
10758 || (cum->intoffset == -1 && final)))
10760 cum->words++;
10761 cum->floats_in_gpr = 0;
10764 if (cum->intoffset == -1)
10765 return;
10767 intoffset = cum->intoffset;
10768 cum->intoffset = -1;
10769 cum->floats_in_gpr = 0;
10771 if (intoffset % BITS_PER_WORD != 0)
10773 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
10774 MODE_INT, 0);
10775 if (mode == BLKmode)
10777 /* We couldn't find an appropriate mode, which happens,
10778 e.g., in packed structs when there are 3 bytes to load.
10779 Move intoffset back to the beginning of the word in this
10780 case. */
10781 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
10785 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
10786 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
10787 intregs = (endbit - startbit) / BITS_PER_WORD;
10788 cum->words += intregs;
10789 /* words should be unsigned. */
10790 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
10792 int pad = (endbit/BITS_PER_WORD) - cum->words;
10793 cum->words += pad;
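/* Worked example (added for exposition): for a run of int fields
   occupying bits 32..95 of a darwin64 struct, intoffset == 32 and
   bitpos == 96, so startbit == ROUND_DOWN (32, 64) == 0 and
   endbit == ROUND_UP (96, 64) == 128, charging (128 - 0) / 64 == 2
   GPR words for the run.  */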
10797 /* The darwin64 ABI calls for us to recurse down through structs,
10798 looking for elements passed in registers. Unfortunately, we have
10799 to track int register count here also because of misalignments
10800 in powerpc alignment mode. */
10802 static void
10803 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
10804 const_tree type,
10805 HOST_WIDE_INT startbitpos)
10807 tree f;
10809 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10810 if (TREE_CODE (f) == FIELD_DECL)
10812 HOST_WIDE_INT bitpos = startbitpos;
10813 tree ftype = TREE_TYPE (f);
10814 machine_mode mode;
10815 if (ftype == error_mark_node)
10816 continue;
10817 mode = TYPE_MODE (ftype);
10819 if (DECL_SIZE (f) != 0
10820 && tree_fits_uhwi_p (bit_position (f)))
10821 bitpos += int_bit_position (f);
10823 /* ??? FIXME: else assume zero offset. */
10825 if (TREE_CODE (ftype) == RECORD_TYPE)
10826 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
10827 else if (USE_FP_FOR_ARG_P (cum, mode))
10829 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
10830 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10831 cum->fregno += n_fpregs;
10832 /* Single-precision floats present a special problem for
10833 us, because they are smaller than an 8-byte GPR, and so
10834 the structure-packing rules combined with the standard
10835 varargs behavior mean that we want to pack float/float
10836 and float/int combinations into a single register's
10837 space. This is complicated by the arg advance flushing,
10838 which works on arbitrarily large groups of int-type
10839 fields. */
10840 if (mode == SFmode)
10842 if (cum->floats_in_gpr == 1)
10844 /* Two floats in a word; count the word and reset
10845 the float count. */
10846 cum->words++;
10847 cum->floats_in_gpr = 0;
10849 else if (bitpos % 64 == 0)
10851 /* A float at the beginning of an 8-byte word;
10852 count it and put off adjusting cum->words until
10853 we see if an arg advance flush is going to do it
10854 for us. */
10855 cum->floats_in_gpr++;
10857 else
10859 /* The float is at the end of a word, preceded
10860 by integer fields, so the arg advance flush
10861 just above has already set cum->words and
10862 everything is taken care of. */
10865 else
10866 cum->words += n_fpregs;
10868 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10870 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10871 cum->vregno++;
10872 cum->words += 2;
10874 else if (cum->intoffset == -1)
10875 cum->intoffset = bitpos;
10879 /* Check for an item that needs to be considered specially under the darwin 64
10880 bit ABI. These are record types where the mode is BLK or the structure is
10881 8 bytes in size. */
10882 static int
10883 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
10885 return rs6000_darwin64_abi
10886 && ((mode == BLKmode
10887 && TREE_CODE (type) == RECORD_TYPE
10888 && int_size_in_bytes (type) > 0)
10889 || (type && TREE_CODE (type) == RECORD_TYPE
10890 && int_size_in_bytes (type) == 8)) ? 1 : 0;
10893 /* Update the data in CUM to advance over an argument
10894 of mode MODE and data type TYPE.
10895 (TYPE is null for libcalls where that information may not be available.)
10897 Note that for args passed by reference, function_arg will be called
10898 with MODE and TYPE set to that of the pointer to the arg, not the arg
10899 itself. */
10901 static void
10902 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
10903 const_tree type, bool named, int depth)
10905 machine_mode elt_mode;
10906 int n_elts;
10908 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10910 /* Only tick off an argument if we're not recursing. */
10911 if (depth == 0)
10912 cum->nargs_prototype--;
10914 #ifdef HAVE_AS_GNU_ATTRIBUTE
10915 if (DEFAULT_ABI == ABI_V4
10916 && cum->escapes)
10918 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode))
10919 rs6000_passes_float = true;
10920 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
10921 rs6000_passes_vector = true;
10922 else if (SPE_VECTOR_MODE (mode)
10923 && !cum->stdarg
10924 && cum->sysv_gregno <= GP_ARG_MAX_REG)
10925 rs6000_passes_vector = true;
10927 #endif
10929 if (TARGET_ALTIVEC_ABI
10930 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10931 || (type && TREE_CODE (type) == VECTOR_TYPE
10932 && int_size_in_bytes (type) == 16)))
10934 bool stack = false;
10936 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10938 cum->vregno += n_elts;
10940 if (!TARGET_ALTIVEC)
10941 error ("cannot pass argument in vector register because"
10942 " altivec instructions are disabled, use -maltivec"
10943 " to enable them");
10945 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
10946 even if it is going to be passed in a vector register.
10947 Darwin does the same for variable-argument functions. */
10948 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10949 && TARGET_64BIT)
10950 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
10951 stack = true;
10953 else
10954 stack = true;
10956 if (stack)
10958 int align;
10960 /* Vector parameters must be 16-byte aligned. In 32-bit
10961 mode this means we need to take into account the offset
10962 to the parameter save area. In 64-bit mode, they just
10963 have to start on an even word, since the parameter save
10964 area is 16-byte aligned. */
10965 if (TARGET_32BIT)
10966 align = -(rs6000_parm_offset () + cum->words) & 3;
10967 else
10968 align = cum->words & 1;
10969 cum->words += align + rs6000_arg_size (mode, type);
10971 if (TARGET_DEBUG_ARG)
10973 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
10974 cum->words, align);
10975 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
10976 cum->nargs_prototype, cum->prototype,
10977 GET_MODE_NAME (mode));
10981 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
10982 && !cum->stdarg
10983 && cum->sysv_gregno <= GP_ARG_MAX_REG)
10984 cum->sysv_gregno++;
10986 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10988 int size = int_size_in_bytes (type);
10989 /* Variable sized types have size == -1 and are
10990 treated as if consisting entirely of ints.
10991 Pad to 16 byte boundary if needed. */
10992 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
10993 && (cum->words % 2) != 0)
10994 cum->words++;
10995 /* For varargs, we can just go up by the size of the struct. */
10996 if (!named)
10997 cum->words += (size + 7) / 8;
10998 else
11000 /* It is tempting to say int register count just goes up by
11001 sizeof(type)/8, but this is wrong in a case such as
11002 { int; double; int; } [powerpc alignment]. We have to
11003 grovel through the fields for these too. */
11004 cum->intoffset = 0;
11005 cum->floats_in_gpr = 0;
11006 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11007 rs6000_darwin64_record_arg_advance_flush (cum,
11008 size * BITS_PER_UNIT, 1);
11010 if (TARGET_DEBUG_ARG)
11012 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11013 cum->words, TYPE_ALIGN (type), size);
11014 fprintf (stderr,
11015 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11016 cum->nargs_prototype, cum->prototype,
11017 GET_MODE_NAME (mode));
11020 else if (DEFAULT_ABI == ABI_V4)
11022 if (abi_v4_pass_in_fpr (mode))
11024 /* _Decimal128 must use an even/odd register pair. This assumes
11025 that the register number is odd when fregno is odd. */
11026 if (mode == TDmode && (cum->fregno % 2) == 1)
11027 cum->fregno++;
11029 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11030 <= FP_ARG_V4_MAX_REG)
11031 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11032 else
11034 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11035 if (mode == DFmode || FLOAT128_IBM_P (mode)
11036 || mode == DDmode || mode == TDmode)
11037 cum->words += cum->words & 1;
11038 cum->words += rs6000_arg_size (mode, type);
11041 else
11043 int n_words = rs6000_arg_size (mode, type);
11044 int gregno = cum->sysv_gregno;
11046 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
11047 (r7,r8) or (r9,r10). So is any other 2-word item, such as
11048 complex int, due to a historical mistake. */
11049 if (n_words == 2)
11050 gregno += (1 - gregno) & 1;
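/* Editorial note: "(1 - gregno) & 1" adds one exactly when gregno is
   even, e.g. r4 becomes r5 while r3 is left alone, so a two-word item
   always starts in r3, r5, r7 or r9 as described above.  */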
11052 /* Multi-reg args are not split between registers and stack. */
11053 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11055 /* Long long and SPE vectors are aligned on the stack.
11056 So are other 2 word items such as complex int due to
11057 a historical mistake. */
11058 if (n_words == 2)
11059 cum->words += cum->words & 1;
11060 cum->words += n_words;
11063 /* Note: gregno keeps accumulating even once we have started
11064 spilling to the stack; the overshoot is what tells
11065 expand_builtin_saveregs that spilling has begun. */
11066 cum->sysv_gregno = gregno + n_words;
11069 if (TARGET_DEBUG_ARG)
11071 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11072 cum->words, cum->fregno);
11073 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11074 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11075 fprintf (stderr, "mode = %4s, named = %d\n",
11076 GET_MODE_NAME (mode), named);
11079 else
11081 int n_words = rs6000_arg_size (mode, type);
11082 int start_words = cum->words;
11083 int align_words = rs6000_parm_start (mode, type, start_words);
11085 cum->words = align_words + n_words;
11087 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
11089 /* _Decimal128 must be passed in an even/odd float register pair.
11090 This assumes that the register number is odd when fregno is
11091 odd. */
11092 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11093 cum->fregno++;
11094 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11097 if (TARGET_DEBUG_ARG)
11099 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11100 cum->words, cum->fregno);
11101 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11102 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11103 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11104 named, align_words - start_words, depth);
11109 static void
11110 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11111 const_tree type, bool named)
11113 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11114 0);
11117 static rtx
11118 spe_build_register_parallel (machine_mode mode, int gregno)
11120 rtx r1, r3, r5, r7;
11122 switch (mode)
11124 case DFmode:
11125 r1 = gen_rtx_REG (DImode, gregno);
11126 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11127 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
11129 case DCmode:
11130 case TFmode:
11131 r1 = gen_rtx_REG (DImode, gregno);
11132 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11133 r3 = gen_rtx_REG (DImode, gregno + 2);
11134 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
11135 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
11137 case TCmode:
11138 r1 = gen_rtx_REG (DImode, gregno);
11139 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11140 r3 = gen_rtx_REG (DImode, gregno + 2);
11141 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
11142 r5 = gen_rtx_REG (DImode, gregno + 4);
11143 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
11144 r7 = gen_rtx_REG (DImode, gregno + 6);
11145 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
11146 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
11148 default:
11149 gcc_unreachable ();
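/* Editorial illustration: for DCmode starting at gregno == 5 the code
   above builds, roughly,

       (parallel:DC [(expr_list (reg:DI 5) (const_int 0))
                     (expr_list (reg:DI 7) (const_int 8))])

   i.e. the two halves live in the (r5,r6) and (r7,r8) pairs at byte
   offsets 0 and 8.  */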
11153 /* Determine where to put a SIMD argument on the SPE. */
11154 static rtx
11155 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, machine_mode mode,
11156 const_tree type)
11158 int gregno = cum->sysv_gregno;
11160 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
11161 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
11162 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
11163 || mode == DCmode || mode == TCmode))
11165 int n_words = rs6000_arg_size (mode, type);
11167 /* Doubles go in an odd/even register pair (r5/r6, etc). */
11168 if (mode == DFmode)
11169 gregno += (1 - gregno) & 1;
11171 /* Multi-reg args are not split between registers and stack. */
11172 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11173 return NULL_RTX;
11175 return spe_build_register_parallel (mode, gregno);
11177 if (cum->stdarg)
11179 int n_words = rs6000_arg_size (mode, type);
11181 /* SPE vectors are put in odd registers. */
11182 if (n_words == 2 && (gregno & 1) == 0)
11183 gregno += 1;
11185 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
11187 rtx r1, r2;
11188 machine_mode m = SImode;
11190 r1 = gen_rtx_REG (m, gregno);
11191 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
11192 r2 = gen_rtx_REG (m, gregno + 1);
11193 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
11194 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
11196 else
11197 return NULL_RTX;
11199 else
11201 if (gregno <= GP_ARG_MAX_REG)
11202 return gen_rtx_REG (mode, gregno);
11203 else
11204 return NULL_RTX;
11208 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11209 structure between cum->intoffset and bitpos to integer registers. */
11211 static void
11212 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11213 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11215 machine_mode mode;
11216 unsigned int regno;
11217 unsigned int startbit, endbit;
11218 int this_regno, intregs, intoffset;
11219 rtx reg;
11221 if (cum->intoffset == -1)
11222 return;
11224 intoffset = cum->intoffset;
11225 cum->intoffset = -1;
11227 /* If this is the trailing part of a word, try to load only that
11228 much into the register. Otherwise load the whole register. Note
11229 that in the latter case we may pick up unwanted bits. It's not a
11230 problem at the moment but we may wish to revisit it. */
11232 if (intoffset % BITS_PER_WORD != 0)
11234 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
11235 MODE_INT, 0);
11236 if (mode == BLKmode)
11238 /* We couldn't find an appropriate mode, which happens,
11239 e.g., in packed structs when there are 3 bytes to load.
11240 Move intoffset back to the beginning of the word in this
11241 case. */
11242 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11243 mode = word_mode;
11246 else
11247 mode = word_mode;
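/* Worked example (editorial): on a 64-bit target with intoffset == 40,
   the trailing part is BITS_PER_WORD - intoffset % BITS_PER_WORD == 24
   bits, and no 24-bit integer mode exists, so mode_for_size returns
   BLKmode; intoffset is then rounded down to 0 and a full word_mode
   load is used instead.  */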
11249 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11250 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11251 intregs = (endbit - startbit) / BITS_PER_WORD;
11252 this_regno = cum->words + intoffset / BITS_PER_WORD;
11254 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11255 cum->use_stack = 1;
11257 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11258 if (intregs <= 0)
11259 return;
11261 intoffset /= BITS_PER_UNIT;
11262 do
11264 regno = GP_ARG_MIN_REG + this_regno;
11265 reg = gen_rtx_REG (mode, regno);
11266 rvec[(*k)++] =
11267 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11269 this_regno += 1;
11270 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11271 mode = word_mode;
11272 intregs -= 1;
11274 while (intregs > 0);
11277 /* Recursive workhorse for the following. */
11279 static void
11280 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11281 HOST_WIDE_INT startbitpos, rtx rvec[],
11282 int *k)
11284 tree f;
11286 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11287 if (TREE_CODE (f) == FIELD_DECL)
11289 HOST_WIDE_INT bitpos = startbitpos;
11290 tree ftype = TREE_TYPE (f);
11291 machine_mode mode;
11292 if (ftype == error_mark_node)
11293 continue;
11294 mode = TYPE_MODE (ftype);
11296 if (DECL_SIZE (f) != 0
11297 && tree_fits_uhwi_p (bit_position (f)))
11298 bitpos += int_bit_position (f);
11300 /* ??? FIXME: else assume zero offset. */
11302 if (TREE_CODE (ftype) == RECORD_TYPE)
11303 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11304 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11306 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11307 #if 0
11308 switch (mode)
11310 case SCmode: mode = SFmode; break;
11311 case DCmode: mode = DFmode; break;
11312 case TCmode: mode = TFmode; break;
11313 default: break;
11315 #endif
11316 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11317 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11319 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11320 && (mode == TFmode || mode == TDmode));
11321 /* Long double or _Decimal128 split over regs and memory. */
11322 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11323 cum->use_stack = 1;
11325 rvec[(*k)++]
11326 = gen_rtx_EXPR_LIST (VOIDmode,
11327 gen_rtx_REG (mode, cum->fregno++),
11328 GEN_INT (bitpos / BITS_PER_UNIT));
11329 if (FLOAT128_2REG_P (mode))
11330 cum->fregno++;
11332 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11334 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11335 rvec[(*k)++]
11336 = gen_rtx_EXPR_LIST (VOIDmode,
11337 gen_rtx_REG (mode, cum->vregno++),
11338 GEN_INT (bitpos / BITS_PER_UNIT));
11340 else if (cum->intoffset == -1)
11341 cum->intoffset = bitpos;
11345 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11346 the register(s) to be used for each field and subfield of a struct
11347 being passed by value, along with the offset of where the
11348 register's value may be found in the block. FP fields go in FP
11349 registers, vector fields go in vector registers, and everything
11350 else goes in int registers, packed as in memory.
11352 This code is also used for function return values. RETVAL indicates
11353 whether this is the case.
11355 Much of this is taken from the SPARC V9 port, which has a similar
11356 calling convention. */
11358 static rtx
11359 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11360 bool named, bool retval)
11362 rtx rvec[FIRST_PSEUDO_REGISTER];
11363 int k = 1, kbase = 1;
11364 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11365 /* This is a copy; modifications are not visible to our caller. */
11366 CUMULATIVE_ARGS copy_cum = *orig_cum;
11367 CUMULATIVE_ARGS *cum = &copy_cum;
11369 /* Pad to 16 byte boundary if needed. */
11370 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11371 && (cum->words % 2) != 0)
11372 cum->words++;
11374 cum->intoffset = 0;
11375 cum->use_stack = 0;
11376 cum->named = named;
11378 /* Put entries into rvec[] for individual FP and vector fields, and
11379 for the chunks of memory that go in int regs. Note we start at
11380 element 1; 0 is reserved for an indication of using memory, and
11381 may or may not be filled in below. */
11382 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11383 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11385 /* If any part of the struct went on the stack, put all of it there.
11386 This hack is because the generic code for
11387 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11388 parts of the struct are not at the beginning. */
11389 if (cum->use_stack)
11391 if (retval)
11392 return NULL_RTX; /* doesn't go in registers at all */
11393 kbase = 0;
11394 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11396 if (k > 1 || cum->use_stack)
11397 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11398 else
11399 return NULL_RTX;
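/* Editorial illustration (register numbers are placeholders): for a
   named struct { double d; long l; } argument the recursion above
   typically yields, roughly,

       (parallel [(expr_list (reg:DF f1) (const_int 0))
                  (expr_list (reg:DI r4) (const_int 8))])

   with the double taken from an FPR and the integer tail flushed to a
   GPR at its byte offset within the block.  */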
11402 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11404 static rtx
11405 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11406 int align_words)
11408 int n_units;
11409 int i, k;
11410 rtx rvec[GP_ARG_NUM_REG + 1];
11412 if (align_words >= GP_ARG_NUM_REG)
11413 return NULL_RTX;
11415 n_units = rs6000_arg_size (mode, type);
11417 /* Optimize the simple case where the arg fits in one gpr, except in
11418 the case of BLKmode due to assign_parms assuming that registers are
11419 BITS_PER_WORD wide. */
11420 if (n_units == 0
11421 || (n_units == 1 && mode != BLKmode))
11422 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11424 k = 0;
11425 if (align_words + n_units > GP_ARG_NUM_REG)
11426 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11427 using a magic NULL_RTX component.
11428 This is not strictly correct. Only some of the arg belongs in
11429 memory, not all of it. However, the normal scheme using
11430 function_arg_partial_nregs can result in unusual subregs, e.g.
11431 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11432 store the whole arg to memory is often more efficient than code
11433 to store pieces, and we know that space is available in the right
11434 place for the whole arg. */
11435 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11437 i = 0;
11438 do
11440 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11441 rtx off = GEN_INT (i++ * 4);
11442 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11444 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11446 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
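/* Editorial example: with -m32 -mpowerpc64 and a 12-byte BLKmode
   argument at align_words == 6, rs6000_arg_size gives 3 words; since
   6 + 3 > GP_ARG_NUM_REG the NULL_RTX memory marker is emitted,
   followed by SImode pieces for r9 (offset 0) and r10 (offset 4),
   leaving the last word for the stack.  */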
11449 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11450 but must also be copied into the parameter save area starting at
11451 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11452 to the GPRs and/or memory. Return the number of elements used. */
11454 static int
11455 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11456 int align_words, rtx *rvec)
11458 int k = 0;
11460 if (align_words < GP_ARG_NUM_REG)
11462 int n_words = rs6000_arg_size (mode, type);
11464 if (align_words + n_words > GP_ARG_NUM_REG
11465 || mode == BLKmode
11466 || (TARGET_32BIT && TARGET_POWERPC64))
11468 /* If this is partially on the stack, then we only
11469 include the portion actually in registers here. */
11470 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11471 int i = 0;
11473 if (align_words + n_words > GP_ARG_NUM_REG)
11475 /* Not all of the arg fits in gprs. Say that it goes in memory
11476 too, using a magic NULL_RTX component. Also see comment in
11477 rs6000_mixed_function_arg for why the normal
11478 function_arg_partial_nregs scheme doesn't work in this case. */
11479 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11482 do
11484 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11485 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11486 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11488 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11490 else
11492 /* The whole arg fits in gprs. */
11493 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11494 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11497 else
11499 /* It's entirely in memory. */
11500 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11503 return k;
11506 /* RVEC is a vector of K components of an argument of mode MODE.
11507 Construct the final function_arg return value from it. */
11509 static rtx
11510 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11512 gcc_assert (k >= 1);
11514 /* Avoid returning a PARALLEL in the trivial cases. */
11515 if (k == 1)
11517 if (XEXP (rvec[0], 0) == NULL_RTX)
11518 return NULL_RTX;
11520 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11521 return XEXP (rvec[0], 0);
11524 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11527 /* Determine where to put an argument to a function.
11528 Value is zero to push the argument on the stack,
11529 or a hard register in which to store the argument.
11531 MODE is the argument's machine mode.
11532 TYPE is the data type of the argument (as a tree).
11533 This is null for libcalls where that information may
11534 not be available.
11535 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11536 the preceding args and about the function being called. It is
11537 not modified in this routine.
11538 NAMED is nonzero if this argument is a named parameter
11539 (otherwise it is an extra parameter matching an ellipsis).
11541 On RS/6000 the first eight words of non-FP are normally in registers
11542 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11543 Under V.4, the first 8 FP args are in registers.
11545 If this is floating-point and no prototype is specified, we use
11546 both an FP and integer register (or possibly FP reg and stack). Library
11547 functions (when CALL_LIBCALL is set) always have the proper types for args,
11548 so we can pass the FP value just in one register. emit_library_function
11549 doesn't support PARALLEL anyway.
11551 Note that for args passed by reference, function_arg will be called
11552 with MODE and TYPE set to that of the pointer to the arg, not the arg
11553 itself. */
11555 static rtx
11556 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11557 const_tree type, bool named)
11559 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11560 enum rs6000_abi abi = DEFAULT_ABI;
11561 machine_mode elt_mode;
11562 int n_elts;
11564 /* Return a marker to indicate whether CR1 needs to set or clear the
11565 bit that V.4 uses to say fp args were passed in registers.
11566 Assume that we don't need the marker for software floating point,
11567 or compiler generated library calls. */
11568 if (mode == VOIDmode)
11570 if (abi == ABI_V4
11571 && (cum->call_cookie & CALL_LIBCALL) == 0
11572 && (cum->stdarg
11573 || (cum->nargs_prototype < 0
11574 && (cum->prototype || TARGET_NO_PROTOTYPE))))
11576 /* For the SPE, we need to crxor CR6 always. */
11577 if (TARGET_SPE_ABI)
11578 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
11579 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
11580 return GEN_INT (cum->call_cookie
11581 | ((cum->fregno == FP_ARG_MIN_REG)
11582 ? CALL_V4_SET_FP_ARGS
11583 : CALL_V4_CLEAR_FP_ARGS));
11586 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11589 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11591 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11593 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11594 if (rslt != NULL_RTX)
11595 return rslt;
11596 /* Else fall through to usual handling. */
11599 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11601 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11602 rtx r, off;
11603 int i, k = 0;
11605 /* Do we also need to pass this argument in the parameter save area?
11606 Library support functions for IEEE 128-bit are assumed to not need the
11607 value passed both in GPRs and in vector registers. */
11608 if (TARGET_64BIT && !cum->prototype
11609 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11611 int align_words = ROUND_UP (cum->words, 2);
11612 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11615 /* Describe where this argument goes in the vector registers. */
11616 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11618 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11619 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11620 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11623 return rs6000_finish_function_arg (mode, rvec, k);
11625 else if (TARGET_ALTIVEC_ABI
11626 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11627 || (type && TREE_CODE (type) == VECTOR_TYPE
11628 && int_size_in_bytes (type) == 16)))
11630 if (named || abi == ABI_V4)
11631 return NULL_RTX;
11632 else
11634 /* Vector parameters to varargs functions under AIX or Darwin
11635 get passed in memory and possibly also in GPRs. */
11636 int align, align_words, n_words;
11637 machine_mode part_mode;
11639 /* Vector parameters must be 16-byte aligned. In 32-bit
11640 mode this means we need to take into account the offset
11641 to the parameter save area. In 64-bit mode, they just
11642 have to start on an even word, since the parameter save
11643 area is 16-byte aligned. */
11644 if (TARGET_32BIT)
11645 align = -(rs6000_parm_offset () + cum->words) & 3;
11646 else
11647 align = cum->words & 1;
11648 align_words = cum->words + align;
11650 /* Out of registers? Memory, then. */
11651 if (align_words >= GP_ARG_NUM_REG)
11652 return NULL_RTX;
11654 if (TARGET_32BIT && TARGET_POWERPC64)
11655 return rs6000_mixed_function_arg (mode, type, align_words);
11657 /* The vector value goes in GPRs. Only the part of the
11658 value in GPRs is reported here. */
11659 part_mode = mode;
11660 n_words = rs6000_arg_size (mode, type);
11661 if (align_words + n_words > GP_ARG_NUM_REG)
11662 /* Fortunately, there are only two possibilities: the value
11663 is either wholly in GPRs or half in GPRs and half not. */
11664 part_mode = DImode;
11666 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11669 else if (TARGET_SPE_ABI && TARGET_SPE
11670 && (SPE_VECTOR_MODE (mode)
11671 || (TARGET_E500_DOUBLE && (mode == DFmode
11672 || mode == DCmode
11673 || mode == TFmode
11674 || mode == TCmode))))
11675 return rs6000_spe_function_arg (cum, mode, type);
11677 else if (abi == ABI_V4)
11679 if (abi_v4_pass_in_fpr (mode))
11681 /* _Decimal128 must use an even/odd register pair. This assumes
11682 that the register number is odd when fregno is odd. */
11683 if (mode == TDmode && (cum->fregno % 2) == 1)
11684 cum->fregno++;
11686 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11687 <= FP_ARG_V4_MAX_REG)
11688 return gen_rtx_REG (mode, cum->fregno);
11689 else
11690 return NULL_RTX;
11692 else
11694 int n_words = rs6000_arg_size (mode, type);
11695 int gregno = cum->sysv_gregno;
11697 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
11698 (r7,r8) or (r9,r10). So is any other 2-word item, such as
11699 complex int, due to a historical mistake. */
11700 if (n_words == 2)
11701 gregno += (1 - gregno) & 1;
11703 /* Multi-reg args are not split between registers and stack. */
11704 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11705 return NULL_RTX;
11707 if (TARGET_32BIT && TARGET_POWERPC64)
11708 return rs6000_mixed_function_arg (mode, type,
11709 gregno - GP_ARG_MIN_REG);
11710 return gen_rtx_REG (mode, gregno);
11713 else
11715 int align_words = rs6000_parm_start (mode, type, cum->words);
11717 /* _Decimal128 must be passed in an even/odd float register pair.
11718 This assumes that the register number is odd when fregno is odd. */
11719 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11720 cum->fregno++;
11722 if (USE_FP_FOR_ARG_P (cum, elt_mode))
11724 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11725 rtx r, off;
11726 int i, k = 0;
11727 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11728 int fpr_words;
11730 /* Do we also need to pass this argument in the parameter
11731 save area? */
11732 if (type && (cum->nargs_prototype <= 0
11733 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11734 && TARGET_XL_COMPAT
11735 && align_words >= GP_ARG_NUM_REG)))
11736 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11738 /* Describe where this argument goes in the fprs. */
11739 for (i = 0; i < n_elts
11740 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11742 /* Check if the argument is split over registers and memory.
11743 This can only ever happen for long double or _Decimal128;
11744 complex types are handled via split_complex_arg. */
11745 machine_mode fmode = elt_mode;
11746 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11748 gcc_assert (FLOAT128_2REG_P (fmode));
11749 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11752 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11753 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11754 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11757 /* If there were not enough FPRs to hold the argument, the rest
11758 usually goes into memory. However, if the current position
11759 is still within the register parameter area, a portion may
11760 actually have to go into GPRs.
11762 Note that it may happen that the portion of the argument
11763 passed in the first "half" of the first GPR was already
11764 passed in the last FPR as well.
11766 For unnamed arguments, we already set up GPRs to cover the
11767 whole argument in rs6000_psave_function_arg, so there is
11768 nothing further to do at this point. */
11769 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11770 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11771 && cum->nargs_prototype > 0)
11773 static bool warned;
11775 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11776 int n_words = rs6000_arg_size (mode, type);
11778 align_words += fpr_words;
11779 n_words -= fpr_words;
11781 do
11783 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11784 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11785 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11787 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11789 if (!warned && warn_psabi)
11791 warned = true;
11792 inform (input_location,
11793 "the ABI of passing homogeneous float aggregates"
11794 " has changed in GCC 5");
11798 return rs6000_finish_function_arg (mode, rvec, k);
11800 else if (align_words < GP_ARG_NUM_REG)
11802 if (TARGET_32BIT && TARGET_POWERPC64)
11803 return rs6000_mixed_function_arg (mode, type, align_words);
11805 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11807 else
11808 return NULL_RTX;
11812 /* For an arg passed partly in registers and partly in memory, this is
11813 the number of bytes passed in registers. For args passed entirely in
11814 registers or entirely in memory, zero. When an arg is described by a
11815 PARALLEL, perhaps using more than one register type, this function
11816 returns the number of bytes used by the first element of the PARALLEL. */
11818 static int
11819 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
11820 tree type, bool named)
11822 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11823 bool passed_in_gprs = true;
11824 int ret = 0;
11825 int align_words;
11826 machine_mode elt_mode;
11827 int n_elts;
11829 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11831 if (DEFAULT_ABI == ABI_V4)
11832 return 0;
11834 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11836 /* If we are passing this arg in the fixed parameter save area (gprs or
11837 memory) as well as VRs, we do not use the partial bytes mechanism;
11838 instead, rs6000_function_arg will return a PARALLEL including a memory
11839 element as necessary. Library support functions for IEEE 128-bit are
11840 assumed to not need the value passed both in GPRs and in vector
11841 registers. */
11842 if (TARGET_64BIT && !cum->prototype
11843 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11844 return 0;
11846 /* Otherwise, we pass in VRs only. Check for partial copies. */
11847 passed_in_gprs = false;
11848 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
11849 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
11852 /* In this complicated case we just disable the partial_nregs code. */
11853 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11854 return 0;
11856 align_words = rs6000_parm_start (mode, type, cum->words);
11858 if (USE_FP_FOR_ARG_P (cum, elt_mode))
11860 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11862 /* If we are passing this arg in the fixed parameter save area
11863 (gprs or memory) as well as FPRs, we do not use the partial
11864 bytes mechanism; instead, rs6000_function_arg will return a
11865 PARALLEL including a memory element as necessary. */
11866 if (type
11867 && (cum->nargs_prototype <= 0
11868 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11869 && TARGET_XL_COMPAT
11870 && align_words >= GP_ARG_NUM_REG)))
11871 return 0;
11873 /* Otherwise, we pass in FPRs only. Check for partial copies. */
11874 passed_in_gprs = false;
11875 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
11877 /* Compute number of bytes / words passed in FPRs. If there
11878 is still space available in the register parameter area
11879 *after* that amount, a part of the argument will be passed
11880 in GPRs. In that case, the total amount passed in any
11881 registers is equal to the amount that would have been passed
11882 in GPRs if everything were passed there, so we fall back to
11883 the GPR code below to compute the appropriate value. */
11884 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
11885 * MIN (8, GET_MODE_SIZE (elt_mode)));
11886 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
11888 if (align_words + fpr_words < GP_ARG_NUM_REG)
11889 passed_in_gprs = true;
11890 else
11891 ret = fpr;
11895 if (passed_in_gprs
11896 && align_words < GP_ARG_NUM_REG
11897 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
11898 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
11900 if (ret != 0 && TARGET_DEBUG_ARG)
11901 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
11903 return ret;
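/* Worked example (editorial): a named homogeneous aggregate of four
   doubles arriving with only two FPRs left (cum->fregno ==
   FP_ARG_MAX_REG - 1) gives fpr = 2 * 8 == 16 bytes.  If the parameter
   words after those two doubles still lie below GP_ARG_NUM_REG, the
   GPR computation above recomputes the amount; otherwise 16 is
   returned.  */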
11906 /* A C expression that indicates when an argument must be passed by
11907 reference. If nonzero for an argument, a copy of that argument is
11908 made in memory and a pointer to the argument is passed instead of
11909 the argument itself. The pointer is passed in whatever way is
11910 appropriate for passing a pointer to that type.
11912 Under V.4, aggregates and long double are passed by reference.
11914 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
11915 reference unless the AltiVec vector extension ABI is in force.
11917 As an extension to all ABIs, variable sized types are passed by
11918 reference. */
11920 static bool
11921 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
11922 machine_mode mode, const_tree type,
11923 bool named ATTRIBUTE_UNUSED)
11925 if (!type)
11926 return 0;
11928 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11929 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11931 if (TARGET_DEBUG_ARG)
11932 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
11933 return 1;
11936 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
11938 if (TARGET_DEBUG_ARG)
11939 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
11940 return 1;
11943 if (int_size_in_bytes (type) < 0)
11945 if (TARGET_DEBUG_ARG)
11946 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
11947 return 1;
11950 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11951 modes only exist for GCC vector types if -maltivec. */
11952 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
11954 if (TARGET_DEBUG_ARG)
11955 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
11956 return 1;
11959 /* Pass synthetic vectors in memory. */
11960 if (TREE_CODE (type) == VECTOR_TYPE
11961 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11963 static bool warned_for_pass_big_vectors = false;
11964 if (TARGET_DEBUG_ARG)
11965 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
11966 if (!warned_for_pass_big_vectors)
11968 warning (0, "GCC vector passed by reference: "
11969 "non-standard ABI extension with no compatibility guarantee");
11970 warned_for_pass_big_vectors = true;
11972 return 1;
11975 return 0;
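/* Editorial examples of the rules above: under the V.4 ABI both

       struct point { int x, y; };                  -- aggregate
       int v32 __attribute__ ((vector_size (32)));  -- synthetic vector

   are passed by reference, the latter under every ABI, while plain
   ints and doubles never are.  */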
11978 /* Process a parameter of type TYPE after ARGS_SO_FAR parameters have
11979 already been processed. Return true if the parameter must be passed
11980 (fully or partially) on the stack. */
11982 static bool
11983 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
11985 machine_mode mode;
11986 int unsignedp;
11987 rtx entry_parm;
11989 /* Catch errors. */
11990 if (type == NULL || type == error_mark_node)
11991 return true;
11993 /* Handle types with no storage requirement. */
11994 if (TYPE_MODE (type) == VOIDmode)
11995 return false;
11997 /* Handle complex types. */
11998 if (TREE_CODE (type) == COMPLEX_TYPE)
11999 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12000 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12002 /* Handle transparent aggregates. */
12003 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12004 && TYPE_TRANSPARENT_AGGR (type))
12005 type = TREE_TYPE (first_field (type));
12007 /* See if this arg was passed by invisible reference. */
12008 if (pass_by_reference (get_cumulative_args (args_so_far),
12009 TYPE_MODE (type), type, true))
12010 type = build_pointer_type (type);
12012 /* Find mode as it is passed by the ABI. */
12013 unsignedp = TYPE_UNSIGNED (type);
12014 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12016 /* If we must pass in stack, we need a stack. */
12017 if (rs6000_must_pass_in_stack (mode, type))
12018 return true;
12020 /* If there is no incoming register, we need a stack. */
12021 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12022 if (entry_parm == NULL)
12023 return true;
12025 /* Likewise if we need to pass both in registers and on the stack. */
12026 if (GET_CODE (entry_parm) == PARALLEL
12027 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12028 return true;
12030 /* Also true if we're partially in registers and partially not. */
12031 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12032 return true;
12034 /* Update info on where next arg arrives in registers. */
12035 rs6000_function_arg_advance (args_so_far, mode, type, true);
12036 return false;
12039 /* Return true if FUN has no prototype, has a variable argument
12040 list, or passes any parameter in memory. */
12042 static bool
12043 rs6000_function_parms_need_stack (tree fun, bool incoming)
12045 tree fntype, result;
12046 CUMULATIVE_ARGS args_so_far_v;
12047 cumulative_args_t args_so_far;
12049 if (!fun)
12050 /* Must be a libcall, all of which only use reg parms. */
12051 return false;
12053 fntype = fun;
12054 if (!TYPE_P (fun))
12055 fntype = TREE_TYPE (fun);
12057 /* Varargs functions need the parameter save area. */
12058 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12059 return true;
12061 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12062 args_so_far = pack_cumulative_args (&args_so_far_v);
12064 /* When incoming, we will have been passed the function decl.
12065 It is necessary to use the decl to handle K&R style functions,
12066 where TYPE_ARG_TYPES may not be available. */
12067 if (incoming)
12069 gcc_assert (DECL_P (fun));
12070 result = DECL_RESULT (fun);
12072 else
12073 result = TREE_TYPE (fntype);
12075 if (result && aggregate_value_p (result, fntype))
12077 if (!TYPE_P (result))
12078 result = TREE_TYPE (result);
12079 result = build_pointer_type (result);
12080 rs6000_parm_needs_stack (args_so_far, result);
12083 if (incoming)
12085 tree parm;
12087 for (parm = DECL_ARGUMENTS (fun);
12088 parm && parm != void_list_node;
12089 parm = TREE_CHAIN (parm))
12090 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12091 return true;
12093 else
12095 function_args_iterator args_iter;
12096 tree arg_type;
12098 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12099 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12100 return true;
12103 return false;
12106 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12107 usually a constant depending on the ABI. However, in the ELFv2 ABI
12108 the register parameter area is optional when calling a function that
12109 has a prototype in scope, has no variable argument list, and passes
12110 all parameters in registers. */
12112 int
12113 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12115 int reg_parm_stack_space;
12117 switch (DEFAULT_ABI)
12119 default:
12120 reg_parm_stack_space = 0;
12121 break;
12123 case ABI_AIX:
12124 case ABI_DARWIN:
12125 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12126 break;
12128 case ABI_ELFv2:
12129 /* ??? Recomputing this every time is a bit expensive. Is there
12130 a place to cache this information? */
12131 if (rs6000_function_parms_need_stack (fun, incoming))
12132 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12133 else
12134 reg_parm_stack_space = 0;
12135 break;
12138 return reg_parm_stack_space;
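/* Editorial example: under ELFv2,

       int f (int a, int b);   -- prototyped, all args in regs: 0 bytes
       int g (int a, ...);     -- varargs: 64 bytes on 64-bit targets

   whereas AIX and Darwin always reserve the full 64 (or 32) bytes.  */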
12141 static void
12142 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12144 int i;
12145 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12147 if (nregs == 0)
12148 return;
12150 for (i = 0; i < nregs; i++)
12152 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12153 if (reload_completed)
12155 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12156 tem = NULL_RTX;
12157 else
12158 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12159 i * GET_MODE_SIZE (reg_mode));
12161 else
12162 tem = replace_equiv_address (tem, XEXP (tem, 0));
12164 gcc_assert (tem);
12166 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12170 /* Perform any actions needed for a function that is receiving a
12171 variable number of arguments.
12173 CUM is as above.
12175 MODE and TYPE are the mode and type of the current parameter.
12177 PRETEND_SIZE is a variable that should be set to the amount of stack
12178 that must be pushed by the prolog to pretend that our caller pushed
12179 it.
12181 Normally, this macro will push all remaining incoming registers on the
12182 stack and set PRETEND_SIZE to the length of the registers pushed. */
12184 static void
12185 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12186 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12187 int no_rtl)
12189 CUMULATIVE_ARGS next_cum;
12190 int reg_size = TARGET_32BIT ? 4 : 8;
12191 rtx save_area = NULL_RTX, mem;
12192 int first_reg_offset;
12193 alias_set_type set;
12195 /* Skip the last named argument. */
12196 next_cum = *get_cumulative_args (cum);
12197 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12199 if (DEFAULT_ABI == ABI_V4)
12201 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12203 if (! no_rtl)
12205 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12206 HOST_WIDE_INT offset = 0;
12208 /* Try to optimize the size of the varargs save area.
12209 The ABI requires that ap.reg_save_area is doubleword
12210 aligned, but we don't need to allocate space for all
12211 the bytes, only for those into which we will actually save
12212 anything. */
12213 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12214 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12215 if (TARGET_HARD_FLOAT && TARGET_FPRS
12216 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12217 && cfun->va_list_fpr_size)
12219 if (gpr_reg_num)
12220 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12221 * UNITS_PER_FP_WORD;
12222 if (cfun->va_list_fpr_size
12223 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12224 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12225 else
12226 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12227 * UNITS_PER_FP_WORD;
12229 if (gpr_reg_num)
12231 offset = -((first_reg_offset * reg_size) & ~7);
12232 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12234 gpr_reg_num = cfun->va_list_gpr_size;
12235 if (reg_size == 4 && (first_reg_offset & 1))
12236 gpr_reg_num++;
12238 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12240 else if (fpr_size)
12241 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12242 * UNITS_PER_FP_WORD
12243 - (int) (GP_ARG_NUM_REG * reg_size);
12245 if (gpr_size + fpr_size)
12247 rtx reg_save_area
12248 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12249 gcc_assert (GET_CODE (reg_save_area) == MEM);
12250 reg_save_area = XEXP (reg_save_area, 0);
12251 if (GET_CODE (reg_save_area) == PLUS)
12253 gcc_assert (XEXP (reg_save_area, 0)
12254 == virtual_stack_vars_rtx);
12255 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12256 offset += INTVAL (XEXP (reg_save_area, 1));
12258 else
12259 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12262 cfun->machine->varargs_save_offset = offset;
12263 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12266 else
12268 first_reg_offset = next_cum.words;
12269 save_area = crtl->args.internal_arg_pointer;
12271 if (targetm.calls.must_pass_in_stack (mode, type))
12272 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12275 set = get_varargs_alias_set ();
12276 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12277 && cfun->va_list_gpr_size)
12279 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12281 if (va_list_gpr_counter_field)
12282 /* V4 va_list_gpr_size counts number of registers needed. */
12283 n_gpr = cfun->va_list_gpr_size;
12284 else
12285 /* char * va_list instead counts number of bytes needed. */
12286 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12288 if (nregs > n_gpr)
12289 nregs = n_gpr;
12291 mem = gen_rtx_MEM (BLKmode,
12292 plus_constant (Pmode, save_area,
12293 first_reg_offset * reg_size));
12294 MEM_NOTRAP_P (mem) = 1;
12295 set_mem_alias_set (mem, set);
12296 set_mem_align (mem, BITS_PER_WORD);
12298 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12299 nregs);
12302 /* Save FP registers if needed. */
12303 if (DEFAULT_ABI == ABI_V4
12304 && TARGET_HARD_FLOAT && TARGET_FPRS
12305 && ! no_rtl
12306 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12307 && cfun->va_list_fpr_size)
12309 int fregno = next_cum.fregno, nregs;
12310 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12311 rtx lab = gen_label_rtx ();
12312 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12313 * UNITS_PER_FP_WORD);
12315 emit_jump_insn
12316 (gen_rtx_SET (pc_rtx,
12317 gen_rtx_IF_THEN_ELSE (VOIDmode,
12318 gen_rtx_NE (VOIDmode, cr1,
12319 const0_rtx),
12320 gen_rtx_LABEL_REF (VOIDmode, lab),
12321 pc_rtx)));
12323 for (nregs = 0;
12324 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12325 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12327 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12328 ? DFmode : SFmode,
12329 plus_constant (Pmode, save_area, off));
12330 MEM_NOTRAP_P (mem) = 1;
12331 set_mem_alias_set (mem, set);
12332 set_mem_align (mem, GET_MODE_ALIGNMENT (
12333 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12334 ? DFmode : SFmode));
12335 emit_move_insn (mem, gen_rtx_REG (
12336 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12337 ? DFmode : SFmode, fregno));
12340 emit_label (lab);
12344 /* Create the va_list data type. */
12346 static tree
12347 rs6000_build_builtin_va_list (void)
12349 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12351 /* For AIX, prefer 'char *' because that's what the system
12352 header files like. */
12353 if (DEFAULT_ABI != ABI_V4)
12354 return build_pointer_type (char_type_node);
12356 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12357 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12358 get_identifier ("__va_list_tag"), record);
12360 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12361 unsigned_char_type_node);
12362 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12363 unsigned_char_type_node);
12364 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12365 every user file. */
12366 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12367 get_identifier ("reserved"), short_unsigned_type_node);
12368 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12369 get_identifier ("overflow_arg_area"),
12370 ptr_type_node);
12371 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12372 get_identifier ("reg_save_area"),
12373 ptr_type_node);
12375 va_list_gpr_counter_field = f_gpr;
12376 va_list_fpr_counter_field = f_fpr;
12378 DECL_FIELD_CONTEXT (f_gpr) = record;
12379 DECL_FIELD_CONTEXT (f_fpr) = record;
12380 DECL_FIELD_CONTEXT (f_res) = record;
12381 DECL_FIELD_CONTEXT (f_ovf) = record;
12382 DECL_FIELD_CONTEXT (f_sav) = record;
12384 TYPE_STUB_DECL (record) = type_decl;
12385 TYPE_NAME (record) = type_decl;
12386 TYPE_FIELDS (record) = f_gpr;
12387 DECL_CHAIN (f_gpr) = f_fpr;
12388 DECL_CHAIN (f_fpr) = f_res;
12389 DECL_CHAIN (f_res) = f_ovf;
12390 DECL_CHAIN (f_ovf) = f_sav;
12392 layout_type (record);
12394 /* The correct type is an array type of one element. */
12395 return build_array_type (record, build_index_type (size_zero_node));
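/* Editorial sketch: the record built above corresponds roughly to the
   familiar SysV PowerPC declaration

       typedef struct __va_list_tag {
         unsigned char gpr;         -- next GPR slot (0..8)
         unsigned char fpr;         -- next FPR slot (0..8)
         unsigned short reserved;   -- named padding for -Wpadded
         void *overflow_arg_area;   -- args beyond the save area
         void *reg_save_area;       -- prologue-dumped r3-r10 / f1-f8
       } va_list[1];

   the one-element array gives va_list its by-reference behavior.  */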
12398 /* Implement va_start. */
12400 static void
12401 rs6000_va_start (tree valist, rtx nextarg)
12403 HOST_WIDE_INT words, n_gpr, n_fpr;
12404 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12405 tree gpr, fpr, ovf, sav, t;
12407 /* Only SVR4 needs something special. */
12408 if (DEFAULT_ABI != ABI_V4)
12410 std_expand_builtin_va_start (valist, nextarg);
12411 return;
12414 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12415 f_fpr = DECL_CHAIN (f_gpr);
12416 f_res = DECL_CHAIN (f_fpr);
12417 f_ovf = DECL_CHAIN (f_res);
12418 f_sav = DECL_CHAIN (f_ovf);
12420 valist = build_simple_mem_ref (valist);
12421 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12422 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12423 f_fpr, NULL_TREE);
12424 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12425 f_ovf, NULL_TREE);
12426 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12427 f_sav, NULL_TREE);
12429 /* Count number of gp and fp argument registers used. */
12430 words = crtl->args.info.words;
12431 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12432 GP_ARG_NUM_REG);
12433 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12434 FP_ARG_NUM_REG);
12436 if (TARGET_DEBUG_ARG)
12437 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12438 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12439 words, n_gpr, n_fpr);
12441 if (cfun->va_list_gpr_size)
12443 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12444 build_int_cst (NULL_TREE, n_gpr));
12445 TREE_SIDE_EFFECTS (t) = 1;
12446 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12449 if (cfun->va_list_fpr_size)
12451 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12452 build_int_cst (NULL_TREE, n_fpr));
12453 TREE_SIDE_EFFECTS (t) = 1;
12454 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12456 #ifdef HAVE_AS_GNU_ATTRIBUTE
12457 if (call_ABI_of_interest (cfun->decl))
12458 rs6000_passes_float = true;
12459 #endif
12462 /* Find the overflow area. */
12463 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12464 if (words != 0)
12465 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12466 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12467 TREE_SIDE_EFFECTS (t) = 1;
12468 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12470 /* If there were no va_arg invocations, don't set up the register
12471 save area. */
12472 if (!cfun->va_list_gpr_size
12473 && !cfun->va_list_fpr_size
12474 && n_gpr < GP_ARG_NUM_REG
12475 && n_fpr < FP_ARG_V4_MAX_REG)
12476 return;
12478 /* Find the register save area. */
12479 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12480 if (cfun->machine->varargs_save_offset)
12481 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12482 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12483 TREE_SIDE_EFFECTS (t) = 1;
12484 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12487 /* Implement va_arg. */
12489 static tree
12490 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12491 gimple_seq *post_p)
12493 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12494 tree gpr, fpr, ovf, sav, reg, t, u;
12495 int size, rsize, n_reg, sav_ofs, sav_scale;
12496 tree lab_false, lab_over, addr;
12497 int align;
12498 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12499 int regalign = 0;
12500 gimple *stmt;
12502 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12504 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12505 return build_va_arg_indirect_ref (t);
12508 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12509 earlier version of gcc, with the property that it always applied alignment
12510 adjustments to the va-args (even for zero-sized types). The cheapest way
12511 to deal with this is to replicate the effect of the part of
12512 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12513 of relevance.
12514 We don't need to check for pass-by-reference because of the test above.
12515 We can return a simplified answer, since we know there's no offset to add. */
12517 if (((TARGET_MACHO
12518 && rs6000_darwin64_abi)
12519 || DEFAULT_ABI == ABI_ELFv2
12520 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12521 && integer_zerop (TYPE_SIZE (type)))
12523 unsigned HOST_WIDE_INT align, boundary;
12524 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12525 align = PARM_BOUNDARY / BITS_PER_UNIT;
12526 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12527 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12528 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12529 boundary /= BITS_PER_UNIT;
12530 if (boundary > align)
12532 tree t;
12533 /* This updates arg ptr by the amount that would be necessary
12534 to align the zero-sized (but not zero-alignment) item. */
12535 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12536 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12537 gimplify_and_add (t, pre_p);
12539 t = fold_convert (sizetype, valist_tmp);
12540 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12541 fold_convert (TREE_TYPE (valist),
12542 fold_build2 (BIT_AND_EXPR, sizetype, t,
12543 size_int (-boundary))));
12544 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12545 gimplify_and_add (t, pre_p);
12547 /* Since it is zero-sized there's no increment for the item itself. */
12548 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12549 return build_va_arg_indirect_ref (valist_tmp);
12552 if (DEFAULT_ABI != ABI_V4)
12554 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12556 tree elem_type = TREE_TYPE (type);
12557 machine_mode elem_mode = TYPE_MODE (elem_type);
12558 int elem_size = GET_MODE_SIZE (elem_mode);
12560 if (elem_size < UNITS_PER_WORD)
12562 tree real_part, imag_part;
12563 gimple_seq post = NULL;
12565 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12566 &post);
12567 /* Copy the value into a temporary, lest the formal temporary
12568 be reused out from under us. */
12569 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12570 gimple_seq_add_seq (pre_p, post);
12572 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12573 post_p);
12575 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12579 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12582 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12583 f_fpr = DECL_CHAIN (f_gpr);
12584 f_res = DECL_CHAIN (f_fpr);
12585 f_ovf = DECL_CHAIN (f_res);
12586 f_sav = DECL_CHAIN (f_ovf);
12588 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12589 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12590 f_fpr, NULL_TREE);
12591 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12592 f_ovf, NULL_TREE);
12593 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12594 f_sav, NULL_TREE);
12596 size = int_size_in_bytes (type);
12597 rsize = (size + 3) / 4;
12598 align = 1;
12600 machine_mode mode = TYPE_MODE (type);
12601 if (abi_v4_pass_in_fpr (mode))
12603 /* FP args go in FP registers, if present. */
12604 reg = fpr;
12605 n_reg = (size + 7) / 8;
12606 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
12607 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
12608 if (mode != SFmode && mode != SDmode)
12609 align = 8;
12611 else
12613 /* Otherwise into GP registers. */
12614 reg = gpr;
12615 n_reg = rsize;
12616 sav_ofs = 0;
12617 sav_scale = 4;
12618 if (n_reg == 2)
12619 align = 8;
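/* Worked example (editorial): a double on a 32-bit hard-float V.4
   target takes the FPR branch above with n_reg = 1, sav_ofs = 32
   (past the eight 4-byte GPR slots) and sav_scale = 8, while a long
   long takes the GPR branch with n_reg = 2, sav_scale = 4 and
   align = 8.  */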
12622 /* Pull the value out of the saved registers.... */
12624 lab_over = NULL;
12625 addr = create_tmp_var (ptr_type_node, "addr");
12627 /* AltiVec vectors never go in registers when -mabi=altivec. */
12628 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12629 align = 16;
12630 else
12632 lab_false = create_artificial_label (input_location);
12633 lab_over = create_artificial_label (input_location);
12635 /* Long long and SPE vectors are aligned in the registers.
12636 So is any other 2-GPR item, such as complex int, due to a
12637 historical mistake. */
12638 u = reg;
12639 if (n_reg == 2 && reg == gpr)
12641 regalign = 1;
12642 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12643 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12644 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12645 unshare_expr (reg), u);
12647 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12648 reg number is 0 for f1, so we want to make it odd. */
12649 else if (reg == fpr && mode == TDmode)
12651 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12652 build_int_cst (TREE_TYPE (reg), 1));
12653 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12656 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12657 t = build2 (GE_EXPR, boolean_type_node, u, t);
12658 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12659 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12660 gimplify_and_add (t, pre_p);
12662 t = sav;
12663 if (sav_ofs)
12664 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12666 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12667 build_int_cst (TREE_TYPE (reg), n_reg));
12668 u = fold_convert (sizetype, u);
12669 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12670 t = fold_build_pointer_plus (t, u);
12672 /* _Decimal32 varargs are located in the second word of the 64-bit
12673 FP register for 32-bit binaries. */
12674 if (TARGET_32BIT
12675 && TARGET_HARD_FLOAT && TARGET_FPRS
12676 && mode == SDmode)
12677 t = fold_build_pointer_plus_hwi (t, size);
12679 gimplify_assign (addr, t, pre_p);
12681 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12683 stmt = gimple_build_label (lab_false);
12684 gimple_seq_add_stmt (pre_p, stmt);
12686 if ((n_reg == 2 && !regalign) || n_reg > 2)
12688 /* Ensure that we don't find any more args in regs.
12689 Alignment has already taken care of the special cases. */
12690 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12694 /* ... otherwise out of the overflow area. */
12696 /* Care for on-stack alignment if needed. */
12697 t = ovf;
12698 if (align != 1)
12700 t = fold_build_pointer_plus_hwi (t, align - 1);
12701 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12702 build_int_cst (TREE_TYPE (t), -align));
12704 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12706 gimplify_assign (unshare_expr (addr), t, pre_p);
12708 t = fold_build_pointer_plus_hwi (t, size);
12709 gimplify_assign (unshare_expr (ovf), t, pre_p);
12711 if (lab_over)
12713 stmt = gimple_build_label (lab_over);
12714 gimple_seq_add_stmt (pre_p, stmt);
12717 if (STRICT_ALIGNMENT
12718 && (TYPE_ALIGN (type)
12719 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12721 /* The value (of type complex double, for example) may not be
12722 aligned in memory in the saved registers, so copy via a
12723 temporary. (This is the same code as used for SPARC.) */
12724 tree tmp = create_tmp_var (type, "va_arg_tmp");
12725 tree dest_addr = build_fold_addr_expr (tmp);
12727 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12728 3, dest_addr, addr, size_int (rsize * 4));
12730 gimplify_and_add (copy, pre_p);
12731 addr = dest_addr;
12734 addr = fold_convert (ptrtype, addr);
12735 return build_va_arg_indirect_ref (addr);
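/* Illustrative sketch (not from the original sources) of the two paths
   built above, assuming a 32-bit SVR4 target with hard double floats:
   for

     double d = va_arg (ap, double);

   we get reg == fpr, n_reg == 1 and sav_scale == 8, so while fewer than
   eight FP argument registers are used the value is loaded from the
   register save area at sav + 8*4 + fpr*8; once fpr reaches 8 the GE
   test branches to lab_false and the value is read from the 8-byte
   aligned overflow area instead.  */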
12738 /* Builtins. */
12740 static void
12741 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12743 tree t;
12744 unsigned classify = rs6000_builtin_info[(int)code].attr;
12745 const char *attr_string = "";
12747 gcc_assert (name != NULL);
12748 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
12750 if (rs6000_builtin_decls[(int)code])
12751 fatal_error (input_location,
12752 "internal error: builtin function %s already processed", name);
12754 rs6000_builtin_decls[(int)code] = t =
12755 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12757 /* Set any special attributes. */
12758 if ((classify & RS6000_BTC_CONST) != 0)
12760 /* const function, function only depends on the inputs. */
12761 TREE_READONLY (t) = 1;
12762 TREE_NOTHROW (t) = 1;
12763 attr_string = ", const";
12765 else if ((classify & RS6000_BTC_PURE) != 0)
12767 /* pure function, function can read global memory, but does not set any
12768 external state. */
12769 DECL_PURE_P (t) = 1;
12770 TREE_NOTHROW (t) = 1;
12771 attr_string = ", pure";
12773 else if ((classify & RS6000_BTC_FP) != 0)
12775 /* Function is a math function. If -frounding-math is in effect, treat
12776 the function as not reading global memory, but allow it arbitrary side
12777 effects. If it is not, assume the function is const. This mimics the
12778 ATTR_MATHFN_FPROUNDING attribute in builtin-attribute.def that is used
12779 for the math functions. */
12780 TREE_NOTHROW (t) = 1;
12781 if (flag_rounding_math)
12783 DECL_PURE_P (t) = 1;
12784 DECL_IS_NOVOPS (t) = 1;
12785 attr_string = ", fp, pure";
12787 else
12789 TREE_READONLY (t) = 1;
12790 attr_string = ", fp, const";
12793 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12794 gcc_unreachable ();
12796 if (TARGET_DEBUG_BUILTIN)
12797 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12798 (int)code, name, attr_string);
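/* For example (the type node shown is illustrative; the real
   function-type nodes are constructed elsewhere in this file):

     def_builtin ("__builtin_altivec_vmaddfp",
		  v4sf_ftype_v4sf_v4sf_v4sf, ALTIVEC_BUILTIN_VMADDFP);

   records the decl in rs6000_builtin_decls and marks it const, pure or
   fp according to its RS6000_BTC_* attribute bits.  */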
12801 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12803 #undef RS6000_BUILTIN_0
12804 #undef RS6000_BUILTIN_1
12805 #undef RS6000_BUILTIN_2
12806 #undef RS6000_BUILTIN_3
12807 #undef RS6000_BUILTIN_A
12808 #undef RS6000_BUILTIN_D
12809 #undef RS6000_BUILTIN_E
12810 #undef RS6000_BUILTIN_H
12811 #undef RS6000_BUILTIN_P
12812 #undef RS6000_BUILTIN_Q
12813 #undef RS6000_BUILTIN_S
12814 #undef RS6000_BUILTIN_X
12816 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12817 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12818 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12819 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
12820 { MASK, ICODE, NAME, ENUM },
12822 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12823 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12824 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12825 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12826 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12827 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12828 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12829 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12831 static const struct builtin_description bdesc_3arg[] =
12833 #include "rs6000-builtin.def"
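/* The bdesc_* tables in this file are built with the X-macro idiom:
   before each table every RS6000_BUILTIN_<kind> macro is redefined so
   that only entries of the wanted kind expand to an initializer.  For
   a hypothetical rs6000-builtin.def entry

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP,
		       "__builtin_altivec_vmaddfp",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_FP,
		       CODE_FOR_fmav4sf4)

   the table above gains

     { RS6000_BTM_ALTIVEC, CODE_FOR_fmav4sf4,
       "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },

   while every other kind expands to nothing.  */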
12836 /* DST operations: void foo (void *, const int, const char). */
12838 #undef RS6000_BUILTIN_0
12839 #undef RS6000_BUILTIN_1
12840 #undef RS6000_BUILTIN_2
12841 #undef RS6000_BUILTIN_3
12842 #undef RS6000_BUILTIN_A
12843 #undef RS6000_BUILTIN_D
12844 #undef RS6000_BUILTIN_E
12845 #undef RS6000_BUILTIN_H
12846 #undef RS6000_BUILTIN_P
12847 #undef RS6000_BUILTIN_Q
12848 #undef RS6000_BUILTIN_S
12849 #undef RS6000_BUILTIN_X
12851 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12852 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12853 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12854 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12855 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12856 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
12857 { MASK, ICODE, NAME, ENUM },
12859 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12860 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12861 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12862 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12863 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12864 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12866 static const struct builtin_description bdesc_dst[] =
12868 #include "rs6000-builtin.def"
12871 /* Simple binary operations: VECc = foo (VECa, VECb). */
12873 #undef RS6000_BUILTIN_0
12874 #undef RS6000_BUILTIN_1
12875 #undef RS6000_BUILTIN_2
12876 #undef RS6000_BUILTIN_3
12877 #undef RS6000_BUILTIN_A
12878 #undef RS6000_BUILTIN_D
12879 #undef RS6000_BUILTIN_E
12880 #undef RS6000_BUILTIN_H
12881 #undef RS6000_BUILTIN_P
12882 #undef RS6000_BUILTIN_Q
12883 #undef RS6000_BUILTIN_S
12884 #undef RS6000_BUILTIN_X
12886 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12887 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12888 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
12889 { MASK, ICODE, NAME, ENUM },
12891 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12892 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12893 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12894 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12895 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12896 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12897 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12898 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12899 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12901 static const struct builtin_description bdesc_2arg[] =
12903 #include "rs6000-builtin.def"
12906 #undef RS6000_BUILTIN_0
12907 #undef RS6000_BUILTIN_1
12908 #undef RS6000_BUILTIN_2
12909 #undef RS6000_BUILTIN_3
12910 #undef RS6000_BUILTIN_A
12911 #undef RS6000_BUILTIN_D
12912 #undef RS6000_BUILTIN_E
12913 #undef RS6000_BUILTIN_H
12914 #undef RS6000_BUILTIN_P
12915 #undef RS6000_BUILTIN_Q
12916 #undef RS6000_BUILTIN_S
12917 #undef RS6000_BUILTIN_X
12919 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12920 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12921 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12922 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12923 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12924 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12925 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12926 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12927 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
12928 { MASK, ICODE, NAME, ENUM },
12930 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12931 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12932 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12934 /* AltiVec predicates. */
12936 static const struct builtin_description bdesc_altivec_preds[] =
12938 #include "rs6000-builtin.def"
12941 /* SPE predicates. */
12942 #undef RS6000_BUILTIN_0
12943 #undef RS6000_BUILTIN_1
12944 #undef RS6000_BUILTIN_2
12945 #undef RS6000_BUILTIN_3
12946 #undef RS6000_BUILTIN_A
12947 #undef RS6000_BUILTIN_D
12948 #undef RS6000_BUILTIN_E
12949 #undef RS6000_BUILTIN_H
12950 #undef RS6000_BUILTIN_P
12951 #undef RS6000_BUILTIN_Q
12952 #undef RS6000_BUILTIN_S
12953 #undef RS6000_BUILTIN_X
12955 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12956 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12957 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12958 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12959 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12960 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12961 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12962 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12963 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12964 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12965 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
12966 { MASK, ICODE, NAME, ENUM },
12968 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12970 static const struct builtin_description bdesc_spe_predicates[] =
12972 #include "rs6000-builtin.def"
12975 /* SPE evsel predicates. */
12976 #undef RS6000_BUILTIN_0
12977 #undef RS6000_BUILTIN_1
12978 #undef RS6000_BUILTIN_2
12979 #undef RS6000_BUILTIN_3
12980 #undef RS6000_BUILTIN_A
12981 #undef RS6000_BUILTIN_D
12982 #undef RS6000_BUILTIN_E
12983 #undef RS6000_BUILTIN_H
12984 #undef RS6000_BUILTIN_P
12985 #undef RS6000_BUILTIN_Q
12986 #undef RS6000_BUILTIN_S
12987 #undef RS6000_BUILTIN_X
12989 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12990 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12991 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12992 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12993 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12994 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12995 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
12996 { MASK, ICODE, NAME, ENUM },
12998 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12999 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13000 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13001 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13002 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13004 static const struct builtin_description bdesc_spe_evsel[] =
13006 #include "rs6000-builtin.def"
13009 /* PAIRED predicates. */
13010 #undef RS6000_BUILTIN_0
13011 #undef RS6000_BUILTIN_1
13012 #undef RS6000_BUILTIN_2
13013 #undef RS6000_BUILTIN_3
13014 #undef RS6000_BUILTIN_A
13015 #undef RS6000_BUILTIN_D
13016 #undef RS6000_BUILTIN_E
13017 #undef RS6000_BUILTIN_H
13018 #undef RS6000_BUILTIN_P
13019 #undef RS6000_BUILTIN_Q
13020 #undef RS6000_BUILTIN_S
13021 #undef RS6000_BUILTIN_X
13023 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13024 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13025 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13026 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13027 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13028 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13029 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13030 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13031 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13032 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13033 { MASK, ICODE, NAME, ENUM },
13035 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13036 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13038 static const struct builtin_description bdesc_paired_preds[] =
13040 #include "rs6000-builtin.def"
13043 /* ABS* operations. */
13045 #undef RS6000_BUILTIN_0
13046 #undef RS6000_BUILTIN_1
13047 #undef RS6000_BUILTIN_2
13048 #undef RS6000_BUILTIN_3
13049 #undef RS6000_BUILTIN_A
13050 #undef RS6000_BUILTIN_D
13051 #undef RS6000_BUILTIN_E
13052 #undef RS6000_BUILTIN_H
13053 #undef RS6000_BUILTIN_P
13054 #undef RS6000_BUILTIN_Q
13055 #undef RS6000_BUILTIN_S
13056 #undef RS6000_BUILTIN_X
13058 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13059 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13060 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13061 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13062 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13063 { MASK, ICODE, NAME, ENUM },
13065 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13066 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13067 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13068 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13069 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13070 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13071 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13073 static const struct builtin_description bdesc_abs[] =
13075 #include "rs6000-builtin.def"
13078 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13079 foo (VECa). */
13081 #undef RS6000_BUILTIN_0
13082 #undef RS6000_BUILTIN_1
13083 #undef RS6000_BUILTIN_2
13084 #undef RS6000_BUILTIN_3
13085 #undef RS6000_BUILTIN_A
13086 #undef RS6000_BUILTIN_D
13087 #undef RS6000_BUILTIN_E
13088 #undef RS6000_BUILTIN_H
13089 #undef RS6000_BUILTIN_P
13090 #undef RS6000_BUILTIN_Q
13091 #undef RS6000_BUILTIN_S
13092 #undef RS6000_BUILTIN_X
13094 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13095 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13096 { MASK, ICODE, NAME, ENUM },
13098 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13099 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13100 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13101 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13102 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13103 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13104 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13105 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13106 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13107 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13109 static const struct builtin_description bdesc_1arg[] =
13111 #include "rs6000-builtin.def"
13114 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13116 #undef RS6000_BUILTIN_0
13117 #undef RS6000_BUILTIN_1
13118 #undef RS6000_BUILTIN_2
13119 #undef RS6000_BUILTIN_3
13120 #undef RS6000_BUILTIN_A
13121 #undef RS6000_BUILTIN_D
13122 #undef RS6000_BUILTIN_E
13123 #undef RS6000_BUILTIN_H
13124 #undef RS6000_BUILTIN_P
13125 #undef RS6000_BUILTIN_Q
13126 #undef RS6000_BUILTIN_S
13127 #undef RS6000_BUILTIN_X
13129 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13130 { MASK, ICODE, NAME, ENUM },
13132 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13133 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13134 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13135 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13136 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13137 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13138 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13139 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13140 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13141 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13142 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13144 static const struct builtin_description bdesc_0arg[] =
13146 #include "rs6000-builtin.def"
13149 /* HTM builtins. */
13150 #undef RS6000_BUILTIN_0
13151 #undef RS6000_BUILTIN_1
13152 #undef RS6000_BUILTIN_2
13153 #undef RS6000_BUILTIN_3
13154 #undef RS6000_BUILTIN_A
13155 #undef RS6000_BUILTIN_D
13156 #undef RS6000_BUILTIN_E
13157 #undef RS6000_BUILTIN_H
13158 #undef RS6000_BUILTIN_P
13159 #undef RS6000_BUILTIN_Q
13160 #undef RS6000_BUILTIN_S
13161 #undef RS6000_BUILTIN_X
13163 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13164 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13165 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13166 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13167 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13168 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13169 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13170 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13171 { MASK, ICODE, NAME, ENUM },
13173 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13174 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13175 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13176 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13178 static const struct builtin_description bdesc_htm[] =
13180 #include "rs6000-builtin.def"
13183 #undef RS6000_BUILTIN_0
13184 #undef RS6000_BUILTIN_1
13185 #undef RS6000_BUILTIN_2
13186 #undef RS6000_BUILTIN_3
13187 #undef RS6000_BUILTIN_A
13188 #undef RS6000_BUILTIN_D
13189 #undef RS6000_BUILTIN_E
13190 #undef RS6000_BUILTIN_H
13191 #undef RS6000_BUILTIN_P
13192 #undef RS6000_BUILTIN_Q
13193 #undef RS6000_BUILTIN_S
13195 /* Return true if a builtin function is overloaded. */
13196 bool
13197 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13199 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13202 /* Expand an expression EXP that calls a builtin without arguments. */
13203 static rtx
13204 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13206 rtx pat;
13207 machine_mode tmode = insn_data[icode].operand[0].mode;
13209 if (icode == CODE_FOR_nothing)
13210 /* Builtin not supported on this processor. */
13211 return 0;
13213 if (target == 0
13214 || GET_MODE (target) != tmode
13215 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13216 target = gen_reg_rtx (tmode);
13218 pat = GEN_FCN (icode) (target);
13219 if (! pat)
13220 return 0;
13221 emit_insn (pat);
13223 return target;
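/* E.g. __builtin_darn_32 () is expanded through here: the insn has no
   inputs, so all that is needed is a result register acceptable to the
   insn's operand 0 predicate.  */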
13227 static rtx
13228 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13230 rtx pat;
13231 tree arg0 = CALL_EXPR_ARG (exp, 0);
13232 tree arg1 = CALL_EXPR_ARG (exp, 1);
13233 rtx op0 = expand_normal (arg0);
13234 rtx op1 = expand_normal (arg1);
13235 machine_mode mode0 = insn_data[icode].operand[0].mode;
13236 machine_mode mode1 = insn_data[icode].operand[1].mode;
13238 if (icode == CODE_FOR_nothing)
13239 /* Builtin not supported on this processor. */
13240 return 0;
13242 /* If we got invalid arguments bail out before generating bad rtl. */
13243 if (arg0 == error_mark_node || arg1 == error_mark_node)
13244 return const0_rtx;
13246 if (GET_CODE (op0) != CONST_INT
13247 || INTVAL (op0) > 255
13248 || INTVAL (op0) < 0)
13250 error ("argument 1 must be an 8-bit field value");
13251 return const0_rtx;
13254 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13255 op0 = copy_to_mode_reg (mode0, op0);
13257 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13258 op1 = copy_to_mode_reg (mode1, op1);
13260 pat = GEN_FCN (icode) (op0, op1);
13261 if (! pat)
13262 return const0_rtx;
13263 emit_insn (pat);
13265 return NULL_RTX;
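/* Usage sketch: the field-mask argument of __builtin_mtfsf must fold to
   an 8-bit constant, so

     __builtin_mtfsf (0xff, d);

   is accepted, while a non-constant or out-of-range mask is rejected by
   the CONST_INT check above.  */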
13268 static rtx
13269 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13271 rtx pat;
13272 tree arg0 = CALL_EXPR_ARG (exp, 0);
13273 rtx op0 = expand_normal (arg0);
13274 machine_mode tmode = insn_data[icode].operand[0].mode;
13275 machine_mode mode0 = insn_data[icode].operand[1].mode;
13277 if (icode == CODE_FOR_nothing)
13278 /* Builtin not supported on this processor. */
13279 return 0;
13281 /* If we got invalid arguments bail out before generating bad rtl. */
13282 if (arg0 == error_mark_node)
13283 return const0_rtx;
13285 if (icode == CODE_FOR_altivec_vspltisb
13286 || icode == CODE_FOR_altivec_vspltish
13287 || icode == CODE_FOR_altivec_vspltisw
13288 || icode == CODE_FOR_spe_evsplatfi
13289 || icode == CODE_FOR_spe_evsplati)
13291 /* Only allow 5-bit *signed* literals. */
13292 if (GET_CODE (op0) != CONST_INT
13293 || INTVAL (op0) > 15
13294 || INTVAL (op0) < -16)
13296 error ("argument 1 must be a 5-bit signed literal");
13297 return const0_rtx;
13301 if (target == 0
13302 || GET_MODE (target) != tmode
13303 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13304 target = gen_reg_rtx (tmode);
13306 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13307 op0 = copy_to_mode_reg (mode0, op0);
13309 pat = GEN_FCN (icode) (target, op0);
13310 if (! pat)
13311 return 0;
13312 emit_insn (pat);
13314 return target;
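/* E.g. the splat-immediate intrinsics funnel through here; roughly:

     vector signed char v = vec_splat_s8 (-7);    vspltisb, -16..15 OK
     vector signed char w = vec_splat_s8 (99);    rejected: not a 5-bit
						  signed literal  */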
13317 static rtx
13318 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13320 rtx pat, scratch1, scratch2;
13321 tree arg0 = CALL_EXPR_ARG (exp, 0);
13322 rtx op0 = expand_normal (arg0);
13323 machine_mode tmode = insn_data[icode].operand[0].mode;
13324 machine_mode mode0 = insn_data[icode].operand[1].mode;
13326 /* If we have invalid arguments, bail out before generating bad rtl. */
13327 if (arg0 == error_mark_node)
13328 return const0_rtx;
13330 if (target == 0
13331 || GET_MODE (target) != tmode
13332 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13333 target = gen_reg_rtx (tmode);
13335 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13336 op0 = copy_to_mode_reg (mode0, op0);
13338 scratch1 = gen_reg_rtx (mode0);
13339 scratch2 = gen_reg_rtx (mode0);
13341 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13342 if (! pat)
13343 return 0;
13344 emit_insn (pat);
13346 return target;
13349 static rtx
13350 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13352 rtx pat;
13353 tree arg0 = CALL_EXPR_ARG (exp, 0);
13354 tree arg1 = CALL_EXPR_ARG (exp, 1);
13355 rtx op0 = expand_normal (arg0);
13356 rtx op1 = expand_normal (arg1);
13357 machine_mode tmode = insn_data[icode].operand[0].mode;
13358 machine_mode mode0 = insn_data[icode].operand[1].mode;
13359 machine_mode mode1 = insn_data[icode].operand[2].mode;
13361 if (icode == CODE_FOR_nothing)
13362 /* Builtin not supported on this processor. */
13363 return 0;
13365 /* If we got invalid arguments bail out before generating bad rtl. */
13366 if (arg0 == error_mark_node || arg1 == error_mark_node)
13367 return const0_rtx;
13369 if (icode == CODE_FOR_altivec_vcfux
13370 || icode == CODE_FOR_altivec_vcfsx
13371 || icode == CODE_FOR_altivec_vctsxs
13372 || icode == CODE_FOR_altivec_vctuxs
13373 || icode == CODE_FOR_altivec_vspltb
13374 || icode == CODE_FOR_altivec_vsplth
13375 || icode == CODE_FOR_altivec_vspltw
13376 || icode == CODE_FOR_spe_evaddiw
13377 || icode == CODE_FOR_spe_evldd
13378 || icode == CODE_FOR_spe_evldh
13379 || icode == CODE_FOR_spe_evldw
13380 || icode == CODE_FOR_spe_evlhhesplat
13381 || icode == CODE_FOR_spe_evlhhossplat
13382 || icode == CODE_FOR_spe_evlhhousplat
13383 || icode == CODE_FOR_spe_evlwhe
13384 || icode == CODE_FOR_spe_evlwhos
13385 || icode == CODE_FOR_spe_evlwhou
13386 || icode == CODE_FOR_spe_evlwhsplat
13387 || icode == CODE_FOR_spe_evlwwsplat
13388 || icode == CODE_FOR_spe_evrlwi
13389 || icode == CODE_FOR_spe_evslwi
13390 || icode == CODE_FOR_spe_evsrwis
13391 || icode == CODE_FOR_spe_evsubifw
13392 || icode == CODE_FOR_spe_evsrwiu)
13394 /* Only allow 5-bit unsigned literals. */
13395 STRIP_NOPS (arg1);
13396 if (TREE_CODE (arg1) != INTEGER_CST
13397 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13399 error ("argument 2 must be a 5-bit unsigned literal");
13400 return const0_rtx;
13403 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13404 || icode == CODE_FOR_dfptstsfi_lt_dd
13405 || icode == CODE_FOR_dfptstsfi_gt_dd
13406 || icode == CODE_FOR_dfptstsfi_unordered_dd
13407 || icode == CODE_FOR_dfptstsfi_eq_td
13408 || icode == CODE_FOR_dfptstsfi_lt_td
13409 || icode == CODE_FOR_dfptstsfi_gt_td
13410 || icode == CODE_FOR_dfptstsfi_unordered_td)
13412 /* Only allow 6-bit unsigned literals. */
13413 STRIP_NOPS (arg0);
13414 if (TREE_CODE (arg0) != INTEGER_CST
13415 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13417 error ("argument 1 must be a 6-bit unsigned literal");
13418 return CONST0_RTX (tmode);
13422 if (target == 0
13423 || GET_MODE (target) != tmode
13424 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13425 target = gen_reg_rtx (tmode);
13427 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13428 op0 = copy_to_mode_reg (mode0, op0);
13429 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13430 op1 = copy_to_mode_reg (mode1, op1);
13432 pat = GEN_FCN (icode) (target, op0, op1);
13433 if (! pat)
13434 return 0;
13435 emit_insn (pat);
13437 return target;
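/* E.g. the element-select operand of vec_splat must be a 5-bit unsigned
   literal per the check above; roughly:

     vector int s = vec_splat (v, 3);    vspltw, 0..31 OK
     vector int t = vec_splat (v, n);    rejected: not a constant  */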
13440 static rtx
13441 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13443 rtx pat, scratch;
13444 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13445 tree arg0 = CALL_EXPR_ARG (exp, 1);
13446 tree arg1 = CALL_EXPR_ARG (exp, 2);
13447 rtx op0 = expand_normal (arg0);
13448 rtx op1 = expand_normal (arg1);
13449 machine_mode tmode = SImode;
13450 machine_mode mode0 = insn_data[icode].operand[1].mode;
13451 machine_mode mode1 = insn_data[icode].operand[2].mode;
13452 int cr6_form_int;
13454 if (TREE_CODE (cr6_form) != INTEGER_CST)
13456 error ("argument 1 of __builtin_altivec_predicate must be a constant");
13457 return const0_rtx;
13459 else
13460 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13462 gcc_assert (mode0 == mode1);
13464 /* If we have invalid arguments, bail out before generating bad rtl. */
13465 if (arg0 == error_mark_node || arg1 == error_mark_node)
13466 return const0_rtx;
13468 if (target == 0
13469 || GET_MODE (target) != tmode
13470 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13471 target = gen_reg_rtx (tmode);
13473 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13474 op0 = copy_to_mode_reg (mode0, op0);
13475 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13476 op1 = copy_to_mode_reg (mode1, op1);
13478 scratch = gen_reg_rtx (mode0);
13480 pat = GEN_FCN (icode) (scratch, op0, op1);
13481 if (! pat)
13482 return 0;
13483 emit_insn (pat);
13485 /* The vec_any* and vec_all* predicates use the same opcodes for two
13486 different operations, but the bits in CR6 will be different
13487 depending on what information we want. So we have to play tricks
13488 with CR6 to get the right bits out.
13490 If you think this is disgusting, look at the specs for the
13491 AltiVec predicates. */
13493 switch (cr6_form_int)
13495 case 0:
13496 emit_insn (gen_cr6_test_for_zero (target));
13497 break;
13498 case 1:
13499 emit_insn (gen_cr6_test_for_zero_reverse (target));
13500 break;
13501 case 2:
13502 emit_insn (gen_cr6_test_for_lt (target));
13503 break;
13504 case 3:
13505 emit_insn (gen_cr6_test_for_lt_reverse (target));
13506 break;
13507 default:
13508 error ("argument 1 of __builtin_altivec_predicate is out of range");
13509 break;
13512 return target;
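/* The cr6_form values 0..3 correspond to the __CR6_EQ, __CR6_EQ_REV,
   __CR6_LT and __CR6_LT_REV selectors that altivec.h passes as the
   first argument of the predicate builtins, e.g. (illustrative):

     vec_all_eq (a, b) -> __builtin_altivec_vcmpequw_p (__CR6_LT, a, b)  */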
13515 static rtx
13516 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
13518 rtx pat, addr;
13519 tree arg0 = CALL_EXPR_ARG (exp, 0);
13520 tree arg1 = CALL_EXPR_ARG (exp, 1);
13521 machine_mode tmode = insn_data[icode].operand[0].mode;
13522 machine_mode mode0 = Pmode;
13523 machine_mode mode1 = Pmode;
13524 rtx op0 = expand_normal (arg0);
13525 rtx op1 = expand_normal (arg1);
13527 if (icode == CODE_FOR_nothing)
13528 /* Builtin not supported on this processor. */
13529 return 0;
13531 /* If we got invalid arguments bail out before generating bad rtl. */
13532 if (arg0 == error_mark_node || arg1 == error_mark_node)
13533 return const0_rtx;
13535 if (target == 0
13536 || GET_MODE (target) != tmode
13537 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13538 target = gen_reg_rtx (tmode);
13540 op1 = copy_to_mode_reg (mode1, op1);
13542 if (op0 == const0_rtx)
13544 addr = gen_rtx_MEM (tmode, op1);
13546 else
13548 op0 = copy_to_mode_reg (mode0, op0);
13549 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
13552 pat = GEN_FCN (icode) (target, addr);
13554 if (! pat)
13555 return 0;
13556 emit_insn (pat);
13558 return target;
13561 /* Return a constant vector for use as a little-endian permute control vector
13562 to reverse the order of elements of the given vector mode. */
13563 static rtx
13564 swap_selector_for_mode (machine_mode mode)
13566 /* These are little endian vectors, so their elements are reversed
13567 from what you would normally expect for a permute control vector. */
13568 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13569 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13570 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13571 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
13572 unsigned int *swaparray, i;
13573 rtx perm[16];
13575 switch (mode)
13577 case V2DFmode:
13578 case V2DImode:
13579 swaparray = swap2;
13580 break;
13581 case V4SFmode:
13582 case V4SImode:
13583 swaparray = swap4;
13584 break;
13585 case V8HImode:
13586 swaparray = swap8;
13587 break;
13588 case V16QImode:
13589 swaparray = swap16;
13590 break;
13591 default:
13592 gcc_unreachable ();
13595 for (i = 0; i < 16; ++i)
13596 perm[i] = GEN_INT (swaparray[i]);
13598 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
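/* E.g. for V4SImode this returns the byte selector
   {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}; combined with the
   little-endian element-order convention noted above, a vperm with
   this control reverses the order of the four 32-bit elements.  */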
13601 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
13602 with -maltivec=be specified. Issue the load followed by an element-
13603 reversing permute. */
13604 void
13605 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13607 rtx tmp = gen_reg_rtx (mode);
13608 rtx load = gen_rtx_SET (tmp, op1);
13609 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
13610 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
13611 rtx sel = swap_selector_for_mode (mode);
13612 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
13614 gcc_assert (REG_P (op0));
13615 emit_insn (par);
13616 emit_insn (gen_rtx_SET (op0, vperm));
13619 /* Generate code for a "stvxl" built-in for a little endian target with
13620 -maltivec=be specified. Issue the store preceded by an element-reversing
13621 permute. */
13622 void
13623 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13625 rtx tmp = gen_reg_rtx (mode);
13626 rtx store = gen_rtx_SET (op0, tmp);
13627 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
13628 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
13629 rtx sel = swap_selector_for_mode (mode);
13630 rtx vperm;
13632 gcc_assert (REG_P (op1));
13633 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
13634 emit_insn (gen_rtx_SET (tmp, vperm));
13635 emit_insn (par);
13638 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
13639 specified. Issue the store preceded by an element-reversing permute. */
13640 void
13641 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13643 machine_mode inner_mode = GET_MODE_INNER (mode);
13644 rtx tmp = gen_reg_rtx (mode);
13645 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
13646 rtx sel = swap_selector_for_mode (mode);
13647 rtx vperm;
13649 gcc_assert (REG_P (op1));
13650 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
13651 emit_insn (gen_rtx_SET (tmp, vperm));
13652 emit_insn (gen_rtx_SET (op0, stvx));
13655 static rtx
13656 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13658 rtx pat, addr;
13659 tree arg0 = CALL_EXPR_ARG (exp, 0);
13660 tree arg1 = CALL_EXPR_ARG (exp, 1);
13661 machine_mode tmode = insn_data[icode].operand[0].mode;
13662 machine_mode mode0 = Pmode;
13663 machine_mode mode1 = Pmode;
13664 rtx op0 = expand_normal (arg0);
13665 rtx op1 = expand_normal (arg1);
13667 if (icode == CODE_FOR_nothing)
13668 /* Builtin not supported on this processor. */
13669 return 0;
13671 /* If we got invalid arguments bail out before generating bad rtl. */
13672 if (arg0 == error_mark_node || arg1 == error_mark_node)
13673 return const0_rtx;
13675 if (target == 0
13676 || GET_MODE (target) != tmode
13677 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13678 target = gen_reg_rtx (tmode);
13680 op1 = copy_to_mode_reg (mode1, op1);
13682 /* For LVX, express the RTL accurately by ANDing the address with -16.
13683 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13684 so the raw address is fine. */
13685 if (icode == CODE_FOR_altivec_lvx_v2df_2op
13686 || icode == CODE_FOR_altivec_lvx_v2di_2op
13687 || icode == CODE_FOR_altivec_lvx_v4sf_2op
13688 || icode == CODE_FOR_altivec_lvx_v4si_2op
13689 || icode == CODE_FOR_altivec_lvx_v8hi_2op
13690 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
13692 rtx rawaddr;
13693 if (op0 == const0_rtx)
13694 rawaddr = op1;
13695 else
13697 op0 = copy_to_mode_reg (mode0, op0);
13698 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13700 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13701 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13703 /* For -maltivec=be, emit the load and follow it up with a
13704 permute to swap the elements. */
13705 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
13707 rtx temp = gen_reg_rtx (tmode);
13708 emit_insn (gen_rtx_SET (temp, addr));
13710 rtx sel = swap_selector_for_mode (tmode);
13711 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
13712 UNSPEC_VPERM);
13713 emit_insn (gen_rtx_SET (target, vperm));
13715 else
13716 emit_insn (gen_rtx_SET (target, addr));
13718 else
13720 if (op0 == const0_rtx)
13721 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13722 else
13724 op0 = copy_to_mode_reg (mode0, op0);
13725 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13726 gen_rtx_PLUS (Pmode, op1, op0));
13729 pat = GEN_FCN (icode) (target, addr);
13730 if (! pat)
13731 return 0;
13732 emit_insn (pat);
13735 return target;
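/* Illustration: for the LVX case this emits RTL of roughly the shape
   (modes elided)

     (set (reg target) (mem (and (plus op1 op0) (const_int -16))))

   so the hardware's truncation of the low address bits is explicit in
   the RTL rather than hidden behind an UNSPEC.  */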
13738 static rtx
13739 spe_expand_stv_builtin (enum insn_code icode, tree exp)
13741 tree arg0 = CALL_EXPR_ARG (exp, 0);
13742 tree arg1 = CALL_EXPR_ARG (exp, 1);
13743 tree arg2 = CALL_EXPR_ARG (exp, 2);
13744 rtx op0 = expand_normal (arg0);
13745 rtx op1 = expand_normal (arg1);
13746 rtx op2 = expand_normal (arg2);
13747 rtx pat;
13748 machine_mode mode0 = insn_data[icode].operand[0].mode;
13749 machine_mode mode1 = insn_data[icode].operand[1].mode;
13750 machine_mode mode2 = insn_data[icode].operand[2].mode;
13752 /* Invalid arguments. Bail before doing anything stoopid! */
13753 if (arg0 == error_mark_node
13754 || arg1 == error_mark_node
13755 || arg2 == error_mark_node)
13756 return const0_rtx;
13758 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
13759 op0 = copy_to_mode_reg (mode2, op0);
13760 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
13761 op1 = copy_to_mode_reg (mode0, op1);
13762 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
13763 op2 = copy_to_mode_reg (mode1, op2);
13765 pat = GEN_FCN (icode) (op1, op2, op0);
13766 if (pat)
13767 emit_insn (pat);
13768 return NULL_RTX;
13771 static rtx
13772 paired_expand_stv_builtin (enum insn_code icode, tree exp)
13774 tree arg0 = CALL_EXPR_ARG (exp, 0);
13775 tree arg1 = CALL_EXPR_ARG (exp, 1);
13776 tree arg2 = CALL_EXPR_ARG (exp, 2);
13777 rtx op0 = expand_normal (arg0);
13778 rtx op1 = expand_normal (arg1);
13779 rtx op2 = expand_normal (arg2);
13780 rtx pat, addr;
13781 machine_mode tmode = insn_data[icode].operand[0].mode;
13782 machine_mode mode1 = Pmode;
13783 machine_mode mode2 = Pmode;
13785 /* Invalid arguments. Bail before doing anything stoopid! */
13786 if (arg0 == error_mark_node
13787 || arg1 == error_mark_node
13788 || arg2 == error_mark_node)
13789 return const0_rtx;
13791 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
13792 op0 = copy_to_mode_reg (tmode, op0);
13794 op2 = copy_to_mode_reg (mode2, op2);
13796 if (op1 == const0_rtx)
13798 addr = gen_rtx_MEM (tmode, op2);
13800 else
13802 op1 = copy_to_mode_reg (mode1, op1);
13803 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
13806 pat = GEN_FCN (icode) (addr, op0);
13807 if (pat)
13808 emit_insn (pat);
13809 return NULL_RTX;
13812 static rtx
13813 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13815 tree arg0 = CALL_EXPR_ARG (exp, 0);
13816 tree arg1 = CALL_EXPR_ARG (exp, 1);
13817 tree arg2 = CALL_EXPR_ARG (exp, 2);
13818 rtx op0 = expand_normal (arg0);
13819 rtx op1 = expand_normal (arg1);
13820 rtx op2 = expand_normal (arg2);
13821 rtx pat, addr, rawaddr;
13822 machine_mode tmode = insn_data[icode].operand[0].mode;
13823 machine_mode smode = insn_data[icode].operand[1].mode;
13824 machine_mode mode1 = Pmode;
13825 machine_mode mode2 = Pmode;
13827 /* Invalid arguments. Bail before doing anything stoopid! */
13828 if (arg0 == error_mark_node
13829 || arg1 == error_mark_node
13830 || arg2 == error_mark_node)
13831 return const0_rtx;
13833 op2 = copy_to_mode_reg (mode2, op2);
13835 /* For STVX, express the RTL accurately by ANDing the address with -16.
13836 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13837 so the raw address is fine. */
13838 if (icode == CODE_FOR_altivec_stvx_v2df_2op
13839 || icode == CODE_FOR_altivec_stvx_v2di_2op
13840 || icode == CODE_FOR_altivec_stvx_v4sf_2op
13841 || icode == CODE_FOR_altivec_stvx_v4si_2op
13842 || icode == CODE_FOR_altivec_stvx_v8hi_2op
13843 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
13845 if (op1 == const0_rtx)
13846 rawaddr = op2;
13847 else
13849 op1 = copy_to_mode_reg (mode1, op1);
13850 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13853 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13854 addr = gen_rtx_MEM (tmode, addr);
13856 op0 = copy_to_mode_reg (tmode, op0);
13858 /* For -maltivec=be, emit a permute to swap the elements, followed
13859 by the store. */
13860 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
13862 rtx temp = gen_reg_rtx (tmode);
13863 rtx sel = swap_selector_for_mode (tmode);
13864 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
13865 UNSPEC_VPERM);
13866 emit_insn (gen_rtx_SET (temp, vperm));
13867 emit_insn (gen_rtx_SET (addr, temp));
13869 else
13870 emit_insn (gen_rtx_SET (addr, op0));
13872 else
13874 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13875 op0 = copy_to_mode_reg (smode, op0);
13877 if (op1 == const0_rtx)
13878 addr = gen_rtx_MEM (tmode, op2);
13879 else
13881 op1 = copy_to_mode_reg (mode1, op1);
13882 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13885 pat = GEN_FCN (icode) (addr, op0);
13886 if (pat)
13887 emit_insn (pat);
13890 return NULL_RTX;
13893 /* Return the appropriate SPR number associated with the given builtin. */
13894 static inline HOST_WIDE_INT
13895 htm_spr_num (enum rs6000_builtins code)
13897 if (code == HTM_BUILTIN_GET_TFHAR
13898 || code == HTM_BUILTIN_SET_TFHAR)
13899 return TFHAR_SPR;
13900 else if (code == HTM_BUILTIN_GET_TFIAR
13901 || code == HTM_BUILTIN_SET_TFIAR)
13902 return TFIAR_SPR;
13903 else if (code == HTM_BUILTIN_GET_TEXASR
13904 || code == HTM_BUILTIN_SET_TEXASR)
13905 return TEXASR_SPR;
13906 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13907 || code == HTM_BUILTIN_SET_TEXASRU);
13908 return TEXASRU_SPR;
13911 /* Return the appropriate SPR regno associated with the given builtin. */
13912 static inline HOST_WIDE_INT
13913 htm_spr_regno (enum rs6000_builtins code)
13915 if (code == HTM_BUILTIN_GET_TFHAR
13916 || code == HTM_BUILTIN_SET_TFHAR)
13917 return TFHAR_REGNO;
13918 else if (code == HTM_BUILTIN_GET_TFIAR
13919 || code == HTM_BUILTIN_SET_TFIAR)
13920 return TFIAR_REGNO;
13921 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
13922 || code == HTM_BUILTIN_SET_TEXASR
13923 || code == HTM_BUILTIN_GET_TEXASRU
13924 || code == HTM_BUILTIN_SET_TEXASRU);
13925 return TEXASR_REGNO;
13928 /* Return the correct ICODE value depending on whether we are
13929 setting or reading the HTM SPRs. */
13930 static inline enum insn_code
13931 rs6000_htm_spr_icode (bool nonvoid)
13933 if (nonvoid)
13934 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13935 else
13936 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13939 /* Expand the HTM builtin in EXP and store the result in TARGET.
13940 Store true in *EXPANDEDP if we found a builtin to expand. */
13941 static rtx
13942 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13944 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13945 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13946 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13947 const struct builtin_description *d;
13948 size_t i;
13950 *expandedp = true;
13952 if (!TARGET_POWERPC64
13953 && (fcode == HTM_BUILTIN_TABORTDC
13954 || fcode == HTM_BUILTIN_TABORTDCI))
13956 size_t uns_fcode = (size_t)fcode;
13957 const char *name = rs6000_builtin_info[uns_fcode].name;
13958 error ("builtin %s is only valid in 64-bit mode", name);
13959 return const0_rtx;
13962 /* Expand the HTM builtins. */
13963 d = bdesc_htm;
13964 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13965 if (d->code == fcode)
13967 rtx op[MAX_HTM_OPERANDS], pat;
13968 int nopnds = 0;
13969 tree arg;
13970 call_expr_arg_iterator iter;
13971 unsigned attr = rs6000_builtin_info[fcode].attr;
13972 enum insn_code icode = d->icode;
13973 const struct insn_operand_data *insn_op;
13974 bool uses_spr = (attr & RS6000_BTC_SPR);
13975 rtx cr = NULL_RTX;
13977 if (uses_spr)
13978 icode = rs6000_htm_spr_icode (nonvoid);
13979 insn_op = &insn_data[icode].operand[0];
13981 if (nonvoid)
13983 machine_mode tmode = (uses_spr) ? insn_op->mode : SImode;
13984 if (!target
13985 || GET_MODE (target) != tmode
13986 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13987 target = gen_reg_rtx (tmode);
13988 if (uses_spr)
13989 op[nopnds++] = target;
13992 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13994 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13995 return const0_rtx;
13997 insn_op = &insn_data[icode].operand[nopnds];
13999 op[nopnds] = expand_normal (arg);
14001 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14003 if (!strcmp (insn_op->constraint, "n"))
14005 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14006 if (!CONST_INT_P (op[nopnds]))
14007 error ("argument %d must be an unsigned literal", arg_num);
14008 else
14009 error ("argument %d is an unsigned literal that is "
14010 "out of range", arg_num);
14011 return const0_rtx;
14013 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14016 nopnds++;
14019 /* Handle the builtins for extended mnemonics. These accept
14020 no arguments, but map to builtins that take arguments. */
14021 switch (fcode)
14023 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14024 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14025 op[nopnds++] = GEN_INT (1);
14026 if (flag_checking)
14027 attr |= RS6000_BTC_UNARY;
14028 break;
14029 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14030 op[nopnds++] = GEN_INT (0);
14031 if (flag_checking)
14032 attr |= RS6000_BTC_UNARY;
14033 break;
14034 default:
14035 break;
14038 /* If this builtin accesses SPRs, then pass in the appropriate
14039 SPR number and SPR regno as the last two operands. */
14040 if (uses_spr)
14042 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14043 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14044 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14046 /* If this builtin accesses a CR, then pass in a scratch
14047 CR as the last operand. */
14048 else if (attr & RS6000_BTC_CR)
14049 cr = gen_reg_rtx (CCmode);
14050 op[nopnds++] = cr;
14053 if (flag_checking)
14055 int expected_nopnds = 0;
14056 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14057 expected_nopnds = 1;
14058 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14059 expected_nopnds = 2;
14060 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14061 expected_nopnds = 3;
14062 if (!(attr & RS6000_BTC_VOID))
14063 expected_nopnds += 1;
14064 if (uses_spr)
14065 expected_nopnds += 2;
14067 gcc_assert (nopnds == expected_nopnds
14068 && nopnds <= MAX_HTM_OPERANDS);
14071 switch (nopnds)
14073 case 1:
14074 pat = GEN_FCN (icode) (op[0]);
14075 break;
14076 case 2:
14077 pat = GEN_FCN (icode) (op[0], op[1]);
14078 break;
14079 case 3:
14080 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14081 break;
14082 case 4:
14083 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14084 break;
14085 default:
14086 gcc_unreachable ();
14088 if (!pat)
14089 return NULL_RTX;
14090 emit_insn (pat);
14092 if (attr & RS6000_BTC_CR)
14094 if (fcode == HTM_BUILTIN_TBEGIN)
14096 /* Emit code to set TARGET to true or false depending on
14097 whether the tbegin. instruction succeeded or failed
14098 to start a transaction. We do this by placing the 1's
14099 complement of CR's EQ bit into TARGET. */
14100 rtx scratch = gen_reg_rtx (SImode);
14101 emit_insn (gen_rtx_SET (scratch,
14102 gen_rtx_EQ (SImode, cr,
14103 const0_rtx)));
14104 emit_insn (gen_rtx_SET (target,
14105 gen_rtx_XOR (SImode, scratch,
14106 GEN_INT (1))));
14108 else
14110 /* Emit code to copy the 4-bit condition register field
14111 CR into the least significant end of register TARGET. */
14112 rtx scratch1 = gen_reg_rtx (SImode);
14113 rtx scratch2 = gen_reg_rtx (SImode);
14114 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14115 emit_insn (gen_movcc (subreg, cr));
14116 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14117 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14121 if (nonvoid)
14122 return target;
14123 return const0_rtx;
14126 *expandedp = false;
14127 return NULL_RTX;
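/* Typical use of the CR-setting path above (sketch):

     if (__builtin_tbegin (0))
       {
	 ... transactional code ...
	 __builtin_tend (0);
       }

   __builtin_tbegin returns nonzero when the transaction starts, which
   is why HTM_BUILTIN_TBEGIN gets the inverted-EQ sequence above.  */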
14130 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14132 static rtx
14133 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14134 rtx target)
14136 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14137 if (fcode == RS6000_BUILTIN_CPU_INIT)
14138 return const0_rtx;
14140 if (target == 0 || GET_MODE (target) != SImode)
14141 target = gen_reg_rtx (SImode);
14143 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14144 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14145 if (TREE_CODE (arg) != STRING_CST)
14147 error ("builtin %s only accepts a string argument",
14148 rs6000_builtin_info[(size_t) fcode].name);
14149 return const0_rtx;
14152 if (fcode == RS6000_BUILTIN_CPU_IS)
14154 const char *cpu = TREE_STRING_POINTER (arg);
14155 rtx cpuid = NULL_RTX;
14156 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14157 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14159 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14160 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14161 break;
14163 if (cpuid == NULL_RTX)
14165 /* Invalid CPU argument. */
14166 error ("cpu %s is an invalid argument to builtin %s",
14167 cpu, rs6000_builtin_info[(size_t) fcode].name);
14168 return const0_rtx;
14171 rtx platform = gen_reg_rtx (SImode);
14172 rtx tcbmem = gen_const_mem (SImode,
14173 gen_rtx_PLUS (Pmode,
14174 gen_rtx_REG (Pmode, TLS_REGNUM),
14175 GEN_INT (TCB_PLATFORM_OFFSET)));
14176 emit_move_insn (platform, tcbmem);
14177 emit_insn (gen_eqsi3 (target, platform, cpuid));
14179 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14181 const char *hwcap = TREE_STRING_POINTER (arg);
14182 rtx mask = NULL_RTX;
14183 int hwcap_offset;
14184 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14185 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14187 mask = GEN_INT (cpu_supports_info[i].mask);
14188 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14189 break;
14191 if (mask == NULL_RTX)
14193 /* Invalid HWCAP argument. */
14194 error ("hwcap %s is an invalid argument to builtin %s",
14195 hwcap, rs6000_builtin_info[(size_t) fcode].name);
14196 return const0_rtx;
14199 rtx tcb_hwcap = gen_reg_rtx (SImode);
14200 rtx tcbmem = gen_const_mem (SImode,
14201 gen_rtx_PLUS (Pmode,
14202 gen_rtx_REG (Pmode, TLS_REGNUM),
14203 GEN_INT (hwcap_offset)));
14204 emit_move_insn (tcb_hwcap, tcbmem);
14205 rtx scratch1 = gen_reg_rtx (SImode);
14206 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14207 rtx scratch2 = gen_reg_rtx (SImode);
14208 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14209 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14212 /* Record that we have expanded a CPU builtin, so that we can later
14213 emit a reference to the special symbol exported by LIBC to ensure we
14214 do not link against an old LIBC that doesn't support this feature. */
14215 cpu_builtin_p = true;
14217 #else
14218 /* For old LIBCs, always return FALSE. */
14219 emit_move_insn (target, GEN_INT (0));
14220 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14222 return target;
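/* Usage sketch (the string arguments must be literals):

     __builtin_cpu_init ();
     if (__builtin_cpu_is ("power8"))
       ...
     if (__builtin_cpu_supports ("vsx"))
       ...

   Each test compiles to a load from the thread control block plus a
   compare against a constant resolved here at compile time.  */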
14225 static rtx
14226 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14228 rtx pat;
14229 tree arg0 = CALL_EXPR_ARG (exp, 0);
14230 tree arg1 = CALL_EXPR_ARG (exp, 1);
14231 tree arg2 = CALL_EXPR_ARG (exp, 2);
14232 rtx op0 = expand_normal (arg0);
14233 rtx op1 = expand_normal (arg1);
14234 rtx op2 = expand_normal (arg2);
14235 machine_mode tmode = insn_data[icode].operand[0].mode;
14236 machine_mode mode0 = insn_data[icode].operand[1].mode;
14237 machine_mode mode1 = insn_data[icode].operand[2].mode;
14238 machine_mode mode2 = insn_data[icode].operand[3].mode;
14240 if (icode == CODE_FOR_nothing)
14241 /* Builtin not supported on this processor. */
14242 return 0;
14244 /* If we got invalid arguments bail out before generating bad rtl. */
14245 if (arg0 == error_mark_node
14246 || arg1 == error_mark_node
14247 || arg2 == error_mark_node)
14248 return const0_rtx;
14250 /* Check and prepare argument depending on the instruction code.
14252 Note that a switch statement instead of the sequence of tests
14253 would be incorrect as many of the CODE_FOR values could be
14254 CODE_FOR_nothing and that would yield multiple alternatives
14255 with identical values. We'd never reach here at runtime in
14256 this case. */
14257 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14258 || icode == CODE_FOR_altivec_vsldoi_v4si
14259 || icode == CODE_FOR_altivec_vsldoi_v8hi
14260 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14262 /* Only allow 4-bit unsigned literals. */
14263 STRIP_NOPS (arg2);
14264 if (TREE_CODE (arg2) != INTEGER_CST
14265 || TREE_INT_CST_LOW (arg2) & ~0xf)
14267 error ("argument 3 must be a 4-bit unsigned literal");
14268 return const0_rtx;
14271 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14272 || icode == CODE_FOR_vsx_xxpermdi_v2di
14273 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14274 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14275 || icode == CODE_FOR_vsx_xxsldwi_v4si
14276 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14277 || icode == CODE_FOR_vsx_xxsldwi_v2di
14278 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14280 /* Only allow 2-bit unsigned literals. */
14281 STRIP_NOPS (arg2);
14282 if (TREE_CODE (arg2) != INTEGER_CST
14283 || TREE_INT_CST_LOW (arg2) & ~0x3)
14285 error ("argument 3 must be a 2-bit unsigned literal");
14286 return const0_rtx;
14289 else if (icode == CODE_FOR_vsx_set_v2df
14290 || icode == CODE_FOR_vsx_set_v2di
14291 || icode == CODE_FOR_bcdadd
14292 || icode == CODE_FOR_bcdadd_lt
14293 || icode == CODE_FOR_bcdadd_eq
14294 || icode == CODE_FOR_bcdadd_gt
14295 || icode == CODE_FOR_bcdsub
14296 || icode == CODE_FOR_bcdsub_lt
14297 || icode == CODE_FOR_bcdsub_eq
14298 || icode == CODE_FOR_bcdsub_gt)
14300 /* Only allow 1-bit unsigned literals. */
14301 STRIP_NOPS (arg2);
14302 if (TREE_CODE (arg2) != INTEGER_CST
14303 || TREE_INT_CST_LOW (arg2) & ~0x1)
14305 error ("argument 3 must be a 1-bit unsigned literal");
14306 return const0_rtx;
14309 else if (icode == CODE_FOR_dfp_ddedpd_dd
14310 || icode == CODE_FOR_dfp_ddedpd_td)
14312 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14313 STRIP_NOPS (arg0);
14314 if (TREE_CODE (arg0) != INTEGER_CST
14315 || TREE_INT_CST_LOW (arg0) & ~0x3)
14317 error ("argument 1 must be 0 or 2");
14318 return const0_rtx;
14321 else if (icode == CODE_FOR_dfp_denbcd_dd
14322 || icode == CODE_FOR_dfp_denbcd_td)
14324 /* Only allow 1-bit unsigned literals. */
14325 STRIP_NOPS (arg0);
14326 if (TREE_CODE (arg0) != INTEGER_CST
14327 || TREE_INT_CST_LOW (arg0) & ~0x1)
14329 error ("argument 1 must be a 1-bit unsigned literal");
14330 return const0_rtx;
14333 else if (icode == CODE_FOR_dfp_dscli_dd
14334 || icode == CODE_FOR_dfp_dscli_td
14335 || icode == CODE_FOR_dfp_dscri_dd
14336 || icode == CODE_FOR_dfp_dscri_td)
14338 /* Only allow 6-bit unsigned literals. */
14339 STRIP_NOPS (arg1);
14340 if (TREE_CODE (arg1) != INTEGER_CST
14341 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14343 error ("argument 2 must be a 6-bit unsigned literal");
14344 return const0_rtx;
14347 else if (icode == CODE_FOR_crypto_vshasigmaw
14348 || icode == CODE_FOR_crypto_vshasigmad)
14350 /* Check whether the 2nd and 3rd arguments are integer constants and in
14351 range and prepare arguments. */
14352 STRIP_NOPS (arg1);
14353 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
14355 error ("argument 2 must be 0 or 1");
14356 return const0_rtx;
14359 STRIP_NOPS (arg2);
14360 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
14362 error ("argument 3 must be in the range 0..15");
14363 return const0_rtx;
14367 if (target == 0
14368 || GET_MODE (target) != tmode
14369 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14370 target = gen_reg_rtx (tmode);
14372 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14373 op0 = copy_to_mode_reg (mode0, op0);
14374 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14375 op1 = copy_to_mode_reg (mode1, op1);
14376 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14377 op2 = copy_to_mode_reg (mode2, op2);
14379 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
14380 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
14381 else
14382 pat = GEN_FCN (icode) (target, op0, op1, op2);
14383 if (! pat)
14384 return 0;
14385 emit_insn (pat);
14387 return target;
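/* E.g. the shift count of vec_sld must be a 4-bit literal; roughly:

     vector int r = vec_sld (a, b, 3);    vsldoi, 0..15 OK
     vector int s = vec_sld (a, b, n);    rejected: not a 4-bit
					  unsigned literal  */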
14390 /* Expand the lvx builtins. */
14391 static rtx
14392 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
14394 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14395 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14396 tree arg0;
14397 machine_mode tmode, mode0;
14398 rtx pat, op0;
14399 enum insn_code icode;
14401 switch (fcode)
14403 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
14404 icode = CODE_FOR_vector_altivec_load_v16qi;
14405 break;
14406 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
14407 icode = CODE_FOR_vector_altivec_load_v8hi;
14408 break;
14409 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
14410 icode = CODE_FOR_vector_altivec_load_v4si;
14411 break;
14412 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
14413 icode = CODE_FOR_vector_altivec_load_v4sf;
14414 break;
14415 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
14416 icode = CODE_FOR_vector_altivec_load_v2df;
14417 break;
14418 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
14419 icode = CODE_FOR_vector_altivec_load_v2di; break;
14420 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
14421 icode = CODE_FOR_vector_altivec_load_v1ti;
14422 break;
14423 default:
14424 *expandedp = false;
14425 return NULL_RTX;
14428 *expandedp = true;
14430 arg0 = CALL_EXPR_ARG (exp, 0);
14431 op0 = expand_normal (arg0);
14432 tmode = insn_data[icode].operand[0].mode;
14433 mode0 = insn_data[icode].operand[1].mode;
14435 if (target == 0
14436 || GET_MODE (target) != tmode
14437 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14438 target = gen_reg_rtx (tmode);
14440 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14441 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14443 pat = GEN_FCN (icode) (target, op0);
14444 if (! pat)
14445 return 0;
14446 emit_insn (pat);
14447 return target;
14450 /* Expand the stvx builtins. */
14451 static rtx
14452 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14453 bool *expandedp)
14455 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14456 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14457 tree arg0, arg1;
14458 machine_mode mode0, mode1;
14459 rtx pat, op0, op1;
14460 enum insn_code icode;
14462 switch (fcode)
14464 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
14465 icode = CODE_FOR_vector_altivec_store_v16qi;
14466 break;
14467 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
14468 icode = CODE_FOR_vector_altivec_store_v8hi;
14469 break;
14470 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
14471 icode = CODE_FOR_vector_altivec_store_v4si;
14472 break;
14473 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
14474 icode = CODE_FOR_vector_altivec_store_v4sf;
14475 break;
14476 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
14477 icode = CODE_FOR_vector_altivec_store_v2df;
14478 break;
14479 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
14480 icode = CODE_FOR_vector_altivec_store_v2di;
      break;
14481 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
14482 icode = CODE_FOR_vector_altivec_store_v1ti;
14483 break;
14484 default:
14485 *expandedp = false;
14486 return NULL_RTX;
14489 arg0 = CALL_EXPR_ARG (exp, 0);
14490 arg1 = CALL_EXPR_ARG (exp, 1);
14491 op0 = expand_normal (arg0);
14492 op1 = expand_normal (arg1);
14493 mode0 = insn_data[icode].operand[0].mode;
14494 mode1 = insn_data[icode].operand[1].mode;
14496 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14497 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14498 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14499 op1 = copy_to_mode_reg (mode1, op1);
14501 pat = GEN_FCN (icode) (op0, op1);
14502 if (pat)
14503 emit_insn (pat);
14505 *expandedp = true;
14506 return NULL_RTX;
14509 /* Expand the dst builtins. */
14510 static rtx
14511 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14512 bool *expandedp)
14514 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14515 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14516 tree arg0, arg1, arg2;
14517 machine_mode mode0, mode1;
14518 rtx pat, op0, op1, op2;
14519 const struct builtin_description *d;
14520 size_t i;
14522 *expandedp = false;
14524 /* Handle DST variants. */
14525 d = bdesc_dst;
14526 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14527 if (d->code == fcode)
14529 arg0 = CALL_EXPR_ARG (exp, 0);
14530 arg1 = CALL_EXPR_ARG (exp, 1);
14531 arg2 = CALL_EXPR_ARG (exp, 2);
14532 op0 = expand_normal (arg0);
14533 op1 = expand_normal (arg1);
14534 op2 = expand_normal (arg2);
14535 mode0 = insn_data[d->icode].operand[0].mode;
14536 mode1 = insn_data[d->icode].operand[1].mode;
14538 /* Invalid arguments, bail out before generating bad rtl. */
14539 if (arg0 == error_mark_node
14540 || arg1 == error_mark_node
14541 || arg2 == error_mark_node)
14542 return const0_rtx;
14544 *expandedp = true;
14545 STRIP_NOPS (arg2);
14546 if (TREE_CODE (arg2) != INTEGER_CST
14547 || TREE_INT_CST_LOW (arg2) & ~0x3)
14549 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14550 return const0_rtx;
14553 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14554 op0 = copy_to_mode_reg (Pmode, op0);
14555 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14556 op1 = copy_to_mode_reg (mode1, op1);
14558 pat = GEN_FCN (d->icode) (op0, op1, op2);
14559 if (pat != 0)
14560 emit_insn (pat);
14562 return NULL_RTX;
14565 return NULL_RTX;
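/* A usage sketch (assuming the altivec.h wrappers): the dst family takes
   a 2-bit tag literal as its last operand, which the check above enforces:

       vec_dst (ptr, ctl, 0);     OK: tag in 0..3
       vec_dst (ptr, ctl, tag);   error unless 'tag' folds to a 2-bit literal  */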
14568 /* Expand vec_init builtin. */
14569 static rtx
14570 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14572 machine_mode tmode = TYPE_MODE (type);
14573 machine_mode inner_mode = GET_MODE_INNER (tmode);
14574 int i, n_elt = GET_MODE_NUNITS (tmode);
14576 gcc_assert (VECTOR_MODE_P (tmode));
14577 gcc_assert (n_elt == call_expr_nargs (exp));
14579 if (!target || !register_operand (target, tmode))
14580 target = gen_reg_rtx (tmode);
14582 /* If we have a vector comprised of a single element, such as V1TImode, do
14583 the initialization directly. */
14584 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14586 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14587 emit_move_insn (target, gen_lowpart (tmode, x));
14589 else
14591 rtvec v = rtvec_alloc (n_elt);
14593 for (i = 0; i < n_elt; ++i)
14595 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14596 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14599 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14602 return target;
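/* Illustrative sketch: a vector constructor such as

       vector int v = (vector int) { a, b, c, d };

   reaches this expander via the VEC_INIT builtins; with four variable
   elements it takes the rtvec/PARALLEL path above, while a V1TI value
   is simply moved through a lowpart of the source register.  */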
14605 /* Return the integer constant in ARG. Constrain it to be in the range
14606 of the subparts of VEC_TYPE; issue an error if not. */
14608 static int
14609 get_element_number (tree vec_type, tree arg)
14611 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14613 if (!tree_fits_uhwi_p (arg)
14614 || (elt = tree_to_uhwi (arg), elt > max))
14616 error ("selector must be an integer constant in the range 0..%wi", max);
14617 return 0;
14620 return elt;
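/* For example (a sketch): with a V4SI vector, TYPE_VECTOR_SUBPARTS is 4,
   so selectors 0..3 are accepted and anything else is diagnosed:

       vec_extract (v4si, 2);   OK, elt = 2
       vec_extract (v4si, 9);   error: selector must be an integer
                                constant in the range 0..3  */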
14623 /* Expand vec_set builtin. */
14624 static rtx
14625 altivec_expand_vec_set_builtin (tree exp)
14627 machine_mode tmode, mode1;
14628 tree arg0, arg1, arg2;
14629 int elt;
14630 rtx op0, op1;
14632 arg0 = CALL_EXPR_ARG (exp, 0);
14633 arg1 = CALL_EXPR_ARG (exp, 1);
14634 arg2 = CALL_EXPR_ARG (exp, 2);
14636 tmode = TYPE_MODE (TREE_TYPE (arg0));
14637 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14638 gcc_assert (VECTOR_MODE_P (tmode));
14640 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14641 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14642 elt = get_element_number (TREE_TYPE (arg0), arg2);
14644 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14645 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14647 op0 = force_reg (tmode, op0);
14648 op1 = force_reg (mode1, op1);
14650 rs6000_expand_vector_set (op0, op1, elt);
14652 return op0;
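/* Usage sketch (assuming the vec_insert wrapper from altivec.h, which
   resolves to the VEC_SET builtins handled here):

       vector int v;
       v = vec_insert (42, v, 3);   replaces element 3 of v, in register  */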
14655 /* Expand vec_ext builtin. */
14656 static rtx
14657 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14659 machine_mode tmode, mode0;
14660 tree arg0, arg1;
14661 int elt;
14662 rtx op0;
14664 arg0 = CALL_EXPR_ARG (exp, 0);
14665 arg1 = CALL_EXPR_ARG (exp, 1);
14667 op0 = expand_normal (arg0);
14668 elt = get_element_number (TREE_TYPE (arg0), arg1);
14670 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14671 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14672 gcc_assert (VECTOR_MODE_P (mode0));
14674 op0 = force_reg (mode0, op0);
14676 if (optimize || !target || !register_operand (target, tmode))
14677 target = gen_reg_rtx (tmode);
14679 rs6000_expand_vector_extract (target, op0, elt);
14681 return target;
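/* Usage sketch (assuming the vec_extract wrapper, which resolves to the
   VEC_EXT builtins handled here):

       vector float f4;
       float x = vec_extract (f4, 1);   element 1 moved to a scalar register  */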
14684 /* Expand the builtin in EXP and store the result in TARGET. Store
14685 true in *EXPANDEDP if we found a builtin to expand. */
14686 static rtx
14687 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14689 const struct builtin_description *d;
14690 size_t i;
14691 enum insn_code icode;
14692 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14693 tree arg0;
14694 rtx op0, pat;
14695 machine_mode tmode, mode0;
14696 enum rs6000_builtins fcode
14697 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14699 if (rs6000_overloaded_builtin_p (fcode))
14701 *expandedp = true;
14702 error ("unresolved overload for Altivec builtin %qF", fndecl);
14704 /* Given it is invalid, just generate a normal call. */
14705 return expand_call (exp, target, false);
14708 target = altivec_expand_ld_builtin (exp, target, expandedp);
14709 if (*expandedp)
14710 return target;
14712 target = altivec_expand_st_builtin (exp, target, expandedp);
14713 if (*expandedp)
14714 return target;
14716 target = altivec_expand_dst_builtin (exp, target, expandedp);
14717 if (*expandedp)
14718 return target;
14720 *expandedp = true;
14722 switch (fcode)
14724 case ALTIVEC_BUILTIN_STVX_V2DF:
14725 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
14726 case ALTIVEC_BUILTIN_STVX_V2DI:
14727 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
14728 case ALTIVEC_BUILTIN_STVX_V4SF:
14729 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
14730 case ALTIVEC_BUILTIN_STVX:
14731 case ALTIVEC_BUILTIN_STVX_V4SI:
14732 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
14733 case ALTIVEC_BUILTIN_STVX_V8HI:
14734 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
14735 case ALTIVEC_BUILTIN_STVX_V16QI:
14736 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
14737 case ALTIVEC_BUILTIN_STVEBX:
14738 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14739 case ALTIVEC_BUILTIN_STVEHX:
14740 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14741 case ALTIVEC_BUILTIN_STVEWX:
14742 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14743 case ALTIVEC_BUILTIN_STVXL_V2DF:
14744 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14745 case ALTIVEC_BUILTIN_STVXL_V2DI:
14746 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14747 case ALTIVEC_BUILTIN_STVXL_V4SF:
14748 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14749 case ALTIVEC_BUILTIN_STVXL:
14750 case ALTIVEC_BUILTIN_STVXL_V4SI:
14751 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14752 case ALTIVEC_BUILTIN_STVXL_V8HI:
14753 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14754 case ALTIVEC_BUILTIN_STVXL_V16QI:
14755 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14757 case ALTIVEC_BUILTIN_STVLX:
14758 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14759 case ALTIVEC_BUILTIN_STVLXL:
14760 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14761 case ALTIVEC_BUILTIN_STVRX:
14762 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14763 case ALTIVEC_BUILTIN_STVRXL:
14764 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14766 case VSX_BUILTIN_STXVD2X_V1TI:
14767 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14768 case VSX_BUILTIN_STXVD2X_V2DF:
14769 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14770 case VSX_BUILTIN_STXVD2X_V2DI:
14771 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14772 case VSX_BUILTIN_STXVW4X_V4SF:
14773 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14774 case VSX_BUILTIN_STXVW4X_V4SI:
14775 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14776 case VSX_BUILTIN_STXVW4X_V8HI:
14777 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14778 case VSX_BUILTIN_STXVW4X_V16QI:
14779 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14781 /* For the following on big endian, it's ok to use any appropriate
14782 unaligned-supporting store, so use a generic expander. For
14783 little-endian, the exact element-reversing instruction must
14784 be used. */
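/* Illustrative example of why this matters: storing the V4SI value
   {0,1,2,3} through vec_xst must put element 0 at the lowest address on
   both endiannesses.  On big endian any unaligned-capable store does
   that; on little endian a plain stxvw4x-style store would lay the
   elements out reversed, hence the element-reversing patterns below.  */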
14785 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14787 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14788 : CODE_FOR_vsx_st_elemrev_v2df);
14789 return altivec_expand_stv_builtin (code, exp);
14791 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14793 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14794 : CODE_FOR_vsx_st_elemrev_v2di);
14795 return altivec_expand_stv_builtin (code, exp);
14797 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14799 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14800 : CODE_FOR_vsx_st_elemrev_v4sf);
14801 return altivec_expand_stv_builtin (code, exp);
14803 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14805 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14806 : CODE_FOR_vsx_st_elemrev_v4si);
14807 return altivec_expand_stv_builtin (code, exp);
14809 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14811 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14812 : CODE_FOR_vsx_st_elemrev_v8hi);
14813 return altivec_expand_stv_builtin (code, exp);
14815 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14817 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14818 : CODE_FOR_vsx_st_elemrev_v16qi);
14819 return altivec_expand_stv_builtin (code, exp);
14822 case ALTIVEC_BUILTIN_MFVSCR:
14823 icode = CODE_FOR_altivec_mfvscr;
14824 tmode = insn_data[icode].operand[0].mode;
14826 if (target == 0
14827 || GET_MODE (target) != tmode
14828 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14829 target = gen_reg_rtx (tmode);
14831 pat = GEN_FCN (icode) (target);
14832 if (! pat)
14833 return 0;
14834 emit_insn (pat);
14835 return target;
14837 case ALTIVEC_BUILTIN_MTVSCR:
14838 icode = CODE_FOR_altivec_mtvscr;
14839 arg0 = CALL_EXPR_ARG (exp, 0);
14840 op0 = expand_normal (arg0);
14841 mode0 = insn_data[icode].operand[0].mode;
14843 /* If we got invalid arguments, bail out before generating bad rtl. */
14844 if (arg0 == error_mark_node)
14845 return const0_rtx;
14847 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14848 op0 = copy_to_mode_reg (mode0, op0);
14850 pat = GEN_FCN (icode) (op0);
14851 if (pat)
14852 emit_insn (pat);
14853 return NULL_RTX;
14855 case ALTIVEC_BUILTIN_DSSALL:
14856 emit_insn (gen_altivec_dssall ());
14857 return NULL_RTX;
14859 case ALTIVEC_BUILTIN_DSS:
14860 icode = CODE_FOR_altivec_dss;
14861 arg0 = CALL_EXPR_ARG (exp, 0);
14862 STRIP_NOPS (arg0);
14863 op0 = expand_normal (arg0);
14864 mode0 = insn_data[icode].operand[0].mode;
14866 /* If we got invalid arguments, bail out before generating bad rtl. */
14867 if (arg0 == error_mark_node)
14868 return const0_rtx;
14870 if (TREE_CODE (arg0) != INTEGER_CST
14871 || TREE_INT_CST_LOW (arg0) & ~0x3)
14873 error ("argument to dss must be a 2-bit unsigned literal");
14874 return const0_rtx;
14877 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14878 op0 = copy_to_mode_reg (mode0, op0);
14880 emit_insn (gen_altivec_dss (op0));
14881 return NULL_RTX;
14883 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14884 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14885 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14886 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14887 case VSX_BUILTIN_VEC_INIT_V2DF:
14888 case VSX_BUILTIN_VEC_INIT_V2DI:
14889 case VSX_BUILTIN_VEC_INIT_V1TI:
14890 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14892 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14893 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14894 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14895 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14896 case VSX_BUILTIN_VEC_SET_V2DF:
14897 case VSX_BUILTIN_VEC_SET_V2DI:
14898 case VSX_BUILTIN_VEC_SET_V1TI:
14899 return altivec_expand_vec_set_builtin (exp);
14901 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14902 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14903 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14904 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14905 case VSX_BUILTIN_VEC_EXT_V2DF:
14906 case VSX_BUILTIN_VEC_EXT_V2DI:
14907 case VSX_BUILTIN_VEC_EXT_V1TI:
14908 return altivec_expand_vec_ext_builtin (exp, target);
14910 default:
14911 break;
14912 /* Fall through. */
14915 /* Expand abs* operations. */
14916 d = bdesc_abs;
14917 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14918 if (d->code == fcode)
14919 return altivec_expand_abs_builtin (d->icode, exp, target);
14921 /* Expand the AltiVec predicates. */
14922 d = bdesc_altivec_preds;
14923 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14924 if (d->code == fcode)
14925 return altivec_expand_predicate_builtin (d->icode, exp, target);
14927 /* LV* are funky. We initialize them differently. */
14928 switch (fcode)
14930 case ALTIVEC_BUILTIN_LVSL:
14931 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14932 exp, target, false);
14933 case ALTIVEC_BUILTIN_LVSR:
14934 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14935 exp, target, false);
14936 case ALTIVEC_BUILTIN_LVEBX:
14937 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14938 exp, target, false);
14939 case ALTIVEC_BUILTIN_LVEHX:
14940 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14941 exp, target, false);
14942 case ALTIVEC_BUILTIN_LVEWX:
14943 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14944 exp, target, false);
14945 case ALTIVEC_BUILTIN_LVXL_V2DF:
14946 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14947 exp, target, false);
14948 case ALTIVEC_BUILTIN_LVXL_V2DI:
14949 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14950 exp, target, false);
14951 case ALTIVEC_BUILTIN_LVXL_V4SF:
14952 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14953 exp, target, false);
14954 case ALTIVEC_BUILTIN_LVXL:
14955 case ALTIVEC_BUILTIN_LVXL_V4SI:
14956 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14957 exp, target, false);
14958 case ALTIVEC_BUILTIN_LVXL_V8HI:
14959 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14960 exp, target, false);
14961 case ALTIVEC_BUILTIN_LVXL_V16QI:
14962 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14963 exp, target, false);
14964 case ALTIVEC_BUILTIN_LVX_V2DF:
14965 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
14966 exp, target, false);
14967 case ALTIVEC_BUILTIN_LVX_V2DI:
14968 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
14969 exp, target, false);
14970 case ALTIVEC_BUILTIN_LVX_V4SF:
14971 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
14972 exp, target, false);
14973 case ALTIVEC_BUILTIN_LVX:
14974 case ALTIVEC_BUILTIN_LVX_V4SI:
14975 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
14976 exp, target, false);
14977 case ALTIVEC_BUILTIN_LVX_V8HI:
14978 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
14979 exp, target, false);
14980 case ALTIVEC_BUILTIN_LVX_V16QI:
14981 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
14982 exp, target, false);
14983 case ALTIVEC_BUILTIN_LVLX:
14984 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14985 exp, target, true);
14986 case ALTIVEC_BUILTIN_LVLXL:
14987 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14988 exp, target, true);
14989 case ALTIVEC_BUILTIN_LVRX:
14990 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14991 exp, target, true);
14992 case ALTIVEC_BUILTIN_LVRXL:
14993 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
14994 exp, target, true);
14995 case VSX_BUILTIN_LXVD2X_V1TI:
14996 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
14997 exp, target, false);
14998 case VSX_BUILTIN_LXVD2X_V2DF:
14999 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15000 exp, target, false);
15001 case VSX_BUILTIN_LXVD2X_V2DI:
15002 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15003 exp, target, false);
15004 case VSX_BUILTIN_LXVW4X_V4SF:
15005 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15006 exp, target, false);
15007 case VSX_BUILTIN_LXVW4X_V4SI:
15008 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15009 exp, target, false);
15010 case VSX_BUILTIN_LXVW4X_V8HI:
15011 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15012 exp, target, false);
15013 case VSX_BUILTIN_LXVW4X_V16QI:
15014 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15015 exp, target, false);
15016 /* For the following on big endian, it's ok to use any appropriate
15017 unaligned-supporting load, so use a generic expander. For
15018 little-endian, the exact element-reversing instruction must
15019 be used. */
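/* As with the element-reversing stores above: on little endian a plain
   lxvw4x-style load would present {3,2,1,0} for an in-memory {0,1,2,3},
   so vec_xl semantics require the element-reversing load patterns.  */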
15020 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15022 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15023 : CODE_FOR_vsx_ld_elemrev_v2df);
15024 return altivec_expand_lv_builtin (code, exp, target, false);
15026 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15028 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15029 : CODE_FOR_vsx_ld_elemrev_v2di);
15030 return altivec_expand_lv_builtin (code, exp, target, false);
15032 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15034 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15035 : CODE_FOR_vsx_ld_elemrev_v4sf);
15036 return altivec_expand_lv_builtin (code, exp, target, false);
15038 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15040 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15041 : CODE_FOR_vsx_ld_elemrev_v4si);
15042 return altivec_expand_lv_builtin (code, exp, target, false);
15044 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15046 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15047 : CODE_FOR_vsx_ld_elemrev_v8hi);
15048 return altivec_expand_lv_builtin (code, exp, target, false);
15050 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15052 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15053 : CODE_FOR_vsx_ld_elemrev_v16qi);
15054 return altivec_expand_lv_builtin (code, exp, target, false);
15056 break;
15057 default:
15058 break;
15059 /* Fall through. */
15062 *expandedp = false;
15063 return NULL_RTX;
15066 /* Expand the builtin in EXP and store the result in TARGET. Store
15067 true in *EXPANDEDP if we found a builtin to expand. */
15068 static rtx
15069 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15071 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15072 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15073 const struct builtin_description *d;
15074 size_t i;
15076 *expandedp = true;
15078 switch (fcode)
15080 case PAIRED_BUILTIN_STX:
15081 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15082 case PAIRED_BUILTIN_LX:
15083 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15084 default:
15085 break;
15086 /* Fall through. */
15089 /* Expand the paired predicates. */
15090 d = bdesc_paired_preds;
15091 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15092 if (d->code == fcode)
15093 return paired_expand_predicate_builtin (d->icode, exp, target);
15095 *expandedp = false;
15096 return NULL_RTX;
15099 /* Binops that need to be initialized manually, but can be expanded
15100 automagically by rs6000_expand_binop_builtin. */
15101 static const struct builtin_description bdesc_2arg_spe[] =
15103 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
15104 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
15105 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
15106 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
15107 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
15108 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
15109 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
15110 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
15111 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
15112 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
15113 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
15114 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
15115 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
15116 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
15117 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
15118 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
15119 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
15120 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
15121 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
15122 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
15123 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
15124 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
15127 /* Expand the builtin in EXP and store the result in TARGET. Store
15128 true in *EXPANDEDP if we found a builtin to expand.
15130 This expands the SPE builtins that are not simple unary and binary
15131 operations. */
15132 static rtx
15133 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
15135 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15136 tree arg1, arg0;
15137 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15138 enum insn_code icode;
15139 machine_mode tmode, mode0;
15140 rtx pat, op0;
15141 const struct builtin_description *d;
15142 size_t i;
15144 *expandedp = true;
15146 /* Syntax check for a 5-bit unsigned immediate. */
15147 switch (fcode)
15149 case SPE_BUILTIN_EVSTDD:
15150 case SPE_BUILTIN_EVSTDH:
15151 case SPE_BUILTIN_EVSTDW:
15152 case SPE_BUILTIN_EVSTWHE:
15153 case SPE_BUILTIN_EVSTWHO:
15154 case SPE_BUILTIN_EVSTWWE:
15155 case SPE_BUILTIN_EVSTWWO:
15156 arg1 = CALL_EXPR_ARG (exp, 2);
15157 if (TREE_CODE (arg1) != INTEGER_CST
15158 || TREE_INT_CST_LOW (arg1) & ~0x1f)
15160 error ("argument 2 must be a 5-bit unsigned literal");
15161 return const0_rtx;
15163 break;
15164 default:
15165 break;
15168 /* The evsplat*i instructions are not quite generic. */
15169 switch (fcode)
15171 case SPE_BUILTIN_EVSPLATFI:
15172 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
15173 exp, target);
15174 case SPE_BUILTIN_EVSPLATI:
15175 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
15176 exp, target);
15177 default:
15178 break;
15181 d = bdesc_2arg_spe;
15182 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
15183 if (d->code == fcode)
15184 return rs6000_expand_binop_builtin (d->icode, exp, target);
15186 d = bdesc_spe_predicates;
15187 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
15188 if (d->code == fcode)
15189 return spe_expand_predicate_builtin (d->icode, exp, target);
15191 d = bdesc_spe_evsel;
15192 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
15193 if (d->code == fcode)
15194 return spe_expand_evsel_builtin (d->icode, exp, target);
15196 switch (fcode)
15198 case SPE_BUILTIN_EVSTDDX:
15199 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
15200 case SPE_BUILTIN_EVSTDHX:
15201 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
15202 case SPE_BUILTIN_EVSTDWX:
15203 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
15204 case SPE_BUILTIN_EVSTWHEX:
15205 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
15206 case SPE_BUILTIN_EVSTWHOX:
15207 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
15208 case SPE_BUILTIN_EVSTWWEX:
15209 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
15210 case SPE_BUILTIN_EVSTWWOX:
15211 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
15212 case SPE_BUILTIN_EVSTDD:
15213 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
15214 case SPE_BUILTIN_EVSTDH:
15215 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
15216 case SPE_BUILTIN_EVSTDW:
15217 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
15218 case SPE_BUILTIN_EVSTWHE:
15219 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
15220 case SPE_BUILTIN_EVSTWHO:
15221 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
15222 case SPE_BUILTIN_EVSTWWE:
15223 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
15224 case SPE_BUILTIN_EVSTWWO:
15225 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
15226 case SPE_BUILTIN_MFSPEFSCR:
15227 icode = CODE_FOR_spe_mfspefscr;
15228 tmode = insn_data[icode].operand[0].mode;
15230 if (target == 0
15231 || GET_MODE (target) != tmode
15232 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15233 target = gen_reg_rtx (tmode);
15235 pat = GEN_FCN (icode) (target);
15236 if (! pat)
15237 return 0;
15238 emit_insn (pat);
15239 return target;
15240 case SPE_BUILTIN_MTSPEFSCR:
15241 icode = CODE_FOR_spe_mtspefscr;
15242 arg0 = CALL_EXPR_ARG (exp, 0);
15243 op0 = expand_normal (arg0);
15244 mode0 = insn_data[icode].operand[0].mode;
15246 if (arg0 == error_mark_node)
15247 return const0_rtx;
15249 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15250 op0 = copy_to_mode_reg (mode0, op0);
15252 pat = GEN_FCN (icode) (op0);
15253 if (pat)
15254 emit_insn (pat);
15255 return NULL_RTX;
15256 default:
15257 break;
15260 *expandedp = false;
15261 return NULL_RTX;
15264 static rtx
15265 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15267 rtx pat, scratch, tmp;
15268 tree form = CALL_EXPR_ARG (exp, 0);
15269 tree arg0 = CALL_EXPR_ARG (exp, 1);
15270 tree arg1 = CALL_EXPR_ARG (exp, 2);
15271 rtx op0 = expand_normal (arg0);
15272 rtx op1 = expand_normal (arg1);
15273 machine_mode mode0 = insn_data[icode].operand[1].mode;
15274 machine_mode mode1 = insn_data[icode].operand[2].mode;
15275 int form_int;
15276 enum rtx_code code;
15278 if (TREE_CODE (form) != INTEGER_CST)
15280 error ("argument 1 of __builtin_paired_predicate must be a constant");
15281 return const0_rtx;
15283 else
15284 form_int = TREE_INT_CST_LOW (form);
15286 gcc_assert (mode0 == mode1);
15288 if (arg0 == error_mark_node || arg1 == error_mark_node)
15289 return const0_rtx;
15291 if (target == 0
15292 || GET_MODE (target) != SImode
15293 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15294 target = gen_reg_rtx (SImode);
15295 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15296 op0 = copy_to_mode_reg (mode0, op0);
15297 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15298 op1 = copy_to_mode_reg (mode1, op1);
15300 scratch = gen_reg_rtx (CCFPmode);
15302 pat = GEN_FCN (icode) (scratch, op0, op1);
15303 if (!pat)
15304 return const0_rtx;
15306 emit_insn (pat);
15308 switch (form_int)
15310 /* LT bit. */
15311 case 0:
15312 code = LT;
15313 break;
15314 /* GT bit. */
15315 case 1:
15316 code = GT;
15317 break;
15318 /* EQ bit. */
15319 case 2:
15320 code = EQ;
15321 break;
15322 /* UN bit. */
15323 case 3:
15324 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15325 return target;
15326 default:
15327 error ("argument 1 of __builtin_paired_predicate is out of range");
15328 return const0_rtx;
15331 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15332 emit_move_insn (target, tmp);
15333 return target;
15336 static rtx
15337 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15339 rtx pat, scratch, tmp;
15340 tree form = CALL_EXPR_ARG (exp, 0);
15341 tree arg0 = CALL_EXPR_ARG (exp, 1);
15342 tree arg1 = CALL_EXPR_ARG (exp, 2);
15343 rtx op0 = expand_normal (arg0);
15344 rtx op1 = expand_normal (arg1);
15345 machine_mode mode0 = insn_data[icode].operand[1].mode;
15346 machine_mode mode1 = insn_data[icode].operand[2].mode;
15347 int form_int;
15348 enum rtx_code code;
15350 if (TREE_CODE (form) != INTEGER_CST)
15352 error ("argument 1 of __builtin_spe_predicate must be a constant");
15353 return const0_rtx;
15355 else
15356 form_int = TREE_INT_CST_LOW (form);
15358 gcc_assert (mode0 == mode1);
15360 if (arg0 == error_mark_node || arg1 == error_mark_node)
15361 return const0_rtx;
15363 if (target == 0
15364 || GET_MODE (target) != SImode
15365 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
15366 target = gen_reg_rtx (SImode);
15368 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15369 op0 = copy_to_mode_reg (mode0, op0);
15370 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15371 op1 = copy_to_mode_reg (mode1, op1);
15373 scratch = gen_reg_rtx (CCmode);
15375 pat = GEN_FCN (icode) (scratch, op0, op1);
15376 if (! pat)
15377 return const0_rtx;
15378 emit_insn (pat);
15380 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
15381 _lower_. We use one compare, but look in different bits of the
15382 CR for each variant.
15384 There are 2 elements in each SPE simd type (upper/lower). The CR
15385 bits are set as follows:
15387 BIT 0 | BIT 1 | BIT 2 | BIT 3
15388 U | L | (U | L) | (U & L)
15390 So, for an "all" relationship, BIT 3 would be set.
15391 For an "any" relationship, BIT 2 would be set. Etc.
15393 Following traditional nomenclature, these bits map to:
15395 BIT 0 | BIT 1 | BIT 2 | BIT 3
15396 LT | GT | EQ | OV
15398 Later, we will generate rtl to look in the OV/EQ/LT/GT bit, as
appropriate for the all/any/upper/lower variant.
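/* A worked sketch of the bit usage (values invented for illustration):
   comparing a = {1, 5} with b = {3, 2} under "greater than" gives
   U = 0 (1 > 3 is false) and L = 1 (5 > 2 is true), so BIT 2 (U | L,
   the "any" bit) is set and BIT 3 (U & L, the "all" bit) is clear.  */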
15401 switch (form_int)
15403 /* All variant. OV bit. */
15404 case 0:
15405 /* We need to get to the OV bit, which is the ORDERED bit. We
15406 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
15407 that's ugly and will make validate_condition_mode die.
15408 So let's just use another pattern. */
15409 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15410 return target;
15411 /* Any variant. EQ bit. */
15412 case 1:
15413 code = EQ;
15414 break;
15415 /* Upper variant. LT bit. */
15416 case 2:
15417 code = LT;
15418 break;
15419 /* Lower variant. GT bit. */
15420 case 3:
15421 code = GT;
15422 break;
15423 default:
15424 error ("argument 1 of __builtin_spe_predicate is out of range");
15425 return const0_rtx;
15428 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15429 emit_move_insn (target, tmp);
15431 return target;
15434 /* The evsel builtins look like this:
15436 e = __builtin_spe_evsel_OP (a, b, c, d);
15438 and work like this:
15440 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
15441 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
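/* A worked example (invented values): with OP = gts, a = {1, 5},
   b = {3, 2}, c = {10, 20}, d = {30, 40}:
       upper: 1 > 3 is false, so e[upper] = d[upper] = 30
       lower: 5 > 2 is true,  so e[lower] = c[lower] = 20
   giving e = {30, 20}.  */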
15444 static rtx
15445 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
15447 rtx pat, scratch;
15448 tree arg0 = CALL_EXPR_ARG (exp, 0);
15449 tree arg1 = CALL_EXPR_ARG (exp, 1);
15450 tree arg2 = CALL_EXPR_ARG (exp, 2);
15451 tree arg3 = CALL_EXPR_ARG (exp, 3);
15452 rtx op0 = expand_normal (arg0);
15453 rtx op1 = expand_normal (arg1);
15454 rtx op2 = expand_normal (arg2);
15455 rtx op3 = expand_normal (arg3);
15456 machine_mode mode0 = insn_data[icode].operand[1].mode;
15457 machine_mode mode1 = insn_data[icode].operand[2].mode;
15459 gcc_assert (mode0 == mode1);
15461 if (arg0 == error_mark_node || arg1 == error_mark_node
15462 || arg2 == error_mark_node || arg3 == error_mark_node)
15463 return const0_rtx;
15465 if (target == 0
15466 || GET_MODE (target) != mode0
15467 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
15468 target = gen_reg_rtx (mode0);
15470 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15471 op0 = copy_to_mode_reg (mode0, op0);
15472 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15473 op1 = copy_to_mode_reg (mode0, op1);
15474 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
15475 op2 = copy_to_mode_reg (mode0, op2);
15476 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
15477 op3 = copy_to_mode_reg (mode0, op3);
15479 /* Generate the compare. */
15480 scratch = gen_reg_rtx (CCmode);
15481 pat = GEN_FCN (icode) (scratch, op0, op1);
15482 if (! pat)
15483 return const0_rtx;
15484 emit_insn (pat);
15486 if (mode0 == V2SImode)
15487 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
15488 else
15489 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
15491 return target;
15494 /* Raise an error message for a builtin function that is called without the
15495 appropriate target options being set. */
15497 static void
15498 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15500 size_t uns_fncode = (size_t)fncode;
15501 const char *name = rs6000_builtin_info[uns_fncode].name;
15502 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15504 gcc_assert (name != NULL);
15505 if ((fnmask & RS6000_BTM_CELL) != 0)
15506 error ("Builtin function %s is only valid for the cell processor", name);
15507 else if ((fnmask & RS6000_BTM_VSX) != 0)
15508 error ("Builtin function %s requires the -mvsx option", name);
15509 else if ((fnmask & RS6000_BTM_HTM) != 0)
15510 error ("Builtin function %s requires the -mhtm option", name);
15511 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15512 error ("Builtin function %s requires the -maltivec option", name);
15513 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
15514 error ("Builtin function %s requires the -mpaired option", name);
15515 else if ((fnmask & RS6000_BTM_SPE) != 0)
15516 error ("Builtin function %s requires the -mspe option", name);
15517 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15518 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15519 error ("Builtin function %s requires the -mhard-dfp and"
15520 " -mpower8-vector options", name);
15521 else if ((fnmask & RS6000_BTM_DFP) != 0)
15522 error ("Builtin function %s requires the -mhard-dfp option", name);
15523 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15524 error ("Builtin function %s requires the -mpower8-vector option", name);
15525 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15526 error ("Builtin function %s requires the -mcpu=power9 option", name);
15527 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15528 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15529 error ("Builtin function %s requires the -mcpu=power9 and"
15530 " -m64 options", name);
15531 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15532 error ("Builtin function %s requires the -mcpu=power9 option", name);
15533 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15534 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15535 error ("Builtin function %s requires the -mhard-float and"
15536 " -mlong-double-128 options", name);
15537 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15538 error ("Builtin function %s requires the -mhard-float option", name);
15539 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15540 error ("Builtin function %s requires the -mfloat128 option", name);
15541 else
15542 error ("Builtin function %s is not supported with the current options",
15543 name);
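/* For instance (a sketch; the builtin name is illustrative):

       vector int f (vector int a, vector int b)
       { return __builtin_altivec_vaddsws (a, b); }

   compiled without -maltivec reports "builtin function
   __builtin_altivec_vaddsws requires the -maltivec option", and the
   caller then falls back to expanding a normal call.  */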
15546 /* Target hook for early folding of built-ins, shamelessly stolen
15547 from ia64.c. */
15549 static tree
15550 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
15551 tree *args, bool ignore ATTRIBUTE_UNUSED)
15553 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
15555 enum rs6000_builtins fn_code
15556 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15557 switch (fn_code)
15559 case RS6000_BUILTIN_NANQ:
15560 case RS6000_BUILTIN_NANSQ:
15562 tree type = TREE_TYPE (TREE_TYPE (fndecl));
15563 const char *str = c_getstr (*args);
15564 int quiet = fn_code == RS6000_BUILTIN_NANQ;
15565 REAL_VALUE_TYPE real;
15567 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
15568 return build_real (type, real);
15569 return NULL_TREE;
15571 case RS6000_BUILTIN_INFQ:
15572 case RS6000_BUILTIN_HUGE_VALQ:
15574 tree type = TREE_TYPE (TREE_TYPE (fndecl));
15575 REAL_VALUE_TYPE inf;
15576 real_inf (&inf);
15577 return build_real (type, inf);
15579 default:
15580 break;
15583 #ifdef SUBTARGET_FOLD_BUILTIN
15584 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15585 #else
15586 return NULL_TREE;
15587 #endif
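/* Usage sketch: these foldings let the 128-bit float builtins become
   compile-time constants, e.g.

       __float128 q = __builtin_nanq ("");   folded to a quiet NaN constant
       __float128 i = __builtin_infq ();     folded to +Inf

   (the builtins themselves are registered in rs6000_init_builtins
   below).  */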
15590 /* Expand an expression EXP that calls a built-in function,
15591 with result going to TARGET if that's convenient
15592 (and in mode MODE if that's convenient).
15593 SUBTARGET may be used as the target for computing one of EXP's operands.
15594 IGNORE is nonzero if the value is to be ignored. */
15596 static rtx
15597 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15598 machine_mode mode ATTRIBUTE_UNUSED,
15599 int ignore ATTRIBUTE_UNUSED)
15601 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15602 enum rs6000_builtins fcode
15603 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
15604 size_t uns_fcode = (size_t)fcode;
15605 const struct builtin_description *d;
15606 size_t i;
15607 rtx ret;
15608 bool success;
15609 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
15610 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
15612 if (TARGET_DEBUG_BUILTIN)
15614 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
15615 const char *name1 = rs6000_builtin_info[uns_fcode].name;
15616 const char *name2 = ((icode != CODE_FOR_nothing)
15617 ? get_insn_name ((int)icode)
15618 : "nothing");
15619 const char *name3;
15621 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
15623 default: name3 = "unknown"; break;
15624 case RS6000_BTC_SPECIAL: name3 = "special"; break;
15625 case RS6000_BTC_UNARY: name3 = "unary"; break;
15626 case RS6000_BTC_BINARY: name3 = "binary"; break;
15627 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
15628 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
15629 case RS6000_BTC_ABS: name3 = "abs"; break;
15630 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
15631 case RS6000_BTC_DST: name3 = "dst"; break;
15635 fprintf (stderr,
15636 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
15637 (name1) ? name1 : "---", fcode,
15638 (name2) ? name2 : "---", (int)icode,
15639 name3,
15640 func_valid_p ? "" : ", not valid");
15643 if (!func_valid_p)
15645 rs6000_invalid_builtin (fcode);
15647 /* Given it is invalid, just generate a normal call. */
15648 return expand_call (exp, target, ignore);
15651 switch (fcode)
15653 case RS6000_BUILTIN_RECIP:
15654 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
15656 case RS6000_BUILTIN_RECIPF:
15657 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
15659 case RS6000_BUILTIN_RSQRTF:
15660 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
15662 case RS6000_BUILTIN_RSQRT:
15663 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
15665 case POWER7_BUILTIN_BPERMD:
15666 return rs6000_expand_binop_builtin (((TARGET_64BIT)
15667 ? CODE_FOR_bpermd_di
15668 : CODE_FOR_bpermd_si), exp, target);
15670 case RS6000_BUILTIN_GET_TB:
15671 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
15672 target);
15674 case RS6000_BUILTIN_MFTB:
15675 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
15676 ? CODE_FOR_rs6000_mftb_di
15677 : CODE_FOR_rs6000_mftb_si),
15678 target);
15680 case RS6000_BUILTIN_MFFS:
15681 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
15683 case RS6000_BUILTIN_MTFSF:
15684 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
15686 case RS6000_BUILTIN_CPU_INIT:
15687 case RS6000_BUILTIN_CPU_IS:
15688 case RS6000_BUILTIN_CPU_SUPPORTS:
15689 return cpu_expand_builtin (fcode, exp, target);
15691 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
15692 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
15694 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
15695 : (int) CODE_FOR_altivec_lvsl_direct);
15696 machine_mode tmode = insn_data[icode].operand[0].mode;
15697 machine_mode mode = insn_data[icode].operand[1].mode;
15698 tree arg;
15699 rtx op, addr, pat;
15701 gcc_assert (TARGET_ALTIVEC);
15703 arg = CALL_EXPR_ARG (exp, 0);
15704 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
15705 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
15706 addr = memory_address (mode, op);
15707 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
15708 op = addr;
15709 else
15711 /* For the load case we need to negate the address. */
15712 op = gen_reg_rtx (GET_MODE (addr));
15713 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
15715 op = gen_rtx_MEM (mode, op);
15717 if (target == 0
15718 || GET_MODE (target) != tmode
15719 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15720 target = gen_reg_rtx (tmode);
15722 pat = GEN_FCN (icode) (target, op);
15723 if (!pat)
15724 return 0;
15725 emit_insn (pat);
15727 return target;
15730 case ALTIVEC_BUILTIN_VCFUX:
15731 case ALTIVEC_BUILTIN_VCFSX:
15732 case ALTIVEC_BUILTIN_VCTUXS:
15733 case ALTIVEC_BUILTIN_VCTSXS:
15734 /* FIXME: There's got to be a nicer way to handle this case than
15735 constructing a new CALL_EXPR. */
15736 if (call_expr_nargs (exp) == 1)
15738 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
15739 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
15741 break;
15743 default:
15744 break;
15747 if (TARGET_ALTIVEC)
15749 ret = altivec_expand_builtin (exp, target, &success);
15751 if (success)
15752 return ret;
15754 if (TARGET_SPE)
15756 ret = spe_expand_builtin (exp, target, &success);
15758 if (success)
15759 return ret;
15761 if (TARGET_PAIRED_FLOAT)
15763 ret = paired_expand_builtin (exp, target, &success);
15765 if (success)
15766 return ret;
15768 if (TARGET_HTM)
15770 ret = htm_expand_builtin (exp, target, &success);
15772 if (success)
15773 return ret;
15776 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
15777 /* RS6000_BTC_SPECIAL represents no-operand operators. */
15778 gcc_assert (attr == RS6000_BTC_UNARY
15779 || attr == RS6000_BTC_BINARY
15780 || attr == RS6000_BTC_TERNARY
15781 || attr == RS6000_BTC_SPECIAL);
15783 /* Handle simple unary operations. */
15784 d = bdesc_1arg;
15785 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15786 if (d->code == fcode)
15787 return rs6000_expand_unop_builtin (d->icode, exp, target);
15789 /* Handle simple binary operations. */
15790 d = bdesc_2arg;
15791 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15792 if (d->code == fcode)
15793 return rs6000_expand_binop_builtin (d->icode, exp, target);
15795 /* Handle simple ternary operations. */
15796 d = bdesc_3arg;
15797 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
15798 if (d->code == fcode)
15799 return rs6000_expand_ternop_builtin (d->icode, exp, target);
15801 /* Handle simple no-argument operations. */
15802 d = bdesc_0arg;
15803 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
15804 if (d->code == fcode)
15805 return rs6000_expand_zeroop_builtin (d->icode, target);
15807 gcc_unreachable ();
15810 static void
15811 rs6000_init_builtins (void)
15813 tree tdecl;
15814 tree ftype;
15815 machine_mode mode;
15817 if (TARGET_DEBUG_BUILTIN)
15818 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
15819 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
15820 (TARGET_SPE) ? ", spe" : "",
15821 (TARGET_ALTIVEC) ? ", altivec" : "",
15822 (TARGET_VSX) ? ", vsx" : "");
15824 V2SI_type_node = build_vector_type (intSI_type_node, 2);
15825 V2SF_type_node = build_vector_type (float_type_node, 2);
15826 V2DI_type_node = build_vector_type (intDI_type_node, 2);
15827 V2DF_type_node = build_vector_type (double_type_node, 2);
15828 V4HI_type_node = build_vector_type (intHI_type_node, 4);
15829 V4SI_type_node = build_vector_type (intSI_type_node, 4);
15830 V4SF_type_node = build_vector_type (float_type_node, 4);
15831 V8HI_type_node = build_vector_type (intHI_type_node, 8);
15832 V16QI_type_node = build_vector_type (intQI_type_node, 16);
15834 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
15835 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
15836 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
15837 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
15839 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
15840 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
15841 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
15842 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
15844 const_str_type_node
15845 = build_pointer_type (build_qualified_type (char_type_node,
15846 TYPE_QUAL_CONST));
15848 /* We use V1TI mode as a special container to hold __int128_t items that
15849 must live in VSX registers. */
15850 if (intTI_type_node)
15852 V1TI_type_node = build_vector_type (intTI_type_node, 1);
15853 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
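/* E.g. user code can then name this container directly as
   "__vector __int128" or "__vector unsigned __int128" (registered
   further below when V1TI_type_node exists).  */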
15856 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
15857 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
15858 'vector unsigned short'. */
15860 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
15861 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
15862 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
15863 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
15864 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
15866 long_integer_type_internal_node = long_integer_type_node;
15867 long_unsigned_type_internal_node = long_unsigned_type_node;
15868 long_long_integer_type_internal_node = long_long_integer_type_node;
15869 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
15870 intQI_type_internal_node = intQI_type_node;
15871 uintQI_type_internal_node = unsigned_intQI_type_node;
15872 intHI_type_internal_node = intHI_type_node;
15873 uintHI_type_internal_node = unsigned_intHI_type_node;
15874 intSI_type_internal_node = intSI_type_node;
15875 uintSI_type_internal_node = unsigned_intSI_type_node;
15876 intDI_type_internal_node = intDI_type_node;
15877 uintDI_type_internal_node = unsigned_intDI_type_node;
15878 intTI_type_internal_node = intTI_type_node;
15879 uintTI_type_internal_node = unsigned_intTI_type_node;
15880 float_type_internal_node = float_type_node;
15881 double_type_internal_node = double_type_node;
15882 long_double_type_internal_node = long_double_type_node;
15883 dfloat64_type_internal_node = dfloat64_type_node;
15884 dfloat128_type_internal_node = dfloat128_type_node;
15885 void_type_internal_node = void_type_node;
15887 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
15888 IFmode is the IBM extended 128-bit format that is a pair of doubles.
15889 TFmode will be either IEEE 128-bit floating point or the IBM double-double
15890 format that uses a pair of doubles, depending on the switches and
15891 defaults. */
15892 if (TARGET_FLOAT128)
15894 ibm128_float_type_node = make_node (REAL_TYPE);
15895 TYPE_PRECISION (ibm128_float_type_node) = 128;
15896 layout_type (ibm128_float_type_node);
15897 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
15899 ieee128_float_type_node = make_node (REAL_TYPE);
15900 TYPE_PRECISION (ieee128_float_type_node) = 128;
15901 layout_type (ieee128_float_type_node);
15902 SET_TYPE_MODE (ieee128_float_type_node, KFmode);
15904 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
15905 "__float128");
15907 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
15908 "__ibm128");
15910 else
15912 /* All types must be nonzero, or self-test barfs during bootstrap. */
15913 ieee128_float_type_node = long_double_type_node;
15914 ibm128_float_type_node = long_double_type_node;
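/* Usage sketch: with -mfloat128 the two 128-bit formats registered
   above are directly nameable, e.g.

       __float128 a;   IEEE binary128, KFmode
       __ibm128 b;     IBM double-double, IFmode  */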
15917 /* Initialize the modes for builtin_function_type, mapping a machine mode to
15918 tree type node. */
15919 builtin_mode_to_type[QImode][0] = integer_type_node;
15920 builtin_mode_to_type[HImode][0] = integer_type_node;
15921 builtin_mode_to_type[SImode][0] = intSI_type_node;
15922 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
15923 builtin_mode_to_type[DImode][0] = intDI_type_node;
15924 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
15925 builtin_mode_to_type[TImode][0] = intTI_type_node;
15926 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
15927 builtin_mode_to_type[SFmode][0] = float_type_node;
15928 builtin_mode_to_type[DFmode][0] = double_type_node;
15929 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
15930 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
15931 builtin_mode_to_type[TFmode][0] = long_double_type_node;
15932 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
15933 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
15934 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
15935 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
15936 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
15937 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
15938 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
15939 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
15940 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
15941 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
15942 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
15943 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
15944 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
15945 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
15946 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
15947 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
15948 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
15950 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
15951 TYPE_NAME (bool_char_type_node) = tdecl;
15953 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
15954 TYPE_NAME (bool_short_type_node) = tdecl;
15956 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
15957 TYPE_NAME (bool_int_type_node) = tdecl;
15959 tdecl = add_builtin_type ("__pixel", pixel_type_node);
15960 TYPE_NAME (pixel_type_node) = tdecl;
15962 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
15963 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
15964 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
15965 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
15966 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
15968 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
15969 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
15971 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
15972 TYPE_NAME (V16QI_type_node) = tdecl;
15974 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
15975 TYPE_NAME ( bool_V16QI_type_node) = tdecl;
15977 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
15978 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
15980 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
15981 TYPE_NAME (V8HI_type_node) = tdecl;
15983 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
15984 TYPE_NAME (bool_V8HI_type_node) = tdecl;
15986 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
15987 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
15989 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
15990 TYPE_NAME (V4SI_type_node) = tdecl;
15992 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
15993 TYPE_NAME (bool_V4SI_type_node) = tdecl;
15995 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
15996 TYPE_NAME (V4SF_type_node) = tdecl;
15998 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
15999 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
16001 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
16002 TYPE_NAME (V2DF_type_node) = tdecl;
16004 if (TARGET_POWERPC64)
16006 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
16007 TYPE_NAME (V2DI_type_node) = tdecl;
16009 tdecl = add_builtin_type ("__vector unsigned long",
16010 unsigned_V2DI_type_node);
16011 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
16013 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
16014 TYPE_NAME (bool_V2DI_type_node) = tdecl;
16016 else
16018 tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
16019 TYPE_NAME (V2DI_type_node) = tdecl;
16021 tdecl = add_builtin_type ("__vector unsigned long long",
16022 unsigned_V2DI_type_node);
16023 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
16025 tdecl = add_builtin_type ("__vector __bool long long",
16026 bool_V2DI_type_node);
16027 TYPE_NAME (bool_V2DI_type_node) = tdecl;
16030 if (V1TI_type_node)
16032 tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
16033 TYPE_NAME (V1TI_type_node) = tdecl;
16035 tdecl = add_builtin_type ("__vector unsigned __int128",
16036 unsigned_V1TI_type_node);
16037 TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
16040 /* Paired and SPE builtins are only available if the compiler was built
16041 with the corresponding options, so create those builtins only when the
16042 option is enabled. Create Altivec and VSX builtins on machines with at
16043 least the general purpose extensions (970 and newer) to allow the use
16044 of the target attribute. */
16045 if (TARGET_PAIRED_FLOAT)
16046 paired_init_builtins ();
16047 if (TARGET_SPE)
16048 spe_init_builtins ();
16049 if (TARGET_EXTRA_BUILTINS)
16050 altivec_init_builtins ();
16051 if (TARGET_HTM)
16052 htm_init_builtins ();
16054 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
16055 rs6000_common_init_builtins ();
16057 ftype = build_function_type_list (ieee128_float_type_node,
16058 const_str_type_node, NULL_TREE);
16059 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
16060 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
16062 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
16063 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
16064 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
16066 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16067 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16068 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16070 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16071 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16072 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16074 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16075 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16076 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16078 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16079 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16080 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
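/* Usage sketch (hypothetical user code, not part of this file):
     double q = __builtin_recipdiv (x, y);
     float r = __builtin_rsqrtf (f);
   These resolve to the RS6000_BUILTIN_RECIP* / RS6000_BUILTIN_RSQRT*
   codes registered above and expand to reciprocal-estimate sequences
   that are refined by Newton-Raphson steps elsewhere in this file. */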
16082 mode = (TARGET_64BIT) ? DImode : SImode;
16083 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16084 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16085 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16087 ftype = build_function_type_list (unsigned_intDI_type_node,
16088 NULL_TREE);
16089 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16091 if (TARGET_64BIT)
16092 ftype = build_function_type_list (unsigned_intDI_type_node,
16093 NULL_TREE);
16094 else
16095 ftype = build_function_type_list (unsigned_intSI_type_node,
16096 NULL_TREE);
16097 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
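/* Usage sketch (hypothetical user code):
     unsigned long long tb = __builtin_ppc_get_timebase ();
   always reads the full 64-bit time base, while __builtin_ppc_mftb
   returns the register-width value selected just above (DImode on
   64-bit, SImode on 32-bit). */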
16099 ftype = build_function_type_list (double_type_node, NULL_TREE);
16100 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16102 ftype = build_function_type_list (void_type_node,
16103 intSI_type_node, double_type_node,
16104 NULL_TREE);
16105 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16107 ftype = build_function_type_list (void_type_node, NULL_TREE);
16108 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16110 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16111 NULL_TREE);
16112 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16113 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
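/* Usage sketch (hypothetical user code):
     __builtin_cpu_init ();
     if (__builtin_cpu_supports ("arch_2_07"))
       ;// take the POWER8 path
   __builtin_cpu_is tests a CPU name and __builtin_cpu_supports a
   feature name, both against data the runtime exposes through the
   auxiliary vector (see ppc-auxv.h). */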
16115 #if TARGET_XCOFF
16116 /* AIX libm provides clog as __clog. */
16117 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16118 set_user_assembler_name (tdecl, "__clog");
16119 #endif
16121 #ifdef SUBTARGET_INIT_BUILTINS
16122 SUBTARGET_INIT_BUILTINS;
16123 #endif
16126 /* Returns the rs6000 builtin decl for CODE. */
16128 static tree
16129 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16131 HOST_WIDE_INT fnmask;
16133 if (code >= RS6000_BUILTIN_COUNT)
16134 return error_mark_node;
16136 fnmask = rs6000_builtin_info[code].mask;
16137 if ((fnmask & rs6000_builtin_mask) != fnmask)
16139 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16140 return error_mark_node;
16143 return rs6000_builtin_decls[code];
16146 static void
16147 spe_init_builtins (void)
16149 tree puint_type_node = build_pointer_type (unsigned_type_node);
16150 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
16151 const struct builtin_description *d;
16152 size_t i;
16154 tree v2si_ftype_4_v2si
16155 = build_function_type_list (opaque_V2SI_type_node,
16156 opaque_V2SI_type_node,
16157 opaque_V2SI_type_node,
16158 opaque_V2SI_type_node,
16159 opaque_V2SI_type_node,
16160 NULL_TREE);
16162 tree v2sf_ftype_4_v2sf
16163 = build_function_type_list (opaque_V2SF_type_node,
16164 opaque_V2SF_type_node,
16165 opaque_V2SF_type_node,
16166 opaque_V2SF_type_node,
16167 opaque_V2SF_type_node,
16168 NULL_TREE);
16170 tree int_ftype_int_v2si_v2si
16171 = build_function_type_list (integer_type_node,
16172 integer_type_node,
16173 opaque_V2SI_type_node,
16174 opaque_V2SI_type_node,
16175 NULL_TREE);
16177 tree int_ftype_int_v2sf_v2sf
16178 = build_function_type_list (integer_type_node,
16179 integer_type_node,
16180 opaque_V2SF_type_node,
16181 opaque_V2SF_type_node,
16182 NULL_TREE);
16184 tree void_ftype_v2si_puint_int
16185 = build_function_type_list (void_type_node,
16186 opaque_V2SI_type_node,
16187 puint_type_node,
16188 integer_type_node,
16189 NULL_TREE);
16191 tree void_ftype_v2si_puint_char
16192 = build_function_type_list (void_type_node,
16193 opaque_V2SI_type_node,
16194 puint_type_node,
16195 char_type_node,
16196 NULL_TREE);
16198 tree void_ftype_v2si_pv2si_int
16199 = build_function_type_list (void_type_node,
16200 opaque_V2SI_type_node,
16201 opaque_p_V2SI_type_node,
16202 integer_type_node,
16203 NULL_TREE);
16205 tree void_ftype_v2si_pv2si_char
16206 = build_function_type_list (void_type_node,
16207 opaque_V2SI_type_node,
16208 opaque_p_V2SI_type_node,
16209 char_type_node,
16210 NULL_TREE);
16212 tree void_ftype_int
16213 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16215 tree int_ftype_void
16216 = build_function_type_list (integer_type_node, NULL_TREE);
16218 tree v2si_ftype_pv2si_int
16219 = build_function_type_list (opaque_V2SI_type_node,
16220 opaque_p_V2SI_type_node,
16221 integer_type_node,
16222 NULL_TREE);
16224 tree v2si_ftype_puint_int
16225 = build_function_type_list (opaque_V2SI_type_node,
16226 puint_type_node,
16227 integer_type_node,
16228 NULL_TREE);
16230 tree v2si_ftype_pushort_int
16231 = build_function_type_list (opaque_V2SI_type_node,
16232 pushort_type_node,
16233 integer_type_node,
16234 NULL_TREE);
16236 tree v2si_ftype_signed_char
16237 = build_function_type_list (opaque_V2SI_type_node,
16238 signed_char_type_node,
16239 NULL_TREE);
16241 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
16243 /* Initialize irregular SPE builtins. */
16245 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
16246 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
16247 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
16248 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
16249 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
16250 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
16251 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
16252 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
16253 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
16254 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
16255 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
16256 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
16257 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
16258 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
16259 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
16260 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
16261 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
16262 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
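/* Usage sketch (hypothetical user code):
     __ev64_opaque__ v = __builtin_spe_evsplati (5);
   splats the small signed immediate across both 32-bit elements; the
   v2si_ftype_signed_char signature above constrains the operand. */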
16264 /* Loads. */
16265 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
16266 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
16267 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
16268 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
16269 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
16270 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
16271 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
16272 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
16273 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
16274 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
16275 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
16276 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
16277 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
16278 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
16279 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
16280 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
16281 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
16282 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
16283 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
16284 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
16285 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
16286 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
16288 /* Predicates. */
16289 d = bdesc_spe_predicates;
16290 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
16292 tree type;
16294 switch (insn_data[d->icode].operand[1].mode)
16296 case V2SImode:
16297 type = int_ftype_int_v2si_v2si;
16298 break;
16299 case V2SFmode:
16300 type = int_ftype_int_v2sf_v2sf;
16301 break;
16302 default:
16303 gcc_unreachable ();
16306 def_builtin (d->name, type, d->code);
16309 /* Evsel predicates. */
16310 d = bdesc_spe_evsel;
16311 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
16313 tree type;
16315 switch (insn_data[d->icode].operand[1].mode)
16317 case V2SImode:
16318 type = v2si_ftype_4_v2si;
16319 break;
16320 case V2SFmode:
16321 type = v2sf_ftype_4_v2sf;
16322 break;
16323 default:
16324 gcc_unreachable ();
16327 def_builtin (d->name, type, d->code);
16331 static void
16332 paired_init_builtins (void)
16334 const struct builtin_description *d;
16335 size_t i;
16337 tree int_ftype_int_v2sf_v2sf
16338 = build_function_type_list (integer_type_node,
16339 integer_type_node,
16340 V2SF_type_node,
16341 V2SF_type_node,
16342 NULL_TREE);
16343 tree pcfloat_type_node =
16344 build_pointer_type (build_qualified_type
16345 (float_type_node, TYPE_QUAL_CONST));
16347 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
16348 long_integer_type_node,
16349 pcfloat_type_node,
16350 NULL_TREE);
16351 tree void_ftype_v2sf_long_pcfloat =
16352 build_function_type_list (void_type_node,
16353 V2SF_type_node,
16354 long_integer_type_node,
16355 pcfloat_type_node,
16356 NULL_TREE);
16359 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
16360 PAIRED_BUILTIN_LX);
16363 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
16364 PAIRED_BUILTIN_STX);
16366 /* Predicates. */
16367 d = bdesc_paired_preds;
16368 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
16370 tree type;
16372 if (TARGET_DEBUG_BUILTIN)
16373 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
16374 (int)i, get_insn_name (d->icode), (int)d->icode,
16375 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
16377 switch (insn_data[d->icode].operand[1].mode)
16379 case V2SFmode:
16380 type = int_ftype_int_v2sf_v2sf;
16381 break;
16382 default:
16383 gcc_unreachable ();
16386 def_builtin (d->name, type, d->code);
16390 static void
16391 altivec_init_builtins (void)
16393 const struct builtin_description *d;
16394 size_t i;
16395 tree ftype;
16396 tree decl;
16398 tree pvoid_type_node = build_pointer_type (void_type_node);
16400 tree pcvoid_type_node
16401 = build_pointer_type (build_qualified_type (void_type_node,
16402 TYPE_QUAL_CONST));
16404 tree int_ftype_opaque
16405 = build_function_type_list (integer_type_node,
16406 opaque_V4SI_type_node, NULL_TREE);
16407 tree opaque_ftype_opaque
16408 = build_function_type_list (integer_type_node, NULL_TREE);
16409 tree opaque_ftype_opaque_int
16410 = build_function_type_list (opaque_V4SI_type_node,
16411 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16412 tree opaque_ftype_opaque_opaque_int
16413 = build_function_type_list (opaque_V4SI_type_node,
16414 opaque_V4SI_type_node, opaque_V4SI_type_node,
16415 integer_type_node, NULL_TREE);
16416 tree opaque_ftype_opaque_opaque_opaque
16417 = build_function_type_list (opaque_V4SI_type_node,
16418 opaque_V4SI_type_node, opaque_V4SI_type_node,
16419 opaque_V4SI_type_node, NULL_TREE);
16420 tree opaque_ftype_opaque_opaque
16421 = build_function_type_list (opaque_V4SI_type_node,
16422 opaque_V4SI_type_node, opaque_V4SI_type_node,
16423 NULL_TREE);
16424 tree int_ftype_int_opaque_opaque
16425 = build_function_type_list (integer_type_node,
16426 integer_type_node, opaque_V4SI_type_node,
16427 opaque_V4SI_type_node, NULL_TREE);
16428 tree int_ftype_int_v4si_v4si
16429 = build_function_type_list (integer_type_node,
16430 integer_type_node, V4SI_type_node,
16431 V4SI_type_node, NULL_TREE);
16432 tree int_ftype_int_v2di_v2di
16433 = build_function_type_list (integer_type_node,
16434 integer_type_node, V2DI_type_node,
16435 V2DI_type_node, NULL_TREE);
16436 tree void_ftype_v4si
16437 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16438 tree v8hi_ftype_void
16439 = build_function_type_list (V8HI_type_node, NULL_TREE);
16440 tree void_ftype_void
16441 = build_function_type_list (void_type_node, NULL_TREE);
16442 tree void_ftype_int
16443 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16445 tree opaque_ftype_long_pcvoid
16446 = build_function_type_list (opaque_V4SI_type_node,
16447 long_integer_type_node, pcvoid_type_node,
16448 NULL_TREE);
16449 tree v16qi_ftype_long_pcvoid
16450 = build_function_type_list (V16QI_type_node,
16451 long_integer_type_node, pcvoid_type_node,
16452 NULL_TREE);
16453 tree v8hi_ftype_long_pcvoid
16454 = build_function_type_list (V8HI_type_node,
16455 long_integer_type_node, pcvoid_type_node,
16456 NULL_TREE);
16457 tree v4si_ftype_long_pcvoid
16458 = build_function_type_list (V4SI_type_node,
16459 long_integer_type_node, pcvoid_type_node,
16460 NULL_TREE);
16461 tree v4sf_ftype_long_pcvoid
16462 = build_function_type_list (V4SF_type_node,
16463 long_integer_type_node, pcvoid_type_node,
16464 NULL_TREE);
16465 tree v2df_ftype_long_pcvoid
16466 = build_function_type_list (V2DF_type_node,
16467 long_integer_type_node, pcvoid_type_node,
16468 NULL_TREE);
16469 tree v2di_ftype_long_pcvoid
16470 = build_function_type_list (V2DI_type_node,
16471 long_integer_type_node, pcvoid_type_node,
16472 NULL_TREE);
16474 tree void_ftype_opaque_long_pvoid
16475 = build_function_type_list (void_type_node,
16476 opaque_V4SI_type_node, long_integer_type_node,
16477 pvoid_type_node, NULL_TREE);
16478 tree void_ftype_v4si_long_pvoid
16479 = build_function_type_list (void_type_node,
16480 V4SI_type_node, long_integer_type_node,
16481 pvoid_type_node, NULL_TREE);
16482 tree void_ftype_v16qi_long_pvoid
16483 = build_function_type_list (void_type_node,
16484 V16QI_type_node, long_integer_type_node,
16485 pvoid_type_node, NULL_TREE);
16486 tree void_ftype_v8hi_long_pvoid
16487 = build_function_type_list (void_type_node,
16488 V8HI_type_node, long_integer_type_node,
16489 pvoid_type_node, NULL_TREE);
16490 tree void_ftype_v4sf_long_pvoid
16491 = build_function_type_list (void_type_node,
16492 V4SF_type_node, long_integer_type_node,
16493 pvoid_type_node, NULL_TREE);
16494 tree void_ftype_v2df_long_pvoid
16495 = build_function_type_list (void_type_node,
16496 V2DF_type_node, long_integer_type_node,
16497 pvoid_type_node, NULL_TREE);
16498 tree void_ftype_v2di_long_pvoid
16499 = build_function_type_list (void_type_node,
16500 V2DI_type_node, long_integer_type_node,
16501 pvoid_type_node, NULL_TREE);
16502 tree int_ftype_int_v8hi_v8hi
16503 = build_function_type_list (integer_type_node,
16504 integer_type_node, V8HI_type_node,
16505 V8HI_type_node, NULL_TREE);
16506 tree int_ftype_int_v16qi_v16qi
16507 = build_function_type_list (integer_type_node,
16508 integer_type_node, V16QI_type_node,
16509 V16QI_type_node, NULL_TREE);
16510 tree int_ftype_int_v4sf_v4sf
16511 = build_function_type_list (integer_type_node,
16512 integer_type_node, V4SF_type_node,
16513 V4SF_type_node, NULL_TREE);
16514 tree int_ftype_int_v2df_v2df
16515 = build_function_type_list (integer_type_node,
16516 integer_type_node, V2DF_type_node,
16517 V2DF_type_node, NULL_TREE);
16518 tree v2di_ftype_v2di
16519 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16520 tree v4si_ftype_v4si
16521 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16522 tree v8hi_ftype_v8hi
16523 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16524 tree v16qi_ftype_v16qi
16525 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16526 tree v4sf_ftype_v4sf
16527 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16528 tree v2df_ftype_v2df
16529 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16530 tree void_ftype_pcvoid_int_int
16531 = build_function_type_list (void_type_node,
16532 pcvoid_type_node, integer_type_node,
16533 integer_type_node, NULL_TREE);
16535 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16536 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16537 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16538 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16539 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16540 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16541 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16542 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16543 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16544 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16545 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16546 ALTIVEC_BUILTIN_LVXL_V2DF);
16547 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16548 ALTIVEC_BUILTIN_LVXL_V2DI);
16549 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16550 ALTIVEC_BUILTIN_LVXL_V4SF);
16551 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16552 ALTIVEC_BUILTIN_LVXL_V4SI);
16553 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16554 ALTIVEC_BUILTIN_LVXL_V8HI);
16555 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16556 ALTIVEC_BUILTIN_LVXL_V16QI);
16557 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16558 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16559 ALTIVEC_BUILTIN_LVX_V2DF);
16560 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16561 ALTIVEC_BUILTIN_LVX_V2DI);
16562 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16563 ALTIVEC_BUILTIN_LVX_V4SF);
16564 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16565 ALTIVEC_BUILTIN_LVX_V4SI);
16566 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16567 ALTIVEC_BUILTIN_LVX_V8HI);
16568 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16569 ALTIVEC_BUILTIN_LVX_V16QI);
16570 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16571 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16572 ALTIVEC_BUILTIN_STVX_V2DF);
16573 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16574 ALTIVEC_BUILTIN_STVX_V2DI);
16575 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16576 ALTIVEC_BUILTIN_STVX_V4SF);
16577 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16578 ALTIVEC_BUILTIN_STVX_V4SI);
16579 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16580 ALTIVEC_BUILTIN_STVX_V8HI);
16581 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16582 ALTIVEC_BUILTIN_STVX_V16QI);
16583 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16584 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16585 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16586 ALTIVEC_BUILTIN_STVXL_V2DF);
16587 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16588 ALTIVEC_BUILTIN_STVXL_V2DI);
16589 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16590 ALTIVEC_BUILTIN_STVXL_V4SF);
16591 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16592 ALTIVEC_BUILTIN_STVXL_V4SI);
16593 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16594 ALTIVEC_BUILTIN_STVXL_V8HI);
16595 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16596 ALTIVEC_BUILTIN_STVXL_V16QI);
16597 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16598 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16599 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16600 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16601 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16602 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16603 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16604 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16605 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16606 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16607 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16608 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16609 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16610 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16611 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16612 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16614 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16615 VSX_BUILTIN_LXVD2X_V2DF);
16616 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16617 VSX_BUILTIN_LXVD2X_V2DI);
16618 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16619 VSX_BUILTIN_LXVW4X_V4SF);
16620 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16621 VSX_BUILTIN_LXVW4X_V4SI);
16622 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16623 VSX_BUILTIN_LXVW4X_V8HI);
16624 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16625 VSX_BUILTIN_LXVW4X_V16QI);
16626 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16627 VSX_BUILTIN_STXVD2X_V2DF);
16628 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16629 VSX_BUILTIN_STXVD2X_V2DI);
16630 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16631 VSX_BUILTIN_STXVW4X_V4SF);
16632 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16633 VSX_BUILTIN_STXVW4X_V4SI);
16634 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16635 VSX_BUILTIN_STXVW4X_V8HI);
16636 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16637 VSX_BUILTIN_STXVW4X_V16QI);
16639 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16640 VSX_BUILTIN_LD_ELEMREV_V2DF);
16641 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16642 VSX_BUILTIN_LD_ELEMREV_V2DI);
16643 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16644 VSX_BUILTIN_LD_ELEMREV_V4SF);
16645 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16646 VSX_BUILTIN_LD_ELEMREV_V4SI);
16647 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16648 VSX_BUILTIN_ST_ELEMREV_V2DF);
16649 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16650 VSX_BUILTIN_ST_ELEMREV_V2DI);
16651 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16652 VSX_BUILTIN_ST_ELEMREV_V4SF);
16653 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16654 VSX_BUILTIN_ST_ELEMREV_V4SI);
16656 if (TARGET_P9_VECTOR)
16658 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16659 VSX_BUILTIN_LD_ELEMREV_V8HI);
16660 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16661 VSX_BUILTIN_LD_ELEMREV_V16QI);
16662 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
16663 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
16664 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
16665 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
16668 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16669 VSX_BUILTIN_VEC_LD);
16670 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16671 VSX_BUILTIN_VEC_ST);
16672 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16673 VSX_BUILTIN_VEC_XL);
16674 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16675 VSX_BUILTIN_VEC_XST);
16677 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16678 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16679 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16681 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16682 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16683 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16684 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16685 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16686 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16687 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16688 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16689 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16690 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16691 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16692 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
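/* Usage sketch (hypothetical user code):
     vector float f = vec_ctf (vsi, 3);
   resolves through __builtin_vec_ctf and scales the converted values
   by 2**-3, i.e. the immediate becomes the vcfsx scale operand. */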
16694 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16695 ALTIVEC_BUILTIN_VEC_ADDE);
16696 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16697 ALTIVEC_BUILTIN_VEC_ADDEC);
16698 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16699 ALTIVEC_BUILTIN_VEC_CMPNE);
16700 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16701 ALTIVEC_BUILTIN_VEC_MUL);
16703 /* Cell builtins. */
16704 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16705 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16706 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16707 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16709 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16710 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16711 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16712 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16714 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16715 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16716 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16717 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16719 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16720 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16721 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16722 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16724 /* Add the DST variants. */
16725 d = bdesc_dst;
16726 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16727 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
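/* Usage sketch (hypothetical user code):
     __builtin_altivec_dst (p, ctl, 0);
   starts data-stream touch 0 with the block count/stride encoded in
   CTL; every DST variant shares the (const void *, int, int) shape
   given by void_ftype_pcvoid_int_int above. */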
16729 /* Initialize the predicates. */
16730 d = bdesc_altivec_preds;
16731 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16733 machine_mode mode1;
16734 tree type;
16736 if (rs6000_overloaded_builtin_p (d->code))
16737 mode1 = VOIDmode;
16738 else
16739 mode1 = insn_data[d->icode].operand[1].mode;
16741 switch (mode1)
16743 case VOIDmode:
16744 type = int_ftype_int_opaque_opaque;
16745 break;
16746 case V2DImode:
16747 type = int_ftype_int_v2di_v2di;
16748 break;
16749 case V4SImode:
16750 type = int_ftype_int_v4si_v4si;
16751 break;
16752 case V8HImode:
16753 type = int_ftype_int_v8hi_v8hi;
16754 break;
16755 case V16QImode:
16756 type = int_ftype_int_v16qi_v16qi;
16757 break;
16758 case V4SFmode:
16759 type = int_ftype_int_v4sf_v4sf;
16760 break;
16761 case V2DFmode:
16762 type = int_ftype_int_v2df_v2df;
16763 break;
16764 default:
16765 gcc_unreachable ();
16768 def_builtin (d->name, type, d->code);
16771 /* Initialize the abs* operators. */
16772 d = bdesc_abs;
16773 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16775 machine_mode mode0;
16776 tree type;
16778 mode0 = insn_data[d->icode].operand[0].mode;
16780 switch (mode0)
16782 case V2DImode:
16783 type = v2di_ftype_v2di;
16784 break;
16785 case V4SImode:
16786 type = v4si_ftype_v4si;
16787 break;
16788 case V8HImode:
16789 type = v8hi_ftype_v8hi;
16790 break;
16791 case V16QImode:
16792 type = v16qi_ftype_v16qi;
16793 break;
16794 case V4SFmode:
16795 type = v4sf_ftype_v4sf;
16796 break;
16797 case V2DFmode:
16798 type = v2df_ftype_v2df;
16799 break;
16800 default:
16801 gcc_unreachable ();
16804 def_builtin (d->name, type, d->code);
16807 /* Initialize target builtin that implements
16808 targetm.vectorize.builtin_mask_for_load. */
16810 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
16811 v16qi_ftype_long_pcvoid,
16812 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
16813 BUILT_IN_MD, NULL, NULL_TREE);
16814 TREE_READONLY (decl) = 1;
16815 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
16816 altivec_builtin_mask_for_load = decl;
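/* Setting TREE_READONLY above declares the mask builtin const (no side
   effects, result depends only on its argument), so repeated mask
   computations for the same address can be CSEd. The vectorizer calls
   it through targetm.vectorize.builtin_mask_for_load to build the
   permute mask for its misaligned-load realignment scheme. */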
16818 /* Access to the vec_init patterns. */
16819 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
16820 integer_type_node, integer_type_node,
16821 integer_type_node, NULL_TREE);
16822 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
16824 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
16825 short_integer_type_node,
16826 short_integer_type_node,
16827 short_integer_type_node,
16828 short_integer_type_node,
16829 short_integer_type_node,
16830 short_integer_type_node,
16831 short_integer_type_node, NULL_TREE);
16832 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
16834 ftype = build_function_type_list (V16QI_type_node, char_type_node,
16835 char_type_node, char_type_node,
16836 char_type_node, char_type_node,
16837 char_type_node, char_type_node,
16838 char_type_node, char_type_node,
16839 char_type_node, char_type_node,
16840 char_type_node, char_type_node,
16841 char_type_node, char_type_node,
16842 char_type_node, NULL_TREE);
16843 def_builtin ("__builtin_vec_init_v16qi", ftype,
16844 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
16846 ftype = build_function_type_list (V4SF_type_node, float_type_node,
16847 float_type_node, float_type_node,
16848 float_type_node, NULL_TREE);
16849 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
16851 /* VSX builtins. */
16852 ftype = build_function_type_list (V2DF_type_node, double_type_node,
16853 double_type_node, NULL_TREE);
16854 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
16856 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
16857 intDI_type_node, NULL_TREE);
16858 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
16860 /* Access to the vec_set patterns. */
16861 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
16862 intSI_type_node,
16863 integer_type_node, NULL_TREE);
16864 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
16866 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16867 intHI_type_node,
16868 integer_type_node, NULL_TREE);
16869 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
16871 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
16872 intQI_type_node,
16873 integer_type_node, NULL_TREE);
16874 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
16876 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
16877 float_type_node,
16878 integer_type_node, NULL_TREE);
16879 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
16881 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
16882 double_type_node,
16883 integer_type_node, NULL_TREE);
16884 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
16886 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
16887 intDI_type_node,
16888 integer_type_node, NULL_TREE);
16889 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
16891 /* Access to the vec_extract patterns. */
16892 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
16893 integer_type_node, NULL_TREE);
16894 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
16896 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
16897 integer_type_node, NULL_TREE);
16898 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
16900 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
16901 integer_type_node, NULL_TREE);
16902 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
16904 ftype = build_function_type_list (float_type_node, V4SF_type_node,
16905 integer_type_node, NULL_TREE);
16906 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
16908 ftype = build_function_type_list (double_type_node, V2DF_type_node,
16909 integer_type_node, NULL_TREE);
16910 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
16912 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
16913 integer_type_node, NULL_TREE);
16914 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
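/* Usage sketch (hypothetical user code):
     int e = __builtin_vec_ext_v4si (v, 2);
   extracts element 2, while the corresponding __builtin_vec_set_v4si
   (v, x, 2) above returns a copy of V with that element replaced. */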
16917 if (V1TI_type_node)
16919 tree v1ti_ftype_long_pcvoid
16920 = build_function_type_list (V1TI_type_node,
16921 long_integer_type_node, pcvoid_type_node,
16922 NULL_TREE);
16923 tree void_ftype_v1ti_long_pvoid
16924 = build_function_type_list (void_type_node,
16925 V1TI_type_node, long_integer_type_node,
16926 pvoid_type_node, NULL_TREE);
16927 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
16928 VSX_BUILTIN_LXVD2X_V1TI);
16929 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
16930 VSX_BUILTIN_STXVD2X_V1TI);
16931 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
16932 NULL_TREE);
16933 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
16934 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
16935 intTI_type_node,
16936 integer_type_node, NULL_TREE);
16937 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
16938 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
16939 integer_type_node, NULL_TREE);
16940 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
16945 static void
16946 htm_init_builtins (void)
16948 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16949 const struct builtin_description *d;
16950 size_t i;
16952 d = bdesc_htm;
16953 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
16955 tree op[MAX_HTM_OPERANDS], type;
16956 HOST_WIDE_INT mask = d->mask;
16957 unsigned attr = rs6000_builtin_info[d->code].attr;
16958 bool void_func = (attr & RS6000_BTC_VOID);
16959 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
16960 int nopnds = 0;
16961 tree gpr_type_node;
16962 tree rettype;
16963 tree argtype;
16965 if (TARGET_32BIT && TARGET_POWERPC64)
16966 gpr_type_node = long_long_unsigned_type_node;
16967 else
16968 gpr_type_node = long_unsigned_type_node;
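/* A 32-bit ABI running on a 64-bit CPU (-m32 -mpowerpc64) still has
   64-bit GPRs, so SPR values need long long there; otherwise plain
   unsigned long already matches the register width. */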
16970 if (attr & RS6000_BTC_SPR)
16972 rettype = gpr_type_node;
16973 argtype = gpr_type_node;
16975 else if (d->code == HTM_BUILTIN_TABORTDC
16976 || d->code == HTM_BUILTIN_TABORTDCI)
16978 rettype = unsigned_type_node;
16979 argtype = gpr_type_node;
16981 else
16983 rettype = unsigned_type_node;
16984 argtype = unsigned_type_node;
16987 if ((mask & builtin_mask) != mask)
16989 if (TARGET_DEBUG_BUILTIN)
16990 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
16991 continue;
16994 if (d->name == 0)
16996 if (TARGET_DEBUG_BUILTIN)
16997 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
16998 (long unsigned) i);
16999 continue;
17002 op[nopnds++] = (void_func) ? void_type_node : rettype;
17004 if (attr_args == RS6000_BTC_UNARY)
17005 op[nopnds++] = argtype;
17006 else if (attr_args == RS6000_BTC_BINARY)
17008 op[nopnds++] = argtype;
17009 op[nopnds++] = argtype;
17011 else if (attr_args == RS6000_BTC_TERNARY)
17013 op[nopnds++] = argtype;
17014 op[nopnds++] = argtype;
17015 op[nopnds++] = argtype;
17018 switch (nopnds)
17020 case 1:
17021 type = build_function_type_list (op[0], NULL_TREE);
17022 break;
17023 case 2:
17024 type = build_function_type_list (op[0], op[1], NULL_TREE);
17025 break;
17026 case 3:
17027 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17028 break;
17029 case 4:
17030 type = build_function_type_list (op[0], op[1], op[2], op[3],
17031 NULL_TREE);
17032 break;
17033 default:
17034 gcc_unreachable ();
17037 def_builtin (d->name, type, d->code);
17041 /* Hash function for builtin functions with up to 3 arguments and a return
17042 type. */
17043 hashval_t
17044 builtin_hasher::hash (builtin_hash_struct *bh)
17046 unsigned ret = 0;
17047 int i;
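/* Fold each (mode, uns_p) pair in as mixed-radix digits, base
   MAX_MACHINE_MODE and base 2 respectively, so distinct signatures
   hash apart (modulo wraparound). */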
17049 for (i = 0; i < 4; i++)
17051 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17052 ret = (ret * 2) + bh->uns_p[i];
17055 return ret;
17058 /* Compare builtin hash entries H1 and H2 for equivalence. */
17059 bool
17060 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17062 return ((p1->mode[0] == p2->mode[0])
17063 && (p1->mode[1] == p2->mode[1])
17064 && (p1->mode[2] == p2->mode[2])
17065 && (p1->mode[3] == p2->mode[3])
17066 && (p1->uns_p[0] == p2->uns_p[0])
17067 && (p1->uns_p[1] == p2->uns_p[1])
17068 && (p1->uns_p[2] == p2->uns_p[2])
17069 && (p1->uns_p[3] == p2->uns_p[3]));
17072 /* Map types for builtin functions with an explicit return type and up to 3
17073 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
17074 of the unused argument slots. */
17075 static tree
17076 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17077 machine_mode mode_arg1, machine_mode mode_arg2,
17078 enum rs6000_builtins builtin, const char *name)
17080 struct builtin_hash_struct h;
17081 struct builtin_hash_struct *h2;
17082 int num_args = 3;
17083 int i;
17084 tree ret_type = NULL_TREE;
17085 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17087 /* Create builtin_hash_table. */
17088 if (builtin_hash_table == NULL)
17089 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17091 h.type = NULL_TREE;
17092 h.mode[0] = mode_ret;
17093 h.mode[1] = mode_arg0;
17094 h.mode[2] = mode_arg1;
17095 h.mode[3] = mode_arg2;
17096 h.uns_p[0] = 0;
17097 h.uns_p[1] = 0;
17098 h.uns_p[2] = 0;
17099 h.uns_p[3] = 0;
17101 /* If the builtin produces unsigned results or takes unsigned
17102 arguments, and it is returned as a decl for the vectorizer (such as
17103 widening multiplies, permute), make sure the arguments and return value
17104 are type correct. */
17105 switch (builtin)
17107 /* unsigned 1 argument functions. */
17108 case CRYPTO_BUILTIN_VSBOX:
17109 case P8V_BUILTIN_VGBBD:
17110 case MISC_BUILTIN_CDTBCD:
17111 case MISC_BUILTIN_CBCDTD:
17112 h.uns_p[0] = 1;
17113 h.uns_p[1] = 1;
17114 break;
17116 /* unsigned 2 argument functions. */
17117 case ALTIVEC_BUILTIN_VMULEUB_UNS:
17118 case ALTIVEC_BUILTIN_VMULEUH_UNS:
17119 case ALTIVEC_BUILTIN_VMULOUB_UNS:
17120 case ALTIVEC_BUILTIN_VMULOUH_UNS:
17121 case CRYPTO_BUILTIN_VCIPHER:
17122 case CRYPTO_BUILTIN_VCIPHERLAST:
17123 case CRYPTO_BUILTIN_VNCIPHER:
17124 case CRYPTO_BUILTIN_VNCIPHERLAST:
17125 case CRYPTO_BUILTIN_VPMSUMB:
17126 case CRYPTO_BUILTIN_VPMSUMH:
17127 case CRYPTO_BUILTIN_VPMSUMW:
17128 case CRYPTO_BUILTIN_VPMSUMD:
17129 case CRYPTO_BUILTIN_VPMSUM:
17130 case MISC_BUILTIN_ADDG6S:
17131 case MISC_BUILTIN_DIVWEU:
17132 case MISC_BUILTIN_DIVWEUO:
17133 case MISC_BUILTIN_DIVDEU:
17134 case MISC_BUILTIN_DIVDEUO:
17135 h.uns_p[0] = 1;
17136 h.uns_p[1] = 1;
17137 h.uns_p[2] = 1;
17138 break;
17140 /* unsigned 3 argument functions. */
17141 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17142 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17143 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17144 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17145 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17146 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17147 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17148 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17149 case VSX_BUILTIN_VPERM_16QI_UNS:
17150 case VSX_BUILTIN_VPERM_8HI_UNS:
17151 case VSX_BUILTIN_VPERM_4SI_UNS:
17152 case VSX_BUILTIN_VPERM_2DI_UNS:
17153 case VSX_BUILTIN_XXSEL_16QI_UNS:
17154 case VSX_BUILTIN_XXSEL_8HI_UNS:
17155 case VSX_BUILTIN_XXSEL_4SI_UNS:
17156 case VSX_BUILTIN_XXSEL_2DI_UNS:
17157 case CRYPTO_BUILTIN_VPERMXOR:
17158 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17159 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17160 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17161 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17162 case CRYPTO_BUILTIN_VSHASIGMAW:
17163 case CRYPTO_BUILTIN_VSHASIGMAD:
17164 case CRYPTO_BUILTIN_VSHASIGMA:
17165 h.uns_p[0] = 1;
17166 h.uns_p[1] = 1;
17167 h.uns_p[2] = 1;
17168 h.uns_p[3] = 1;
17169 break;
17171 /* signed permute functions with unsigned char mask. */
17172 case ALTIVEC_BUILTIN_VPERM_16QI:
17173 case ALTIVEC_BUILTIN_VPERM_8HI:
17174 case ALTIVEC_BUILTIN_VPERM_4SI:
17175 case ALTIVEC_BUILTIN_VPERM_4SF:
17176 case ALTIVEC_BUILTIN_VPERM_2DI:
17177 case ALTIVEC_BUILTIN_VPERM_2DF:
17178 case VSX_BUILTIN_VPERM_16QI:
17179 case VSX_BUILTIN_VPERM_8HI:
17180 case VSX_BUILTIN_VPERM_4SI:
17181 case VSX_BUILTIN_VPERM_4SF:
17182 case VSX_BUILTIN_VPERM_2DI:
17183 case VSX_BUILTIN_VPERM_2DF:
17184 h.uns_p[3] = 1;
17185 break;
17187 /* unsigned args, signed return. */
17188 case VSX_BUILTIN_XVCVUXDDP_UNS:
17189 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17190 h.uns_p[1] = 1;
17191 break;
17193 /* signed args, unsigned return. */
17194 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17195 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17196 case MISC_BUILTIN_UNPACK_TD:
17197 case MISC_BUILTIN_UNPACK_V1TI:
17198 h.uns_p[0] = 1;
17199 break;
17201 /* unsigned arguments for 128-bit pack instructions. */
17202 case MISC_BUILTIN_PACK_TD:
17203 case MISC_BUILTIN_PACK_V1TI:
17204 h.uns_p[1] = 1;
17205 h.uns_p[2] = 1;
17206 break;
17208 default:
17209 break;
17212 /* Figure out how many args are present. */
17213 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17214 num_args--;
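/* E.g. a (DFmode; DFmode, VOIDmode, VOIDmode) signature -- return mode
   plus three argument slots -- leaves num_args == 1, since only
   trailing VOIDmode slots are trimmed. */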
17216 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17217 if (!ret_type && h.uns_p[0])
17218 ret_type = builtin_mode_to_type[h.mode[0]][0];
17220 if (!ret_type)
17221 fatal_error (input_location,
17222 "internal error: builtin function %s had an unexpected "
17223 "return type %s", name, GET_MODE_NAME (h.mode[0]));
17225 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17226 arg_type[i] = NULL_TREE;
17228 for (i = 0; i < num_args; i++)
17230 int m = (int) h.mode[i+1];
17231 int uns_p = h.uns_p[i+1];
17233 arg_type[i] = builtin_mode_to_type[m][uns_p];
17234 if (!arg_type[i] && uns_p)
17235 arg_type[i] = builtin_mode_to_type[m][0];
17237 if (!arg_type[i])
17238 fatal_error (input_location,
17239 "internal error: builtin function %s, argument %d "
17240 "had unexpected argument type %s", name, i,
17241 GET_MODE_NAME (m));
17244 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17245 if (*found == NULL)
17247 h2 = ggc_alloc<builtin_hash_struct> ();
17248 *h2 = h;
17249 *found = h2;
17251 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17252 arg_type[2], NULL_TREE);
17255 return (*found)->type;
17258 static void
17259 rs6000_common_init_builtins (void)
17261 const struct builtin_description *d;
17262 size_t i;
17264 tree opaque_ftype_opaque = NULL_TREE;
17265 tree opaque_ftype_opaque_opaque = NULL_TREE;
17266 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17267 tree v2si_ftype = NULL_TREE;
17268 tree v2si_ftype_qi = NULL_TREE;
17269 tree v2si_ftype_v2si_qi = NULL_TREE;
17270 tree v2si_ftype_int_qi = NULL_TREE;
17271 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17273 if (!TARGET_PAIRED_FLOAT)
17275 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
17276 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
17279 /* Paired and SPE builtins are only available if you build a compiler with
17280 the appropriate options, so only create those builtins with the
17281 appropriate compiler option. Create Altivec and VSX builtins on machines
17282 with at least the general purpose extensions (970 and newer) to allow the
17283 use of the target attribute. */
17285 if (TARGET_EXTRA_BUILTINS)
17286 builtin_mask |= RS6000_BTM_COMMON;
17288 /* Add the ternary operators. */
17289 d = bdesc_3arg;
17290 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17292 tree type;
17293 HOST_WIDE_INT mask = d->mask;
17295 if ((mask & builtin_mask) != mask)
17297 if (TARGET_DEBUG_BUILTIN)
17298 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17299 continue;
17302 if (rs6000_overloaded_builtin_p (d->code))
17304 if (! (type = opaque_ftype_opaque_opaque_opaque))
17305 type = opaque_ftype_opaque_opaque_opaque
17306 = build_function_type_list (opaque_V4SI_type_node,
17307 opaque_V4SI_type_node,
17308 opaque_V4SI_type_node,
17309 opaque_V4SI_type_node,
17310 NULL_TREE);
17312 else
17314 enum insn_code icode = d->icode;
17315 if (d->name == 0)
17317 if (TARGET_DEBUG_BUILTIN)
17318 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17319 (long unsigned)i);
17321 continue;
17324 if (icode == CODE_FOR_nothing)
17326 if (TARGET_DEBUG_BUILTIN)
17327 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17328 d->name);
17330 continue;
17333 type = builtin_function_type (insn_data[icode].operand[0].mode,
17334 insn_data[icode].operand[1].mode,
17335 insn_data[icode].operand[2].mode,
17336 insn_data[icode].operand[3].mode,
17337 d->code, d->name);
17340 def_builtin (d->name, type, d->code);
17343 /* Add the binary operators. */
17344 d = bdesc_2arg;
17345 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17347 machine_mode mode0, mode1, mode2;
17348 tree type;
17349 HOST_WIDE_INT mask = d->mask;
17351 if ((mask & builtin_mask) != mask)
17353 if (TARGET_DEBUG_BUILTIN)
17354 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17355 continue;
17358 if (rs6000_overloaded_builtin_p (d->code))
17360 if (! (type = opaque_ftype_opaque_opaque))
17361 type = opaque_ftype_opaque_opaque
17362 = build_function_type_list (opaque_V4SI_type_node,
17363 opaque_V4SI_type_node,
17364 opaque_V4SI_type_node,
17365 NULL_TREE);
17367 else
17369 enum insn_code icode = d->icode;
17370 if (d->name == 0)
17372 if (TARGET_DEBUG_BUILTIN)
17373 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17374 (long unsigned)i);
17376 continue;
17379 if (icode == CODE_FOR_nothing)
17381 if (TARGET_DEBUG_BUILTIN)
17382 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17383 d->name);
17385 continue;
17388 mode0 = insn_data[icode].operand[0].mode;
17389 mode1 = insn_data[icode].operand[1].mode;
17390 mode2 = insn_data[icode].operand[2].mode;
17392 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
17394 if (! (type = v2si_ftype_v2si_qi))
17395 type = v2si_ftype_v2si_qi
17396 = build_function_type_list (opaque_V2SI_type_node,
17397 opaque_V2SI_type_node,
17398 char_type_node,
17399 NULL_TREE);
17402 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
17403 && mode2 == QImode)
17405 if (! (type = v2si_ftype_int_qi))
17406 type = v2si_ftype_int_qi
17407 = build_function_type_list (opaque_V2SI_type_node,
17408 integer_type_node,
17409 char_type_node,
17410 NULL_TREE);
17413 else
17414 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17415 d->code, d->name);
17418 def_builtin (d->name, type, d->code);
17421 /* Add the simple unary operators. */
17422 d = bdesc_1arg;
17423 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17425 machine_mode mode0, mode1;
17426 tree type;
17427 HOST_WIDE_INT mask = d->mask;
17429 if ((mask & builtin_mask) != mask)
17431 if (TARGET_DEBUG_BUILTIN)
17432 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17433 continue;
17436 if (rs6000_overloaded_builtin_p (d->code))
17438 if (! (type = opaque_ftype_opaque))
17439 type = opaque_ftype_opaque
17440 = build_function_type_list (opaque_V4SI_type_node,
17441 opaque_V4SI_type_node,
17442 NULL_TREE);
17444 else
17446 enum insn_code icode = d->icode;
17447 if (d->name == 0)
17449 if (TARGET_DEBUG_BUILTIN)
17450 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
17451 (long unsigned)i);
17453 continue;
17456 if (icode == CODE_FOR_nothing)
17458 if (TARGET_DEBUG_BUILTIN)
17459 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17460 d->name);
17462 continue;
17465 mode0 = insn_data[icode].operand[0].mode;
17466 mode1 = insn_data[icode].operand[1].mode;
17468 if (mode0 == V2SImode && mode1 == QImode)
17470 if (! (type = v2si_ftype_qi))
17471 type = v2si_ftype_qi
17472 = build_function_type_list (opaque_V2SI_type_node,
17473 char_type_node,
17474 NULL_TREE);
17477 else
17478 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17479 d->code, d->name);
17482 def_builtin (d->name, type, d->code);
17485 /* Add the simple no-argument operators. */
17486 d = bdesc_0arg;
17487 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17489 machine_mode mode0;
17490 tree type;
17491 HOST_WIDE_INT mask = d->mask;
17493 if ((mask & builtin_mask) != mask)
17495 if (TARGET_DEBUG_BUILTIN)
17496 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17497 continue;
17499 if (rs6000_overloaded_builtin_p (d->code))
17501 if (!opaque_ftype_opaque)
17502 opaque_ftype_opaque
17503 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17504 type = opaque_ftype_opaque;
17506 else
17508 enum insn_code icode = d->icode;
17509 if (d->name == 0)
17511 if (TARGET_DEBUG_BUILTIN)
17512 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17513 (long unsigned) i);
17514 continue;
17516 if (icode == CODE_FOR_nothing)
17518 if (TARGET_DEBUG_BUILTIN)
17519 fprintf (stderr,
17520 "rs6000_builtin, skip no-argument %s (no code)\n",
17521 d->name);
17522 continue;
17524 mode0 = insn_data[icode].operand[0].mode;
17525 if (mode0 == V2SImode)
17527 /* Code for SPE. */
17528 if (! (type = v2si_ftype))
17530 v2si_ftype
17531 = build_function_type_list (opaque_V2SI_type_node,
17532 NULL_TREE);
17533 type = v2si_ftype;
17536 else
17537 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17538 d->code, d->name);
17540 def_builtin (d->name, type, d->code);
17544 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17545 static void
17546 init_float128_ibm (machine_mode mode)
17548 if (!TARGET_XL_COMPAT)
17550 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17551 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17552 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17553 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17555 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
17557 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17558 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17559 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17560 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17561 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17562 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17563 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17565 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17566 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17567 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17568 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17569 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17570 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17571 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17572 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17575 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
17576 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17578 else
17580 set_optab_libfunc (add_optab, mode, "_xlqadd");
17581 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17582 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17583 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
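/* The _xlq* entry points live in IBM's XL C runtime; selecting them
   under -mxl-compat keeps 128-bit long double arithmetic
   link-compatible with XL-compiled objects. */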
17586 /* Add various conversions for IFmode to use the traditional TFmode
17587 names. */
17588 if (mode == IFmode)
17590 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
17591 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
17592 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
17593 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
17594 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
17595 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
17597 if (TARGET_POWERPC64)
17599 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17600 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17601 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17602 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17607 /* Set up IEEE 128-bit floating point routines. Use different names if the
17608 arguments can be passed in a vector register. The historical PowerPC
17609 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17610 continue to use that if we aren't using vector registers to pass IEEE
17611 128-bit floating point. */
17613 static void
17614 init_float128_ieee (machine_mode mode)
17616 if (FLOAT128_VECTOR_P (mode))
17618 set_optab_libfunc (add_optab, mode, "__addkf3");
17619 set_optab_libfunc (sub_optab, mode, "__subkf3");
17620 set_optab_libfunc (neg_optab, mode, "__negkf2");
17621 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17622 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17623 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17624 set_optab_libfunc (abs_optab, mode, "__abstkf2");
17626 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17627 set_optab_libfunc (ne_optab, mode, "__nekf2");
17628 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17629 set_optab_libfunc (ge_optab, mode, "__gekf2");
17630 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17631 set_optab_libfunc (le_optab, mode, "__lekf2");
17632 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17634 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17635 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17636 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17637 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17639 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
17640 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17641 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
17643 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
17644 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17645 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
17647 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
17648 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
17649 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
17650 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
17651 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
17652 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
17654 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17655 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17656 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17657 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17659 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17660 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17661 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17662 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17664 if (TARGET_POWERPC64)
17666 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17667 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17668 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17669 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17673 else
17675 set_optab_libfunc (add_optab, mode, "_q_add");
17676 set_optab_libfunc (sub_optab, mode, "_q_sub");
17677 set_optab_libfunc (neg_optab, mode, "_q_neg");
17678 set_optab_libfunc (smul_optab, mode, "_q_mul");
17679 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17680 if (TARGET_PPC_GPOPT)
17681 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17683 set_optab_libfunc (eq_optab, mode, "_q_feq");
17684 set_optab_libfunc (ne_optab, mode, "_q_fne");
17685 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17686 set_optab_libfunc (ge_optab, mode, "_q_fge");
17687 set_optab_libfunc (lt_optab, mode, "_q_flt");
17688 set_optab_libfunc (le_optab, mode, "_q_fle");
17690 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17691 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17692 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17693 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17694 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17695 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17696 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17697 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17701 static void
17702 rs6000_init_libfuncs (void)
17704 /* __float128 support. */
17705 if (TARGET_FLOAT128)
17707 init_float128_ibm (IFmode);
17708 init_float128_ieee (KFmode);
17711 /* AIX/Darwin/64-bit Linux quad floating point routines. */
17712 if (TARGET_LONG_DOUBLE_128)
17714 if (!TARGET_IEEEQUAD)
17715 init_float128_ibm (TFmode);
17717 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
17718 else
17719 init_float128_ieee (TFmode);
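/* A sketch of the resulting mapping, for illustration only: on a target
   where long double is 128 bits in the IBM extended format
   (TARGET_LONG_DOUBLE_128 and !TARGET_IEEEQUAD), TFmode arithmetic goes
   through init_float128_ibm and hence the __gcc_q* (or _xlq*) routines,
   while KFmode always receives the IEEE __*kf* routines whenever
   TARGET_FLOAT128 is enabled.  */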
17724 /* Expand a block clear operation, and return 1 if successful. Return 0
17725 if we should let the compiler generate normal code.
17727 operands[0] is the destination
17728 operands[1] is the length
17729 operands[3] is the alignment */
17731 int
17732 expand_block_clear (rtx operands[])
17734 rtx orig_dest = operands[0];
17735 rtx bytes_rtx = operands[1];
17736 rtx align_rtx = operands[3];
17737 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
17738 HOST_WIDE_INT align;
17739 HOST_WIDE_INT bytes;
17740 int offset;
17741 int clear_bytes;
17742 int clear_step;
17744 /* If this is not a fixed size clear, just call memset */
17745 if (! constp)
17746 return 0;
17748 /* This must be a fixed size alignment */
17749 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
17750 align = INTVAL (align_rtx) * BITS_PER_UNIT;
17752 /* Anything to clear? */
17753 bytes = INTVAL (bytes_rtx);
17754 if (bytes <= 0)
17755 return 1;
17757 /* Use the builtin memset after a point, to avoid huge code bloat.
17758 When optimize_size, avoid any significant code bloat; calling
17759 memset is about 4 instructions, so allow for one instruction to
17760 load zero and three to do clearing. */
17761 if (TARGET_ALTIVEC && align >= 128)
17762 clear_step = 16;
17763 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
17764 clear_step = 8;
17765 else if (TARGET_SPE && align >= 64)
17766 clear_step = 8;
17767 else
17768 clear_step = 4;
17770 if (optimize_size && bytes > 3 * clear_step)
17771 return 0;
17772 if (! optimize_size && bytes > 8 * clear_step)
17773 return 0;
17775 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
17777 machine_mode mode = BLKmode;
17778 rtx dest;
17780 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
17782 clear_bytes = 16;
17783 mode = V4SImode;
17785 else if (bytes >= 8 && TARGET_SPE && align >= 64)
17787 clear_bytes = 8;
17788 mode = V2SImode;
17790 else if (bytes >= 8 && TARGET_POWERPC64
17791 && (align >= 64 || !STRICT_ALIGNMENT))
17793 clear_bytes = 8;
17794 mode = DImode;
17795 if (offset == 0 && align < 64)
17797 rtx addr;
17799 /* If the address form is reg+offset with offset not a
17800 multiple of four, reload into reg indirect form here
17801 rather than waiting for reload. This way we get one
17802 reload, not one per store. */
17803 addr = XEXP (orig_dest, 0);
17804 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
17805 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17806 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
17808 addr = copy_addr_to_reg (addr);
17809 orig_dest = replace_equiv_address (orig_dest, addr);
17813 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
17814 { /* clear 4 bytes */
17815 clear_bytes = 4;
17816 mode = SImode;
17818 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
17819 { /* clear 2 bytes */
17820 clear_bytes = 2;
17821 mode = HImode;
17823 else /* clear 1 byte at a time */
17825 clear_bytes = 1;
17826 mode = QImode;
17829 dest = adjust_address (orig_dest, mode, offset);
17831 emit_move_insn (dest, CONST0_RTX (mode));
17834 return 1;
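/* Worked example, not from the original source: clearing a 14-byte block
   with 4-byte alignment on a 32-bit strict-alignment target without
   AltiVec gives clear_step = 4, so up to 8 * 4 = 32 bytes are cleared
   inline and the loop emits

       SImode (4 bytes), SImode (4), SImode (4), HImode (2)

   stores of zero.  AltiVec or 64-bit targets would take the V4SImode or
   DImode paths instead.  */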
17838 /* Expand a block move operation, and return 1 if successful. Return 0
17839 if we should let the compiler generate normal code.
17841 operands[0] is the destination
17842 operands[1] is the source
17843 operands[2] is the length
17844 operands[3] is the alignment */
17846 #define MAX_MOVE_REG 4
17848 int
17849 expand_block_move (rtx operands[])
17851 rtx orig_dest = operands[0];
17852 rtx orig_src = operands[1];
17853 rtx bytes_rtx = operands[2];
17854 rtx align_rtx = operands[3];
17855 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
17856 int align;
17857 int bytes;
17858 int offset;
17859 int move_bytes;
17860 rtx stores[MAX_MOVE_REG];
17861 int num_reg = 0;
17863 /* If this is not a fixed size move, just call memcpy */
17864 if (! constp)
17865 return 0;
17867 /* This must be a fixed size alignment */
17868 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
17869 align = INTVAL (align_rtx) * BITS_PER_UNIT;
17871 /* Anything to move? */
17872 bytes = INTVAL (bytes_rtx);
17873 if (bytes <= 0)
17874 return 1;
17876 if (bytes > rs6000_block_move_inline_limit)
17877 return 0;
17879 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
17881 union {
17882 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
17883 rtx (*mov) (rtx, rtx);
17884 } gen_func;
17885 machine_mode mode = BLKmode;
17886 rtx src, dest;
17888 /* Altivec first, since it will be faster than a string move
17889 when it applies, and usually not significantly larger. */
17890 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
17892 move_bytes = 16;
17893 mode = V4SImode;
17894 gen_func.mov = gen_movv4si;
17896 else if (TARGET_SPE && bytes >= 8 && align >= 64)
17898 move_bytes = 8;
17899 mode = V2SImode;
17900 gen_func.mov = gen_movv2si;
17902 else if (TARGET_STRING
17903 && bytes > 24 /* move up to 32 bytes at a time */
17904 && ! fixed_regs[5]
17905 && ! fixed_regs[6]
17906 && ! fixed_regs[7]
17907 && ! fixed_regs[8]
17908 && ! fixed_regs[9]
17909 && ! fixed_regs[10]
17910 && ! fixed_regs[11]
17911 && ! fixed_regs[12])
17913 move_bytes = (bytes > 32) ? 32 : bytes;
17914 gen_func.movmemsi = gen_movmemsi_8reg;
17916 else if (TARGET_STRING
17917 && bytes > 16 /* move up to 24 bytes at a time */
17918 && ! fixed_regs[5]
17919 && ! fixed_regs[6]
17920 && ! fixed_regs[7]
17921 && ! fixed_regs[8]
17922 && ! fixed_regs[9]
17923 && ! fixed_regs[10])
17925 move_bytes = (bytes > 24) ? 24 : bytes;
17926 gen_func.movmemsi = gen_movmemsi_6reg;
17928 else if (TARGET_STRING
17929 && bytes > 8 /* move up to 16 bytes at a time */
17930 && ! fixed_regs[5]
17931 && ! fixed_regs[6]
17932 && ! fixed_regs[7]
17933 && ! fixed_regs[8])
17935 move_bytes = (bytes > 16) ? 16 : bytes;
17936 gen_func.movmemsi = gen_movmemsi_4reg;
17938 else if (bytes >= 8 && TARGET_POWERPC64
17939 && (align >= 64 || !STRICT_ALIGNMENT))
17941 move_bytes = 8;
17942 mode = DImode;
17943 gen_func.mov = gen_movdi;
17944 if (offset == 0 && align < 64)
17946 rtx addr;
17948 /* If the address form is reg+offset with offset not a
17949 multiple of four, reload into reg indirect form here
17950 rather than waiting for reload. This way we get one
17951 reload, not one per load and/or store. */
17952 addr = XEXP (orig_dest, 0);
17953 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
17954 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17955 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
17957 addr = copy_addr_to_reg (addr);
17958 orig_dest = replace_equiv_address (orig_dest, addr);
17960 addr = XEXP (orig_src, 0);
17961 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
17962 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17963 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
17965 addr = copy_addr_to_reg (addr);
17966 orig_src = replace_equiv_address (orig_src, addr);
17970 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
17971 { /* move up to 8 bytes at a time */
17972 move_bytes = (bytes > 8) ? 8 : bytes;
17973 gen_func.movmemsi = gen_movmemsi_2reg;
17975 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
17976 { /* move 4 bytes */
17977 move_bytes = 4;
17978 mode = SImode;
17979 gen_func.mov = gen_movsi;
17981 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
17982 { /* move 2 bytes */
17983 move_bytes = 2;
17984 mode = HImode;
17985 gen_func.mov = gen_movhi;
17987 else if (TARGET_STRING && bytes > 1)
17988 { /* move up to 4 bytes at a time */
17989 move_bytes = (bytes > 4) ? 4 : bytes;
17990 gen_func.movmemsi = gen_movmemsi_1reg;
17992 else /* move 1 byte at a time */
17994 move_bytes = 1;
17995 mode = QImode;
17996 gen_func.mov = gen_movqi;
17999 src = adjust_address (orig_src, mode, offset);
18000 dest = adjust_address (orig_dest, mode, offset);
18002 if (mode != BLKmode)
18004 rtx tmp_reg = gen_reg_rtx (mode);
18006 emit_insn ((*gen_func.mov) (tmp_reg, src));
18007 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
18010 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
18012 int i;
18013 for (i = 0; i < num_reg; i++)
18014 emit_insn (stores[i]);
18015 num_reg = 0;
18018 if (mode == BLKmode)
18020 /* Move the address into scratch registers. The movmemsi
18021 patterns require zero offset. */
18022 if (!REG_P (XEXP (src, 0)))
18024 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
18025 src = replace_equiv_address (src, src_reg);
18027 set_mem_size (src, move_bytes);
18029 if (!REG_P (XEXP (dest, 0)))
18031 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
18032 dest = replace_equiv_address (dest, dest_reg);
18034 set_mem_size (dest, move_bytes);
18036 emit_insn ((*gen_func.movmemsi) (dest, src,
18037 GEN_INT (move_bytes & 31),
18038 align_rtx));
18042 return 1;
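/* Worked example, for illustration: a 12-byte copy with 4-byte alignment
   on a 32-bit target without TARGET_STRING becomes three SImode
   load/store pairs.  Each load is emitted immediately while its store is
   buffered in stores[], and the buffer is flushed once num_reg reaches
   MAX_MOVE_REG or the final chunk is handled, so every load in a group
   issues before the corresponding stores.  */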
18046 /* Return a string to perform a load_multiple operation.
18047 operands[0] is the vector.
18048 operands[1] is the source address.
18049 operands[2] is the first destination register. */
18051 const char *
18052 rs6000_output_load_multiple (rtx operands[3])
18054 /* We have to handle the case where the pseudo used to contain the address
18055 is assigned to one of the output registers. */
18056 int i, j;
18057 int words = XVECLEN (operands[0], 0);
18058 rtx xop[10];
18060 if (XVECLEN (operands[0], 0) == 1)
18061 return "lwz %2,0(%1)";
18063 for (i = 0; i < words; i++)
18064 if (refers_to_regno_p (REGNO (operands[2]) + i, operands[1]))
18066 if (i == words-1)
18068 xop[0] = GEN_INT (4 * (words-1));
18069 xop[1] = operands[1];
18070 xop[2] = operands[2];
18071 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
18072 return "";
18074 else if (i == 0)
18076 xop[0] = GEN_INT (4 * (words-1));
18077 xop[1] = operands[1];
18078 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
18079 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
18080 return "";
18082 else
18084 for (j = 0; j < words; j++)
18085 if (j != i)
18087 xop[0] = GEN_INT (j * 4);
18088 xop[1] = operands[1];
18089 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
18090 output_asm_insn ("lwz %2,%0(%1)", xop);
18092 xop[0] = GEN_INT (i * 4);
18093 xop[1] = operands[1];
18094 output_asm_insn ("lwz %1,%0(%1)", xop);
18095 return "";
18099 return "lswi %2,%1,%N0";
18103 /* A validation routine: say whether CODE, a condition code, and MODE
18104 match. The other alternatives either don't make sense or should
18105 never be generated. */
18107 void
18108 validate_condition_mode (enum rtx_code code, machine_mode mode)
18110 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18111 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18112 && GET_MODE_CLASS (mode) == MODE_CC);
18114 /* These don't make sense. */
18115 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18116 || mode != CCUNSmode);
18118 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18119 || mode == CCUNSmode);
18121 gcc_assert (mode == CCFPmode
18122 || (code != ORDERED && code != UNORDERED
18123 && code != UNEQ && code != LTGT
18124 && code != UNGT && code != UNLT
18125 && code != UNGE && code != UNLE));
18127 /* These should never be generated except for
18128 flag_finite_math_only. */
18129 gcc_assert (mode != CCFPmode
18130 || flag_finite_math_only
18131 || (code != LE && code != GE
18132 && code != UNEQ && code != LTGT
18133 && code != UNGT && code != UNLT));
18135 /* These are invalid; the information is not there. */
18136 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18140 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18141 rldicl, rldicr, or rldic instruction in mode MODE. If so, and if E is
18142 non-null, store there the bit offset (counted from the right) where
18143 the single stretch of 1 bits begins; and similarly for B, the bit
18144 offset where it ends. */
18146 bool
18147 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18149 unsigned HOST_WIDE_INT val = INTVAL (mask);
18150 unsigned HOST_WIDE_INT bit;
18151 int nb, ne;
18152 int n = GET_MODE_PRECISION (mode);
18154 if (mode != DImode && mode != SImode)
18155 return false;
18157 if (INTVAL (mask) >= 0)
18159 bit = val & -val;
18160 ne = exact_log2 (bit);
18161 nb = exact_log2 (val + bit);
18163 else if (val + 1 == 0)
18165 nb = n;
18166 ne = 0;
18168 else if (val & 1)
18170 val = ~val;
18171 bit = val & -val;
18172 nb = exact_log2 (bit);
18173 ne = exact_log2 (val + bit);
18175 else
18177 bit = val & -val;
18178 ne = exact_log2 (bit);
18179 if (val + bit == 0)
18180 nb = n;
18181 else
18182 nb = 0;
18185 nb--;
18187 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18188 return false;
18190 if (b)
18191 *b = nb;
18192 if (e)
18193 *e = ne;
18195 return true;
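/* Worked example, not in the original source: in SImode the mask
   0x00ffff00 is a single stretch of ones whose lowest set bit is bit 8
   and highest is bit 23, so *e is set to 8 and *b to 23.  A wrap-around
   mask such as 0x80000001 also validates (*b = 0, *e = 31), since
   rlwinm-style masks are allowed to wrap past bit 0.  */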
18198 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18199 or rldicr instruction, to implement an AND with it in mode MODE. */
18201 bool
18202 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18204 int nb, ne;
18206 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18207 return false;
18209 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18210 does not wrap. */
18211 if (mode == DImode)
18212 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18214 /* For SImode, rlwinm can do everything. */
18215 if (mode == SImode)
18216 return (nb < 32 && ne < 32);
18218 return false;
18221 /* Return the instruction template for an AND with mask in mode MODE, with
18222 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18224 const char *
18225 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18227 int nb, ne;
18229 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18230 gcc_unreachable ();
18232 if (mode == DImode && ne == 0)
18234 operands[3] = GEN_INT (63 - nb);
18235 if (dot)
18236 return "rldicl. %0,%1,0,%3";
18237 return "rldicl %0,%1,0,%3";
18240 if (mode == DImode && nb == 63)
18242 operands[3] = GEN_INT (63 - ne);
18243 if (dot)
18244 return "rldicr. %0,%1,0,%3";
18245 return "rldicr %0,%1,0,%3";
18248 if (nb < 32 && ne < 32)
18250 operands[3] = GEN_INT (31 - nb);
18251 operands[4] = GEN_INT (31 - ne);
18252 if (dot)
18253 return "rlwinm. %0,%1,0,%3,%4";
18254 return "rlwinm %0,%1,0,%3,%4";
18257 gcc_unreachable ();
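/* Example, illustrative only: a DImode AND with 0xffffffff has ne == 0
   and nb == 31, so the first arm above fires with operands[3] = 32 and
   the returned template prints as

       rldicl 3,4,0,32

   i.e. rotate by zero and clear the 32 high bits; the register numbers
   are made up for the example.  */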
18260 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18261 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18262 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18264 bool
18265 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18267 int nb, ne;
18269 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18270 return false;
18272 int n = GET_MODE_PRECISION (mode);
18273 int sh = -1;
18275 if (CONST_INT_P (XEXP (shift, 1)))
18277 sh = INTVAL (XEXP (shift, 1));
18278 if (sh < 0 || sh >= n)
18279 return false;
18282 rtx_code code = GET_CODE (shift);
18284 /* Convert any shift by 0 to a rotate, to simplify below code. */
18285 if (sh == 0)
18286 code = ROTATE;
18288 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18289 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18290 code = ASHIFT;
18291 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18293 code = LSHIFTRT;
18294 sh = n - sh;
18297 /* DImode rotates need rld*. */
18298 if (mode == DImode && code == ROTATE)
18299 return (nb == 63 || ne == 0 || ne == sh);
18301 /* SImode rotates need rlw*. */
18302 if (mode == SImode && code == ROTATE)
18303 return (nb < 32 && ne < 32 && sh < 32);
18305 /* Wrap-around masks are only okay for rotates. */
18306 if (ne > nb)
18307 return false;
18309 /* Variable shifts are only okay for rotates. */
18310 if (sh < 0)
18311 return false;
18313 /* Don't allow ASHIFT if the mask is wrong for that. */
18314 if (code == ASHIFT && ne < sh)
18315 return false;
18317 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18318 if the mask is wrong for that. */
18319 if (nb < 32 && ne < 32 && sh < 32
18320 && !(code == LSHIFTRT && nb >= 32 - sh))
18321 return true;
18323 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18324 if the mask is wrong for that. */
18325 if (code == LSHIFTRT)
18326 sh = 64 - sh;
18327 if (nb == 63 || ne == 0 || ne == sh)
18328 return !(code == LSHIFTRT && nb >= sh);
18330 return false;
18333 /* Return the instruction template for a shift with mask in mode MODE, with
18334 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18336 const char *
18337 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18339 int nb, ne;
18341 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18342 gcc_unreachable ();
18344 if (mode == DImode && ne == 0)
18346 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18347 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18348 operands[3] = GEN_INT (63 - nb);
18349 if (dot)
18350 return "rld%I2cl. %0,%1,%2,%3";
18351 return "rld%I2cl %0,%1,%2,%3";
18354 if (mode == DImode && nb == 63)
18356 operands[3] = GEN_INT (63 - ne);
18357 if (dot)
18358 return "rld%I2cr. %0,%1,%2,%3";
18359 return "rld%I2cr %0,%1,%2,%3";
18362 if (mode == DImode
18363 && GET_CODE (operands[4]) != LSHIFTRT
18364 && CONST_INT_P (operands[2])
18365 && ne == INTVAL (operands[2]))
18367 operands[3] = GEN_INT (63 - nb);
18368 if (dot)
18369 return "rld%I2c. %0,%1,%2,%3";
18370 return "rld%I2c %0,%1,%2,%3";
18373 if (nb < 32 && ne < 32)
18375 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18376 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18377 operands[3] = GEN_INT (31 - nb);
18378 operands[4] = GEN_INT (31 - ne);
18379 /* This insn can also be a 64-bit rotate with mask that really makes
18380 it just a shift right (with mask); the %h below are to adjust for
18381 that situation (shift count is >= 32 in that case). */
18382 if (dot)
18383 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18384 return "rlw%I2nm %0,%1,%h2,%3,%4";
18387 gcc_unreachable ();
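/* Example, a sketch only: the SImode expression (x << 3) & 0xfffffff8
   has nb == 31 and ne == 3, so the rlwinm arm applies with
   operands[3] = 0 and operands[4] = 28, printing as

       rlwinm 3,4,3,0,28

   the classic shift-left-and-mask encoding (MB = 0, ME = 28 in
   big-endian bit numbering).  Register numbers are made up.  */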
18390 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18391 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18392 ASHIFT, or LSHIFTRT) in mode MODE. */
18394 bool
18395 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18397 int nb, ne;
18399 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18400 return false;
18402 int n = GET_MODE_PRECISION (mode);
18404 int sh = INTVAL (XEXP (shift, 1));
18405 if (sh < 0 || sh >= n)
18406 return false;
18408 rtx_code code = GET_CODE (shift);
18410 /* Convert any shift by 0 to a rotate, to simplify below code. */
18411 if (sh == 0)
18412 code = ROTATE;
18414 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18415 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18416 code = ASHIFT;
18417 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18419 code = LSHIFTRT;
18420 sh = n - sh;
18423 /* DImode rotates need rldimi. */
18424 if (mode == DImode && code == ROTATE)
18425 return (ne == sh);
18427 /* SImode rotates need rlwimi. */
18428 if (mode == SImode && code == ROTATE)
18429 return (nb < 32 && ne < 32 && sh < 32);
18431 /* Wrap-around masks are only okay for rotates. */
18432 if (ne > nb)
18433 return false;
18435 /* Don't allow ASHIFT if the mask is wrong for that. */
18436 if (code == ASHIFT && ne < sh)
18437 return false;
18439 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18440 if the mask is wrong for that. */
18441 if (nb < 32 && ne < 32 && sh < 32
18442 && !(code == LSHIFTRT && nb >= 32 - sh))
18443 return true;
18445 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18446 if the mask is wrong for that. */
18447 if (code == LSHIFTRT)
18448 sh = 64 - sh;
18449 if (ne == sh)
18450 return !(code == LSHIFTRT && nb >= sh);
18452 return false;
18455 /* Return the instruction template for an insert with mask in mode MODE, with
18456 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18458 const char *
18459 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18461 int nb, ne;
18463 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18464 gcc_unreachable ();
18466 /* Prefer rldimi because rlwimi is cracked. */
18467 if (TARGET_POWERPC64
18468 && (!dot || mode == DImode)
18469 && GET_CODE (operands[4]) != LSHIFTRT
18470 && ne == INTVAL (operands[2]))
18472 operands[3] = GEN_INT (63 - nb);
18473 if (dot)
18474 return "rldimi. %0,%1,%2,%3";
18475 return "rldimi %0,%1,%2,%3";
18478 if (nb < 32 && ne < 32)
18480 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18481 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18482 operands[3] = GEN_INT (31 - nb);
18483 operands[4] = GEN_INT (31 - ne);
18484 if (dot)
18485 return "rlwimi. %0,%1,%2,%3,%4";
18486 return "rlwimi %0,%1,%2,%3,%4";
18489 gcc_unreachable ();
18492 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18493 using two machine instructions. */
18495 bool
18496 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18498 /* There are two kinds of AND we can handle with two insns:
18499 1) those we can do with two rl* insns;
18500 2) ori[s];xori[s].
18502 We do not handle that last case yet. */
18504 /* If there is just one stretch of ones, we can do it. */
18505 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18506 return true;
18508 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18509 one insn, we can do the whole thing with two. */
18510 unsigned HOST_WIDE_INT val = INTVAL (c);
18511 unsigned HOST_WIDE_INT bit1 = val & -val;
18512 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18513 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18514 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18515 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
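/* Worked example of the bit trickery above, for illustration: with
   val = 0xff0ff we get bit1 = 0x1 (lowest set bit), bit2 = 0x100 (lowest
   clear bit above it), val1 = 0xff000 (val with its low run of ones
   removed) and bit3 = 0x1000.  Then val + bit3 - bit2 = 0xfffff, which
   fills the hole in bits 8..11; that is one stretch of ones and hence a
   valid single-insn AND mask, so the original val takes two insns.  */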
18518 /* Emit a potentially record-form instruction, setting DST from SRC.
18519 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18520 signed comparison of DST with zero. If DOT is 1, the generated RTL
18521 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18522 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18523 a separate COMPARE. */
18525 static void
18526 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18528 if (dot == 0)
18530 emit_move_insn (dst, src);
18531 return;
18534 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18536 emit_move_insn (dst, src);
18537 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18538 return;
18541 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18542 if (dot == 1)
18544 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18545 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18547 else
18549 rtx set = gen_rtx_SET (dst, src);
18550 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
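/* Sketch of the RTL shapes produced above, for illustration: with
   dot == 2 and CCREG being CR0 the emitted insn looks like

       (parallel [(set (reg:CC 68) (compare:CC (src) (const_int 0)))
                  (set (dst) (src))])

   while dot == 1 replaces the second element with (clobber (dst)).
   Using 68 as CR0's register number is an assumption of this sketch.  */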
18554 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18555 If EXPAND is true, split rotate-and-mask instructions we generate to
18556 their constituent parts as well (this is used during expand); if DOT
18557 is 1, make the last insn a record-form instruction clobbering the
18558 destination GPR and setting the CC reg (from operands[3]); if 2, set
18559 that GPR as well as the CC reg. */
18561 void
18562 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18564 gcc_assert (!(expand && dot));
18566 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18568 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18569 shift right. This generates better code than doing the masks without
18570 shifts, or shifting first right and then left. */
18571 int nb, ne;
18572 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18574 gcc_assert (mode == DImode);
18576 int shift = 63 - nb;
18577 if (expand)
18579 rtx tmp1 = gen_reg_rtx (DImode);
18580 rtx tmp2 = gen_reg_rtx (DImode);
18581 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18582 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18583 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18585 else
18587 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18588 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18589 emit_move_insn (operands[0], tmp);
18590 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18591 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18593 return;
18596 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18597 that does the rest. */
18598 unsigned HOST_WIDE_INT bit1 = val & -val;
18599 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18600 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18601 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18603 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18604 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18606 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18608 /* Two "no-rotate"-and-mask instructions, for SImode. */
18609 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18611 gcc_assert (mode == SImode);
18613 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18614 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18615 emit_move_insn (reg, tmp);
18616 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18617 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18618 return;
18621 gcc_assert (mode == DImode);
18623 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18624 insns; we have to do the first in SImode, because it wraps. */
18625 if (mask2 <= 0xffffffff
18626 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18628 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18629 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18630 GEN_INT (mask1));
18631 rtx reg_low = gen_lowpart (SImode, reg);
18632 emit_move_insn (reg_low, tmp);
18633 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18634 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18635 return;
18638 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18639 at the top end), rotate back and clear the other hole. */
18640 int right = exact_log2 (bit3);
18641 int left = 64 - right;
18643 /* Rotate the mask too. */
18644 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18646 if (expand)
18648 rtx tmp1 = gen_reg_rtx (DImode);
18649 rtx tmp2 = gen_reg_rtx (DImode);
18650 rtx tmp3 = gen_reg_rtx (DImode);
18651 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18652 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18653 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18654 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18656 else
18658 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18659 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18660 emit_move_insn (operands[0], tmp);
18661 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18662 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18663 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
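/* Worked example, not from the original source: the DImode AND with
   0x000000ffff000000 is one stretch of ones (nb = 39, ne = 24) but not a
   valid single rldicl/rldicr/rlwinm mask, so the first branch above
   emits

       shift left 24;  AND 0xffff000000000000;  shift right 24

   where the shifted mask is anchored at bit 63 and therefore a valid
   rldicr mask.  */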
18667 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
18668 for lfq and stfq insns iff the registers are hard registers. */
18670 int
18671 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18673 /* We might have been passed a SUBREG. */
18674 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18675 return 0;
18677 /* We might have been passed non floating point registers. */
18678 if (!FP_REGNO_P (REGNO (reg1))
18679 || !FP_REGNO_P (REGNO (reg2)))
18680 return 0;
18682 return (REGNO (reg1) == REGNO (reg2) - 1);
18685 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18686 addr1 and addr2 must be in consecutive memory locations
18687 (addr2 == addr1 + 8). */
18689 int
18690 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18692 rtx addr1, addr2;
18693 unsigned int reg1, reg2;
18694 int offset1, offset2;
18696 /* The mems cannot be volatile. */
18697 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18698 return 0;
18700 addr1 = XEXP (mem1, 0);
18701 addr2 = XEXP (mem2, 0);
18703 /* Extract an offset (if used) from the first addr. */
18704 if (GET_CODE (addr1) == PLUS)
18706 /* If not a REG, return zero. */
18707 if (GET_CODE (XEXP (addr1, 0)) != REG)
18708 return 0;
18709 else
18711 reg1 = REGNO (XEXP (addr1, 0));
18712 /* The offset must be constant! */
18713 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18714 return 0;
18715 offset1 = INTVAL (XEXP (addr1, 1));
18718 else if (GET_CODE (addr1) != REG)
18719 return 0;
18720 else
18722 reg1 = REGNO (addr1);
18723 /* This was a simple (mem (reg)) expression. Offset is 0. */
18724 offset1 = 0;
18727 /* And now for the second addr. */
18728 if (GET_CODE (addr2) == PLUS)
18730 /* If not a REG, return zero. */
18731 if (GET_CODE (XEXP (addr2, 0)) != REG)
18732 return 0;
18733 else
18735 reg2 = REGNO (XEXP (addr2, 0));
18736 /* The offset must be constant. */
18737 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18738 return 0;
18739 offset2 = INTVAL (XEXP (addr2, 1));
18742 else if (GET_CODE (addr2) != REG)
18743 return 0;
18744 else
18746 reg2 = REGNO (addr2);
18747 /* This was a simple (mem (reg)) expression. Offset is 0. */
18748 offset2 = 0;
18751 /* Both of these must have the same base register. */
18752 if (reg1 != reg2)
18753 return 0;
18755 /* The offset for the second addr must be 8 more than the first addr. */
18756 if (offset2 != offset1 + 8)
18757 return 0;
18759 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18760 instructions. */
18761 return 1;
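/* Example, illustrative only: the pair

       mem1 = (mem:DF (plus:SI (reg:SI 9) (const_int 8)))
       mem2 = (mem:DF (plus:SI (reg:SI 9) (const_int 16)))

   passes all of the tests: same base register, offset2 == offset1 + 8,
   and neither mem volatile, so the two accesses can be fused into one
   lfq/stfq.  The mode and register number are made up.  */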
18765 rtx
18766 rs6000_secondary_memory_needed_rtx (machine_mode mode)
18768 static bool eliminated = false;
18769 rtx ret;
18771 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
18772 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
18773 else
18775 rtx mem = cfun->machine->sdmode_stack_slot;
18776 gcc_assert (mem != NULL_RTX);
18778 if (!eliminated)
18780 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
18781 cfun->machine->sdmode_stack_slot = mem;
18782 eliminated = true;
18784 ret = mem;
18787 if (TARGET_DEBUG_ADDR)
18789 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
18790 GET_MODE_NAME (mode));
18791 if (!ret)
18792 fprintf (stderr, "\tNULL_RTX\n");
18793 else
18794 debug_rtx (ret);
18797 return ret;
18800 /* Return the mode to be used for memory when a secondary memory
18801 location is needed. For SDmode values we need to use DDmode, in
18802 all other cases we can use the same mode. */
18803 machine_mode
18804 rs6000_secondary_memory_needed_mode (machine_mode mode)
18806 if (lra_in_progress && mode == SDmode)
18807 return DDmode;
18808 return mode;
18811 static tree
18812 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
18814 /* Don't walk into types. */
18815 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
18817 *walk_subtrees = 0;
18818 return NULL_TREE;
18821 switch (TREE_CODE (*tp))
18823 case VAR_DECL:
18824 case PARM_DECL:
18825 case FIELD_DECL:
18826 case RESULT_DECL:
18827 case SSA_NAME:
18828 case REAL_CST:
18829 case MEM_REF:
18830 case VIEW_CONVERT_EXPR:
18831 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
18832 return *tp;
18833 break;
18834 default:
18835 break;
18838 return NULL_TREE;
18841 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18842 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18843 only work on the traditional altivec registers, note if an altivec register
18844 was chosen. */
18846 static enum rs6000_reg_type
18847 register_to_reg_type (rtx reg, bool *is_altivec)
18849 HOST_WIDE_INT regno;
18850 enum reg_class rclass;
18852 if (GET_CODE (reg) == SUBREG)
18853 reg = SUBREG_REG (reg);
18855 if (!REG_P (reg))
18856 return NO_REG_TYPE;
18858 regno = REGNO (reg);
18859 if (regno >= FIRST_PSEUDO_REGISTER)
18861 if (!lra_in_progress && !reload_in_progress && !reload_completed)
18862 return PSEUDO_REG_TYPE;
18864 regno = true_regnum (reg);
18865 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18866 return PSEUDO_REG_TYPE;
18869 gcc_assert (regno >= 0);
18871 if (is_altivec && ALTIVEC_REGNO_P (regno))
18872 *is_altivec = true;
18874 rclass = rs6000_regno_regclass[regno];
18875 return reg_class_to_reg_type[(int)rclass];
18878 /* Helper function to return the cost of adding a TOC entry address. */
18880 static inline int
18881 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18883 int ret;
18885 if (TARGET_CMODEL != CMODEL_SMALL)
18886 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18888 else
18889 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18891 return ret;
18894 /* Helper function for rs6000_secondary_reload to determine whether the memory
18895 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18896 needs reloading. Return negative if the memory is not handled by the memory
18897 helper functions and to try a different reload method, 0 if no additional
18898 instructions are needed, and positive to give the extra cost for the
18899 memory. */
18901 static int
18902 rs6000_secondary_reload_memory (rtx addr,
18903 enum reg_class rclass,
18904 machine_mode mode)
18906 int extra_cost = 0;
18907 rtx reg, and_arg, plus_arg0, plus_arg1;
18908 addr_mask_type addr_mask;
18909 const char *type = NULL;
18910 const char *fail_msg = NULL;
18912 if (GPR_REG_CLASS_P (rclass))
18913 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18915 else if (rclass == FLOAT_REGS)
18916 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18918 else if (rclass == ALTIVEC_REGS)
18919 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18921 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18922 else if (rclass == VSX_REGS)
18923 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18924 & ~RELOAD_REG_AND_M16);
18926 /* If the register allocator hasn't made up its mind yet on the register
18927 class to use, settle on reasonable defaults. */
18928 else if (rclass == NO_REGS)
18930 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18931 & ~RELOAD_REG_AND_M16);
18933 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18934 addr_mask &= ~(RELOAD_REG_INDEXED
18935 | RELOAD_REG_PRE_INCDEC
18936 | RELOAD_REG_PRE_MODIFY);
18939 else
18940 addr_mask = 0;
18942 /* If the register isn't valid in this register class, just return now. */
18943 if ((addr_mask & RELOAD_REG_VALID) == 0)
18945 if (TARGET_DEBUG_ADDR)
18947 fprintf (stderr,
18948 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18949 "not valid in class\n",
18950 GET_MODE_NAME (mode), reg_class_names[rclass]);
18951 debug_rtx (addr);
18954 return -1;
18957 switch (GET_CODE (addr))
18959 /* Does the register class support auto update forms for this mode? We
18960 don't need a scratch register, since the powerpc only supports
18961 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18962 case PRE_INC:
18963 case PRE_DEC:
18964 reg = XEXP (addr, 0);
18965 if (!base_reg_operand (reg, GET_MODE (reg)))
18967 fail_msg = "no base register #1";
18968 extra_cost = -1;
18971 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18973 extra_cost = 1;
18974 type = "update";
18976 break;
18978 case PRE_MODIFY:
18979 reg = XEXP (addr, 0);
18980 plus_arg1 = XEXP (addr, 1);
18981 if (!base_reg_operand (reg, GET_MODE (reg))
18982 || GET_CODE (plus_arg1) != PLUS
18983 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18985 fail_msg = "bad PRE_MODIFY";
18986 extra_cost = -1;
18989 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18991 extra_cost = 1;
18992 type = "update";
18994 break;
18996 /* Do we need to simulate AND -16 to clear the bottom address bits used
18997 in VMX load/stores? Only allow the AND for vector sizes. */
18998 case AND:
18999 and_arg = XEXP (addr, 0);
19000 if (GET_MODE_SIZE (mode) != 16
19001 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19002 || INTVAL (XEXP (addr, 1)) != -16)
19004 fail_msg = "bad Altivec AND #1";
19005 extra_cost = -1;
19008 if (rclass != ALTIVEC_REGS)
19010 if (legitimate_indirect_address_p (and_arg, false))
19011 extra_cost = 1;
19013 else if (legitimate_indexed_address_p (and_arg, false))
19014 extra_cost = 2;
19016 else
19018 fail_msg = "bad Altivec AND #2";
19019 extra_cost = -1;
19022 type = "and";
19024 break;
19026 /* If this is an indirect address, make sure it is a base register. */
19027 case REG:
19028 case SUBREG:
19029 if (!legitimate_indirect_address_p (addr, false))
19031 extra_cost = 1;
19032 type = "move";
19034 break;
19036 /* If this is an indexed address, make sure the register class can handle
19037 indexed addresses for this mode. */
19038 case PLUS:
19039 plus_arg0 = XEXP (addr, 0);
19040 plus_arg1 = XEXP (addr, 1);
19042 /* (plus (plus (reg) (constant)) (constant)) is generated during
19043 push_reload processing, so handle it now. */
19044 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19046 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19048 extra_cost = 1;
19049 type = "offset";
19053 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19054 push_reload processing, so handle it now. */
19055 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19057 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19059 extra_cost = 1;
19060 type = "indexed #2";
19064 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19066 fail_msg = "no base register #2";
19067 extra_cost = -1;
19070 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19072 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19073 || !legitimate_indexed_address_p (addr, false))
19075 extra_cost = 1;
19076 type = "indexed";
19080 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19081 && CONST_INT_P (plus_arg1))
19083 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19085 extra_cost = 1;
19086 type = "vector d-form offset";
19090 /* Make sure the register class can handle offset addresses. */
19091 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19093 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19095 extra_cost = 1;
19096 type = "offset #2";
19100 else
19102 fail_msg = "bad PLUS";
19103 extra_cost = -1;
19106 break;
19108 case LO_SUM:
19109 /* Quad offsets are restricted and can't handle normal addresses. */
19110 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19112 extra_cost = -1;
19113 type = "vector d-form lo_sum";
19116 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19118 fail_msg = "bad LO_SUM";
19119 extra_cost = -1;
19122 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19124 extra_cost = 1;
19125 type = "lo_sum";
19127 break;
19129 /* Static addresses need to create a TOC entry. */
19130 case CONST:
19131 case SYMBOL_REF:
19132 case LABEL_REF:
19133 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19135 extra_cost = -1;
19136 type = "vector d-form lo_sum #2";
19139 else
19141 type = "address";
19142 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19144 break;
19146 /* TOC references look like offsetable memory. */
19147 case UNSPEC:
19148 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19150 fail_msg = "bad UNSPEC";
19151 extra_cost = -1;
19154 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19156 extra_cost = -1;
19157 type = "vector d-form lo_sum #3";
19160 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19162 extra_cost = 1;
19163 type = "toc reference";
19165 break;
19167 default:
19169 fail_msg = "bad address";
19170 extra_cost = -1;
19174 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19176 if (extra_cost < 0)
19177 fprintf (stderr,
19178 "rs6000_secondary_reload_memory error: mode = %s, "
19179 "class = %s, addr_mask = '%s', %s\n",
19180 GET_MODE_NAME (mode),
19181 reg_class_names[rclass],
19182 rs6000_debug_addr_mask (addr_mask, false),
19183 (fail_msg != NULL) ? fail_msg : "<bad address>");
19185 else
19186 fprintf (stderr,
19187 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19188 "addr_mask = '%s', extra cost = %d, %s\n",
19189 GET_MODE_NAME (mode),
19190 reg_class_names[rclass],
19191 rs6000_debug_addr_mask (addr_mask, false),
19192 extra_cost,
19193 (type) ? type : "<none>");
19195 debug_rtx (addr);
19198 return extra_cost;
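/* Example of the costing above, a sketch: a PRE_INC address reloaded for
   a register class whose addr_mask lacks RELOAD_REG_PRE_INCDEC hits the
   "update" case with extra_cost = 1 (one add to materialize the updated
   address), while a SYMBOL_REF under the small code model with
   -mminimal-toc costs 6 via rs6000_secondary_reload_toc_costs.  */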
19201 /* Helper function for rs6000_secondary_reload to return true if a move to a
19202 different register class is really a simple move. */
19204 static bool
19205 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19206 enum rs6000_reg_type from_type,
19207 machine_mode mode)
19209 int size;
19211 /* Handle the various direct moves that are available. In this function, we only
19212 look at cases where we don't need any extra registers, and one or more
19213 simple move insns are issued. At present, 32-bit integers are not allowed
19214 in FPR/VSX registers. Single precision binary floating is not a simple
19215 move because we need to convert to the single precision memory layout.
19216 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19217 need special direct move handling, which we do not support yet. */
19218 size = GET_MODE_SIZE (mode);
19219 if (TARGET_DIRECT_MOVE
19220 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
19221 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19222 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19223 return true;
19225 else if (TARGET_DIRECT_MOVE_128 && size == 16 && mode != TDmode
19226 && ((to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19227 || (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)))
19228 return true;
19230 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19231 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19232 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19233 return true;
19235 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19236 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19237 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19238 return true;
19240 return false;
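/* Example, illustrative only: with TARGET_DIRECT_MOVE on a 64-bit
   target, a DImode move between a GPR and a VSX register is "simple",
   needing only a single mtvsrd or mfvsrd, so this returns true and no
   scratch register is requested.  SFmode deliberately fails the first
   test above because its register layout differs from its memory
   layout.  */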
19243 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19244 special direct moves that involve allocating an extra register. Return
19245 true if such a move is possible, recording the insn code and extra cost
19246 in SRI; return false otherwise. */
19248 static bool
19249 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19250 enum rs6000_reg_type from_type,
19251 machine_mode mode,
19252 secondary_reload_info *sri,
19253 bool altivec_p)
19255 bool ret = false;
19256 enum insn_code icode = CODE_FOR_nothing;
19257 int cost = 0;
19258 int size = GET_MODE_SIZE (mode);
19260 if (TARGET_POWERPC64)
19262 if (size == 16)
19264 /* Handle moving 128-bit values from GPRs to VSX registers on
19265 ISA 2.07 (power8, power9) when running in 64-bit mode using
19266 XXPERMDI to glue the two 64-bit values back together. */
19267 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19269 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19270 icode = reg_addr[mode].reload_vsx_gpr;
19273 /* Handle moving 128-bit values from VSX registers to GPRs on
19274 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19275 bottom 64-bit value. */
19276 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19278 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19279 icode = reg_addr[mode].reload_gpr_vsx;
19283 else if (mode == SFmode)
19285 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19287 cost = 3; /* xscvdpspn, mfvsrd, and. */
19288 icode = reg_addr[mode].reload_gpr_vsx;
19291 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19293 cost = 2; /* mtvsrz, xscvspdpn. */
19294 icode = reg_addr[mode].reload_vsx_gpr;
19299 if (TARGET_POWERPC64 && size == 16)
19301 /* Handle moving 128-bit values from GPRs to VSX registers on
19302 ISA 2.07 when running in 64-bit mode using XXPERMDI to glue the two
19303 64-bit values back together. */
19304 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19306 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19307 icode = reg_addr[mode].reload_vsx_gpr;
19310 /* Handle moving 128-bit values from VSX registers to GPRs on
19311 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19312 bottom 64-bit value. */
19313 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19315 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19316 icode = reg_addr[mode].reload_gpr_vsx;
19320 else if (!TARGET_POWERPC64 && size == 8)
19322 /* Handle moving 64-bit values from GPRs to floating point registers on
19323 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19324 32-bit values back together. Altivec register classes must be handled
19325 specially since a different instruction is used, and the secondary
19326 reload support requires a single instruction class in the scratch
19327 register constraint. However, right now TFmode is not allowed in
19328 Altivec registers, so the pattern will never match. */
19329 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19331 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19332 icode = reg_addr[mode].reload_fpr_gpr;
19336 if (icode != CODE_FOR_nothing)
19338 ret = true;
19339 if (sri)
19341 sri->icode = icode;
19342 sri->extra_cost = cost;
19346 return ret;
19349 /* Return whether a move between two register classes can be done either
19350 directly (simple move) or via a pattern that uses a single extra temporary
19351 (using ISA 2.07's direct move in this case). */
19353 static bool
19354 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19355 enum rs6000_reg_type from_type,
19356 machine_mode mode,
19357 secondary_reload_info *sri,
19358 bool altivec_p)
19360 /* Fall back to load/store reloads if either type is not a register. */
19361 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19362 return false;
19364 /* If we haven't allocated registers yet, assume the move can be done for the
19365 standard register types. */
19366 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19367 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19368 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19369 return true;
19371 /* Moves to the same set of registers is a simple move for non-specialized
19372 registers. */
19373 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19374 return true;
19376 /* Check whether a simple move can be done directly. */
19377 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19379 if (sri)
19381 sri->icode = CODE_FOR_nothing;
19382 sri->extra_cost = 0;
19384 return true;
19387 /* Now check if we can do it in a few steps. */
19388 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19389 altivec_p);
19392 /* Inform reload about cases where moving X with a mode MODE to a register in
19393 RCLASS requires an extra scratch or immediate register. Return the class
19394 needed for the immediate register.
19396 For VSX and Altivec, we may need a register to convert sp+offset into
19397 reg+sp.
19399 For misaligned 64-bit gpr loads and stores we need a register to
19400 convert an offset address to indirect. */
19402 static reg_class_t
19403 rs6000_secondary_reload (bool in_p,
19404 rtx x,
19405 reg_class_t rclass_i,
19406 machine_mode mode,
19407 secondary_reload_info *sri)
19409 enum reg_class rclass = (enum reg_class) rclass_i;
19410 reg_class_t ret = ALL_REGS;
19411 enum insn_code icode;
19412 bool default_p = false;
19413 bool done_p = false;
19415 /* Allow subreg of memory before/during reload. */
19416 bool memory_p = (MEM_P (x)
19417 || (!reload_completed && GET_CODE (x) == SUBREG
19418 && MEM_P (SUBREG_REG (x))));
19420 sri->icode = CODE_FOR_nothing;
19421 sri->t_icode = CODE_FOR_nothing;
19422 sri->extra_cost = 0;
19423 icode = ((in_p)
19424 ? reg_addr[mode].reload_load
19425 : reg_addr[mode].reload_store);
19427 if (REG_P (x) || register_operand (x, mode))
19429 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19430 bool altivec_p = (rclass == ALTIVEC_REGS);
19431 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19433 if (!in_p)
19435 enum rs6000_reg_type exchange = to_type;
19436 to_type = from_type;
19437 from_type = exchange;
19440 /* Can we do a direct move of some sort? */
19441 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19442 altivec_p))
19444 icode = (enum insn_code)sri->icode;
19445 default_p = false;
19446 done_p = true;
19447 ret = NO_REGS;
19451 /* Make sure 0.0 is not reloaded or forced into memory. */
19452 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19454 ret = NO_REGS;
19455 default_p = false;
19456 done_p = true;
19459 /* If this is a scalar floating point value and we want to load it into the
19460 traditional Altivec registers, do it via a move via a traditional floating
19461 point register, unless we have D-form addressing. Also make sure that
19462 non-zero constants use a FPR. */
19463 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19464 && !mode_supports_vmx_dform (mode)
19465 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19466 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19468 ret = FLOAT_REGS;
19469 default_p = false;
19470 done_p = true;
19473 /* Handle reload of load/stores if we have reload helper functions. */
19474 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19476 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19477 mode);
19479 if (extra_cost >= 0)
19481 done_p = true;
19482 ret = NO_REGS;
19483 if (extra_cost > 0)
19485 sri->extra_cost = extra_cost;
19486 sri->icode = icode;
19491 /* Handle unaligned loads and stores of integer registers. */
19492 if (!done_p && TARGET_POWERPC64
19493 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19494 && memory_p
19495 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19497 rtx addr = XEXP (x, 0);
19498 rtx off = address_offset (addr);
19500 if (off != NULL_RTX)
19502 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19503 unsigned HOST_WIDE_INT offset = INTVAL (off);
19505 /* We need a secondary reload when our legitimate_address_p
19506 says the address is good (as otherwise the entire address
19507 will be reloaded), and the offset is not a multiple of
19508 four or we have an address wrap. Address wrap will only
19509 occur for LO_SUMs since legitimate_offset_address_p
19510 rejects addresses for 16-byte mems that will wrap. */
19511 if (GET_CODE (addr) == LO_SUM
19512 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19513 && ((offset & 3) != 0
19514 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19515 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19516 && (offset & 3) != 0))
19518 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19519 if (in_p)
19520 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19521 : CODE_FOR_reload_di_load);
19522 else
19523 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19524 : CODE_FOR_reload_di_store);
19525 sri->extra_cost = 2;
19526 ret = NO_REGS;
19527 done_p = true;
19529 else
19530 default_p = true;
19532 else
19533 default_p = true;
19536 if (!done_p && !TARGET_POWERPC64
19537 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19538 && memory_p
19539 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19541 rtx addr = XEXP (x, 0);
19542 rtx off = address_offset (addr);
19544 if (off != NULL_RTX)
19546 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19547 unsigned HOST_WIDE_INT offset = INTVAL (off);
19549 /* We need a secondary reload when our legitimate_address_p
19550 says the address is good (as otherwise the entire address
19551 will be reloaded), and we have a wrap.
19553 legitimate_lo_sum_address_p allows LO_SUM addresses to
19554 have any offset so test for wrap in the low 16 bits.
19556 legitimate_offset_address_p checks for the range
19557 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19558 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19559 [0x7ff4,0x7fff] respectively, so test for the
19560 intersection of these ranges, [0x7ffc,0x7fff] and
19561 [0x7ff4,0x7ff7] respectively.
19563 Note that the address we see here may have been
19564 manipulated by legitimize_reload_address. */
19565 if (GET_CODE (addr) == LO_SUM
19566 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19567 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19569 if (in_p)
19570 sri->icode = CODE_FOR_reload_si_load;
19571 else
19572 sri->icode = CODE_FOR_reload_si_store;
19573 sri->extra_cost = 2;
19574 ret = NO_REGS;
19575 done_p = true;
19577 else
19578 default_p = true;
19580 else
19581 default_p = true;
19584 if (!done_p)
19585 default_p = true;
19587 if (default_p)
19588 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19590 gcc_assert (ret != ALL_REGS);
19592 if (TARGET_DEBUG_ADDR)
19594 fprintf (stderr,
19595 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19596 "mode = %s",
19597 reg_class_names[ret],
19598 in_p ? "true" : "false",
19599 reg_class_names[rclass],
19600 GET_MODE_NAME (mode));
19602 if (reload_completed)
19603 fputs (", after reload", stderr);
19605 if (!done_p)
19606 fputs (", done_p not set", stderr);
19608 if (default_p)
19609 fputs (", default secondary reload", stderr);
19611 if (sri->icode != CODE_FOR_nothing)
19612 fprintf (stderr, ", reload func = %s, extra cost = %d",
19613 insn_data[sri->icode].name, sri->extra_cost);
19615 else if (sri->extra_cost > 0)
19616 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19618 fputs ("\n", stderr);
19619 debug_rtx (x);
19622 return ret;
19625 /* Better tracing for rs6000_secondary_reload_inner. */
19627 static void
19628 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19629 bool store_p)
19631 rtx set, clobber;
19633 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19635 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19636 store_p ? "store" : "load");
19638 if (store_p)
19639 set = gen_rtx_SET (mem, reg);
19640 else
19641 set = gen_rtx_SET (reg, mem);
19643 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19644 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19647 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19648 ATTRIBUTE_NORETURN;
19650 static void
19651 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19652 bool store_p)
19654 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19655 gcc_unreachable ();
19658 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19659 reload helper functions. These were identified in
19660 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19661 reload, it calls the insns:
19662 reload_<RELOAD:mode>_<P:mptrsize>_store
19663 reload_<RELOAD:mode>_<P:mptrsize>_load
19665 which in turn calls this function, to do whatever is necessary to create
19666 valid addresses. */
19668 void
19669 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19671 int regno = true_regnum (reg);
19672 machine_mode mode = GET_MODE (reg);
19673 addr_mask_type addr_mask;
19674 rtx addr;
19675 rtx new_addr;
19676 rtx op_reg, op0, op1;
19677 rtx and_op;
19678 rtx cc_clobber;
19679 rtvec rv;
19681 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19682 || !base_reg_operand (scratch, GET_MODE (scratch)))
19683 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19685 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19686 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19688 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19689 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19691 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19692 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19694 else
19695 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19697 /* Make sure the mode is valid in this register class. */
19698 if ((addr_mask & RELOAD_REG_VALID) == 0)
19699 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19701 if (TARGET_DEBUG_ADDR)
19702 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19704 new_addr = addr = XEXP (mem, 0);
19705 switch (GET_CODE (addr))
19707 /* Does the register class support auto update forms for this mode? If
19708 not, do the update now. We don't need a scratch register, since the
19709 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19710 case PRE_INC:
19711 case PRE_DEC:
19712 op_reg = XEXP (addr, 0);
19713 if (!base_reg_operand (op_reg, Pmode))
19714 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19716 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19718 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_CODE (addr) == PRE_DEC ? -(HOST_WIDE_INT) GET_MODE_SIZE (mode) : (HOST_WIDE_INT) GET_MODE_SIZE (mode))));
19719 new_addr = op_reg;
19721 break;
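/* e.g. (illustrative) a PRE_INC of a DFmode value in a class without
   update forms: the code above emits the "addi r9,r9,8" by itself and
   the actual load/store then uses the plain (mem (reg 9)) address.  */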
19723 case PRE_MODIFY:
19724 op0 = XEXP (addr, 0);
19725 op1 = XEXP (addr, 1);
19726 if (!base_reg_operand (op0, Pmode)
19727 || GET_CODE (op1) != PLUS
19728 || !rtx_equal_p (op0, XEXP (op1, 0)))
19729 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19731 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19733 emit_insn (gen_rtx_SET (op0, op1));
19734 new_addr = op0;
19736 break;
19738 /* Do we need to simulate AND -16 to clear the bottom address bits used
19739 in VMX load/stores? */
19740 case AND:
19741 op0 = XEXP (addr, 0);
19742 op1 = XEXP (addr, 1);
19743 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19745 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19746 op_reg = op0;
19748 else if (GET_CODE (op0) == PLUS)
19750 emit_insn (gen_rtx_SET (scratch, op0));
19751 op_reg = scratch;
19754 else
19755 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19757 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19758 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19759 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19760 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19761 new_addr = scratch;
19763 break;
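/* Illustrative sketch of the AND -16 simulation above: for the
   Altivec-style address (and (plus r9 r10) -16) in a class without
   the AND form, we emit
       (set r11 (plus r9 r10))
   followed by the recorded
       (parallel [(set r11 (and r11 -16)) (clobber (scratch:CC))])
   and the memory access then goes through r11, the scratch reg.  */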
19765 /* If this is an indirect address, make sure it is a base register. */
19766 case REG:
19767 case SUBREG:
19768 if (!base_reg_operand (addr, GET_MODE (addr)))
19770 emit_insn (gen_rtx_SET (scratch, addr));
19771 new_addr = scratch;
19773 break;
19775 /* If this is an indexed address, make sure the register class can handle
19776 indexed addresses for this mode. */
19777 case PLUS:
19778 op0 = XEXP (addr, 0);
19779 op1 = XEXP (addr, 1);
19780 if (!base_reg_operand (op0, Pmode))
19781 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19783 else if (int_reg_operand (op1, Pmode))
19785 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19787 emit_insn (gen_rtx_SET (scratch, addr));
19788 new_addr = scratch;
19792 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
19794 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19795 || !quad_address_p (addr, mode, false))
19797 emit_insn (gen_rtx_SET (scratch, addr));
19798 new_addr = scratch;
19802 /* Make sure the register class can handle offset addresses. */
19803 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19805 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19807 emit_insn (gen_rtx_SET (scratch, addr));
19808 new_addr = scratch;
19812 else
19813 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19815 break;
19817 case LO_SUM:
19818 op0 = XEXP (addr, 0);
19819 op1 = XEXP (addr, 1);
19820 if (!base_reg_operand (op0, Pmode))
19821 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19823 else if (int_reg_operand (op1, Pmode))
19825 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19827 emit_insn (gen_rtx_SET (scratch, addr));
19828 new_addr = scratch;
19832 /* Quad offsets are restricted and can't handle normal addresses. */
19833 else if (mode_supports_vsx_dform_quad (mode))
19835 emit_insn (gen_rtx_SET (scratch, addr));
19836 new_addr = scratch;
19839 /* Make sure the register class can handle offset addresses. */
19840 else if (legitimate_lo_sum_address_p (mode, addr, false))
19842 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19844 emit_insn (gen_rtx_SET (scratch, addr));
19845 new_addr = scratch;
19849 else
19850 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19852 break;
19854 case SYMBOL_REF:
19855 case CONST:
19856 case LABEL_REF:
19857 rs6000_emit_move (scratch, addr, Pmode);
19858 new_addr = scratch;
19859 break;
19861 default:
19862 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19865 /* Adjust the address if it changed. */
19866 if (addr != new_addr)
19868 mem = replace_equiv_address_nv (mem, new_addr);
19869 if (TARGET_DEBUG_ADDR)
19870 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19873 /* Now create the move. */
19874 if (store_p)
19875 emit_insn (gen_rtx_SET (mem, reg));
19876 else
19877 emit_insn (gen_rtx_SET (reg, mem));
19879 return;
19882 /* Convert reloads involving 64-bit gprs and misaligned offset
19883 addressing, or multiple 32-bit gprs and offsets that are too large,
19884 to use indirect addressing. */
19886 void
19887 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19889 int regno = true_regnum (reg);
19890 enum reg_class rclass;
19891 rtx addr;
19892 rtx scratch_or_premodify = scratch;
19894 if (TARGET_DEBUG_ADDR)
19896 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19897 store_p ? "store" : "load");
19898 fprintf (stderr, "reg:\n");
19899 debug_rtx (reg);
19900 fprintf (stderr, "mem:\n");
19901 debug_rtx (mem);
19902 fprintf (stderr, "scratch:\n");
19903 debug_rtx (scratch);
19906 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
19907 gcc_assert (GET_CODE (mem) == MEM);
19908 rclass = REGNO_REG_CLASS (regno);
19909 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19910 addr = XEXP (mem, 0);
19912 if (GET_CODE (addr) == PRE_MODIFY)
19914 gcc_assert (REG_P (XEXP (addr, 0))
19915 && GET_CODE (XEXP (addr, 1)) == PLUS
19916 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19917 scratch_or_premodify = XEXP (addr, 0);
19918 if (!HARD_REGISTER_P (scratch_or_premodify))
19919 /* If we have a pseudo here then reload will have arranged
19920 to have it replaced, but only in the original insn.
19921 Use the replacement here too. */
19922 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
19924 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
19925 expressions from the original insn, without unsharing them.
19926 Any RTL that points into the original insn will of course
19927 have register replacements applied. That is why we don't
19928 need to look for replacements under the PLUS. */
19929 addr = XEXP (addr, 1);
19931 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19933 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19935 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19937 /* Now create the move. */
19938 if (store_p)
19939 emit_insn (gen_rtx_SET (mem, reg));
19940 else
19941 emit_insn (gen_rtx_SET (reg, mem));
19943 return;
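/* Illustrative example: a 32-bit target moving a DImode gpr pair at
   offset 0x7ffc cannot use two lwz/stw D-form accesses (the second
   word's displacement 0x8000 overflows), so the code above copies the
   whole address into the scratch register and accesses 0(scratch).  */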
19946 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
19947 this function has any SDmode references. If we are on a power7 or later, we
19948 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
19949 can load/store the value. */
19951 static void
19952 rs6000_alloc_sdmode_stack_slot (void)
19954 tree t;
19955 basic_block bb;
19956 gimple_stmt_iterator gsi;
19958 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
19959 /* We use a different approach for dealing with the secondary
19960 memory in LRA. */
19961 if (ira_use_lra_p)
19962 return;
19964 if (TARGET_NO_SDMODE_STACK)
19965 return;
19967 FOR_EACH_BB_FN (bb, cfun)
19968 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
19970 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
19971 if (ret)
19973 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
19974 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
19975 SDmode, 0);
19976 return;
19980 /* Check for any SDmode parameters of the function. */
19981 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
19983 if (TREE_TYPE (t) == error_mark_node)
19984 continue;
19986 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
19987 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
19989 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
19990 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
19991 SDmode, 0);
19992 return;
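/* Illustrative note: the DDmode-sized slot above gives SDmode (32-bit
   decimal float) values a 64-bit home in memory so they can be moved
   between GPRs and FPRs with full-width FPR loads/stores; on power7
   and later, LFIWZX/STFIWX handle the value directly, which is why
   TARGET_NO_SDMODE_STACK skips the allocation.  */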
19997 static void
19998 rs6000_instantiate_decls (void)
20000 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
20001 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
20004 /* Given an rtx X being reloaded into a reg required to be
20005 in class CLASS, return the class of reg to actually use.
20006 In general this is just CLASS; but on some machines
20007 in some cases it is preferable to use a more restrictive class.
20009 On the RS/6000, we have to return NO_REGS when we want to reload a
20010 floating-point CONST_DOUBLE to force it to be copied to memory.
20012 We also don't want to reload integer values into floating-point
20013 registers if we can at all help it. In fact, this can
20014 cause reload to die, if it tries to generate a reload of CTR
20015 into a FP register and discovers it doesn't have the memory location
20016 required.
20018 ??? Would it be a good idea to have reload do the converse, that is
20019 try to reload floating modes into FP registers if possible?
20022 static enum reg_class
20023 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20025 machine_mode mode = GET_MODE (x);
20026 bool is_constant = CONSTANT_P (x);
20028 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20029 reload class for it. */
20030 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20031 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20032 return NO_REGS;
20034 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20035 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20036 return NO_REGS;
20038 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20039 the reloading of address expressions using PLUS into floating point
20040 registers. */
20041 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20043 if (is_constant)
20045 /* Zero is always allowed in all VSX registers. */
20046 if (x == CONST0_RTX (mode))
20047 return rclass;
20049 /* If this is a vector constant that can be formed with a few Altivec
20050 instructions, we want altivec registers. */
20051 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20052 return ALTIVEC_REGS;
20054 /* Force constant to memory. */
20055 return NO_REGS;
20058 /* D-form addressing can easily reload the value. */
20059 if (mode_supports_vmx_dform (mode)
20060 || mode_supports_vsx_dform_quad (mode))
20061 return rclass;
20063 /* If this is a scalar floating point value and we don't have D-form
20064 addressing, prefer the traditional floating point registers so that we
20065 can use D-form (register+offset) addressing. */
20066 if (GET_MODE_SIZE (mode) < 16 && rclass == VSX_REGS)
20067 return FLOAT_REGS;
20069 /* Prefer the Altivec registers if Altivec is handling the vector
20070 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20071 loads. */
20072 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20073 || mode == V1TImode)
20074 return ALTIVEC_REGS;
20076 return rclass;
20079 if (is_constant || GET_CODE (x) == PLUS)
20081 if (reg_class_subset_p (GENERAL_REGS, rclass))
20082 return GENERAL_REGS;
20083 if (reg_class_subset_p (BASE_REGS, rclass))
20084 return BASE_REGS;
20085 return NO_REGS;
20088 /* If we haven't picked a register class, and the type is a vector or
20089 floating point type, prefer to use the VSX, FPR, or Altivec register
20090 classes. */
20091 if (rclass == NO_REGS)
20093 if (TARGET_VSX && VECTOR_MEM_VSX_OR_P8_VECTOR_P (mode))
20094 return VSX_REGS;
20096 if (TARGET_ALTIVEC && VECTOR_MEM_ALTIVEC_P (mode))
20097 return ALTIVEC_REGS;
20099 if (DECIMAL_FLOAT_MODE_P (mode))
20100 return TARGET_DFP ? FLOAT_REGS : NO_REGS;
20102 if (TARGET_FPRS && TARGET_HARD_FLOAT && FLOAT_MODE_P (mode)
20103 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) != 0)
20104 return FLOAT_REGS;
20107 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20108 return GENERAL_REGS;
20110 return rclass;
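/* For example (illustrative): asking to reload a DFmode pseudo into
   VSX_REGS without D-form vector addressing returns FLOAT_REGS above,
   so the allocator can keep using lfd/stfd reg+offset addressing
   rather than the index-only Altivec forms.  */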
20113 /* Debug version of rs6000_preferred_reload_class. */
20114 static enum reg_class
20115 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20117 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20119 fprintf (stderr,
20120 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20121 "mode = %s, x:\n",
20122 reg_class_names[ret], reg_class_names[rclass],
20123 GET_MODE_NAME (GET_MODE (x)));
20124 debug_rtx (x);
20126 return ret;
20129 /* If we are copying between FP or AltiVec registers and anything else, we need
20130 a memory location. The exception is when we are targeting ppc64 and the
20131 move to/from fpr to gpr instructions are available. Also, under VSX, you
20132 can copy vector registers from the FP register set to the Altivec register
20133 set and vice versa. */
20135 static bool
20136 rs6000_secondary_memory_needed (enum reg_class from_class,
20137 enum reg_class to_class,
20138 machine_mode mode)
20140 enum rs6000_reg_type from_type, to_type;
20141 bool altivec_p = ((from_class == ALTIVEC_REGS)
20142 || (to_class == ALTIVEC_REGS));
20144 /* If a simple/direct move is available, we don't need secondary memory.  */
20145 from_type = reg_class_to_reg_type[(int)from_class];
20146 to_type = reg_class_to_reg_type[(int)to_class];
20148 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20149 (secondary_reload_info *)0, altivec_p))
20150 return false;
20152 /* If we have a floating point or vector register class, we need to use
20153 memory to transfer the data. */
20154 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20155 return true;
20157 return false;
20160 /* Debug version of rs6000_secondary_memory_needed. */
20161 static bool
20162 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
20163 enum reg_class to_class,
20164 machine_mode mode)
20166 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
20168 fprintf (stderr,
20169 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20170 "to_class = %s, mode = %s\n",
20171 ret ? "true" : "false",
20172 reg_class_names[from_class],
20173 reg_class_names[to_class],
20174 GET_MODE_NAME (mode));
20176 return ret;
20179 /* Return the register class of a scratch register needed to copy IN into
20180 or out of a register in RCLASS in MODE. If it can be done directly,
20181 NO_REGS is returned. */
20183 static enum reg_class
20184 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20185 rtx in)
20187 int regno;
20189 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20190 #if TARGET_MACHO
20191 && MACHOPIC_INDIRECT
20192 #endif
20195 /* We cannot copy a symbolic operand directly into anything
20196 other than BASE_REGS for TARGET_ELF. So indicate that a
20197 register from BASE_REGS is needed as an intermediate
20198 register.
20200 On Darwin, pic addresses require a load from memory, which
20201 needs a base register. */
20202 if (rclass != BASE_REGS
20203 && (GET_CODE (in) == SYMBOL_REF
20204 || GET_CODE (in) == HIGH
20205 || GET_CODE (in) == LABEL_REF
20206 || GET_CODE (in) == CONST))
20207 return BASE_REGS;
20210 if (GET_CODE (in) == REG)
20212 regno = REGNO (in);
20213 if (regno >= FIRST_PSEUDO_REGISTER)
20215 regno = true_regnum (in);
20216 if (regno >= FIRST_PSEUDO_REGISTER)
20217 regno = -1;
20220 else if (GET_CODE (in) == SUBREG)
20222 regno = true_regnum (in);
20223 if (regno >= FIRST_PSEUDO_REGISTER)
20224 regno = -1;
20226 else
20227 regno = -1;
20229 /* If we have VSX register moves, prefer moving scalar values between
20230 Altivec registers and GPR by going via an FPR (and then via memory)
20231 instead of reloading the secondary memory address for Altivec moves. */
20232 if (TARGET_VSX
20233 && GET_MODE_SIZE (mode) < 16
20234 && !mode_supports_vmx_dform (mode)
20235 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20236 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20237 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20238 && (regno >= 0 && INT_REGNO_P (regno)))))
20239 return FLOAT_REGS;
20241 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20242 into anything. */
20243 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20244 || (regno >= 0 && INT_REGNO_P (regno)))
20245 return NO_REGS;
20247 /* Constants, memory, and VSX registers can go into VSX registers (both the
20248 traditional floating point and the altivec registers). */
20249 if (rclass == VSX_REGS
20250 && (regno == -1 || VSX_REGNO_P (regno)))
20251 return NO_REGS;
20253 /* Constants, memory, and FP registers can go into FP registers. */
20254 if ((regno == -1 || FP_REGNO_P (regno))
20255 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20256 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20258 /* Memory, and AltiVec registers can go into AltiVec registers. */
20259 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20260 && rclass == ALTIVEC_REGS)
20261 return NO_REGS;
20263 /* We can copy among the CR registers. */
20264 if ((rclass == CR_REGS || rclass == CR0_REGS)
20265 && regno >= 0 && CR_REGNO_P (regno))
20266 return NO_REGS;
20268 /* Otherwise, we need GENERAL_REGS. */
20269 return GENERAL_REGS;
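/* Illustrative example: copying an SFmode value from an Altivec
   register to a GPR under VSX returns FLOAT_REGS above, so the value
   travels Altivec reg -> FPR -> memory -> GPR instead of reloading a
   secondary memory address that is legal for Altivec accesses.  */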
20272 /* Debug version of rs6000_secondary_reload_class. */
20273 static enum reg_class
20274 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20275 machine_mode mode, rtx in)
20277 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20278 fprintf (stderr,
20279 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20280 "mode = %s, input rtx:\n",
20281 reg_class_names[ret], reg_class_names[rclass],
20282 GET_MODE_NAME (mode));
20283 debug_rtx (in);
20285 return ret;
20288 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
20290 static bool
20291 rs6000_cannot_change_mode_class (machine_mode from,
20292 machine_mode to,
20293 enum reg_class rclass)
20295 unsigned from_size = GET_MODE_SIZE (from);
20296 unsigned to_size = GET_MODE_SIZE (to);
20298 if (from_size != to_size)
20300 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20302 if (reg_classes_intersect_p (xclass, rclass))
20304 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
20305 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
20306 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20307 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20309 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20310 single register under VSX because the scalar part of the register
20311 is in the upper 64-bits, and not the lower 64-bits. Types like
20312 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20313 IEEE floating point can't overlap, and neither can small
20314 values. */
20316 if (to_float128_vector_p && from_float128_vector_p)
20317 return false;
20319 else if (to_float128_vector_p || from_float128_vector_p)
20320 return true;
20322 /* TDmode in floating-mode registers must always go into a register
20323 pair with the most significant word in the even-numbered register
20324 to match ISA requirements. In little-endian mode, this does not
20325 match subreg numbering, so we cannot allow subregs. */
20326 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20327 return true;
20329 if (from_size < 8 || to_size < 8)
20330 return true;
20332 if (from_size == 8 && (8 * to_nregs) != to_size)
20333 return true;
20335 if (to_size == 8 && (8 * from_nregs) != from_size)
20336 return true;
20338 return false;
20340 else
20341 return false;
20344 if (TARGET_E500_DOUBLE
20345 && ((((to) == DFmode) + ((from) == DFmode)) == 1
20346 || (((to) == TFmode) + ((from) == TFmode)) == 1
20347 || (((to) == IFmode) + ((from) == IFmode)) == 1
20348 || (((to) == KFmode) + ((from) == KFmode)) == 1
20349 || (((to) == DDmode) + ((from) == DDmode)) == 1
20350 || (((to) == TDmode) + ((from) == TDmode)) == 1
20351 || (((to) == DImode) + ((from) == DImode)) == 1))
20352 return true;
20354 /* Since the VSX register set includes traditional floating point registers
20355 and altivec registers, just check for the size being different instead of
20356 trying to check whether the modes are vector modes. Otherwise it won't
20357 allow say DF and DI to change classes. For types like TFmode and TDmode
20358 that take 2 64-bit registers, rather than a single 128-bit register, don't
20359 allow subregs of those types to other 128-bit types. */
20360 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20362 unsigned num_regs = (from_size + 15) / 16;
20363 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
20364 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
20365 return true;
20367 return (from_size != 8 && from_size != 16);
20370 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20371 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20372 return true;
20374 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
20375 && reg_classes_intersect_p (GENERAL_REGS, rclass))
20376 return true;
20378 return false;
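/* Example of the VSX checks above (illustrative): a DImode subreg of
   a KFmode (IEEE 128-bit) value in a VSX register is rejected because
   the 64-bit scalar lives in the upper half of the 128-bit register,
   while the subreg would name the lower half.  */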
20381 /* Debug version of rs6000_cannot_change_mode_class. */
20382 static bool
20383 rs6000_debug_cannot_change_mode_class (machine_mode from,
20384 machine_mode to,
20385 enum reg_class rclass)
20387 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
20389 fprintf (stderr,
20390 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20391 "to = %s, rclass = %s\n",
20392 ret ? "true" : "false",
20393 GET_MODE_NAME (from), GET_MODE_NAME (to),
20394 reg_class_names[rclass]);
20396 return ret;
20399 /* Return a string to do a move operation of 128 bits of data. */
20401 const char *
20402 rs6000_output_move_128bit (rtx operands[])
20404 rtx dest = operands[0];
20405 rtx src = operands[1];
20406 machine_mode mode = GET_MODE (dest);
20407 int dest_regno;
20408 int src_regno;
20409 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20410 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20412 if (REG_P (dest))
20414 dest_regno = REGNO (dest);
20415 dest_gpr_p = INT_REGNO_P (dest_regno);
20416 dest_fp_p = FP_REGNO_P (dest_regno);
20417 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20418 dest_vsx_p = dest_fp_p | dest_vmx_p;
20420 else
20422 dest_regno = -1;
20423 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20426 if (REG_P (src))
20428 src_regno = REGNO (src);
20429 src_gpr_p = INT_REGNO_P (src_regno);
20430 src_fp_p = FP_REGNO_P (src_regno);
20431 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20432 src_vsx_p = src_fp_p | src_vmx_p;
20434 else
20436 src_regno = -1;
20437 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20440 /* Register moves. */
20441 if (dest_regno >= 0 && src_regno >= 0)
20443 if (dest_gpr_p)
20445 if (src_gpr_p)
20446 return "#";
20448 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20449 return (WORDS_BIG_ENDIAN
20450 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20451 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20453 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20454 return "#";
20457 else if (TARGET_VSX && dest_vsx_p)
20459 if (src_vsx_p)
20460 return "xxlor %x0,%x1,%x1";
20462 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20463 return (WORDS_BIG_ENDIAN
20464 ? "mtvsrdd %x0,%1,%L1"
20465 : "mtvsrdd %x0,%L1,%1");
20467 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20468 return "#";
20471 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20472 return "vor %0,%1,%1";
20474 else if (dest_fp_p && src_fp_p)
20475 return "#";
20478 /* Loads. */
20479 else if (dest_regno >= 0 && MEM_P (src))
20481 if (dest_gpr_p)
20483 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20484 return "lq %0,%1";
20485 else
20486 return "#";
20489 else if (TARGET_ALTIVEC && dest_vmx_p
20490 && altivec_indexed_or_indirect_operand (src, mode))
20491 return "lvx %0,%y1";
20493 else if (TARGET_VSX && dest_vsx_p)
20495 if (mode_supports_vsx_dform_quad (mode)
20496 && quad_address_p (XEXP (src, 0), mode, true))
20497 return "lxv %x0,%1";
20499 else if (TARGET_P9_VECTOR)
20500 return "lxvx %x0,%y1";
20502 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20503 return "lxvw4x %x0,%y1";
20505 else
20506 return "lxvd2x %x0,%y1";
20509 else if (TARGET_ALTIVEC && dest_vmx_p)
20510 return "lvx %0,%y1";
20512 else if (dest_fp_p)
20513 return "#";
20516 /* Stores. */
20517 else if (src_regno >= 0 && MEM_P (dest))
20519 if (src_gpr_p)
20521 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20522 return "stq %1,%0";
20523 else
20524 return "#";
20527 else if (TARGET_ALTIVEC && src_vmx_p
20528 && altivec_indexed_or_indirect_operand (dest, mode))
20529 return "stvx %1,%y0";
20531 else if (TARGET_VSX && src_vsx_p)
20533 if (mode_supports_vsx_dform_quad (mode)
20534 && quad_address_p (XEXP (dest, 0), mode, true))
20535 return "stxv %x1,%0";
20537 else if (TARGET_P9_VECTOR)
20538 return "stxvx %x1,%y0";
20540 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20541 return "stxvw4x %x1,%y0";
20543 else
20544 return "stxvd2x %x1,%y0";
20547 else if (TARGET_ALTIVEC && src_vmx_p)
20548 return "stvx %1,%y0";
20550 else if (src_fp_p)
20551 return "#";
20554 /* Constants. */
20555 else if (dest_regno >= 0
20556 && (GET_CODE (src) == CONST_INT
20557 || GET_CODE (src) == CONST_WIDE_INT
20558 || GET_CODE (src) == CONST_DOUBLE
20559 || GET_CODE (src) == CONST_VECTOR))
20561 if (dest_gpr_p)
20562 return "#";
20564 else if ((dest_vmx_p && TARGET_ALTIVEC)
20565 || (dest_vsx_p && TARGET_VSX))
20566 return output_vec_const_move (operands);
20569 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
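/* Illustrative examples of the selection above: a V2DImode copy
   between two VSX registers prints "xxlor %x0,%x1,%x1"; with ISA 3.0
   direct moves a vsx-to-gpr copy prints the mfvsrd/mfvsrld pair; and
   cases that print "#" are split into word-sized moves after reload
   (see rs6000_split_128bit_ok_p below).  */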
20572 /* Validate a 128-bit move. */
20573 bool
20574 rs6000_move_128bit_ok_p (rtx operands[])
20576 machine_mode mode = GET_MODE (operands[0]);
20577 return (gpc_reg_operand (operands[0], mode)
20578 || gpc_reg_operand (operands[1], mode));
20581 /* Return true if a 128-bit move needs to be split. */
20582 bool
20583 rs6000_split_128bit_ok_p (rtx operands[])
20585 if (!reload_completed)
20586 return false;
20588 if (!gpr_or_gpr_p (operands[0], operands[1]))
20589 return false;
20591 if (quad_load_store_p (operands[0], operands[1]))
20592 return false;
20594 return true;
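/* e.g. (illustrative) a TImode gpr<->memory move that printed "#"
   above passes this predicate after reload and is then split by the
   machine description into word-at-a-time moves, unless it already
   matched the lq/stq quad memory forms.  */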
20598 /* Given a comparison operation, return the bit number in CCR to test. We
20599 know this is a valid comparison.
20601 SCC_P is 1 if this is for an scc. That means that %D will have been
20602 used instead of %C, so the bits will be in different places.
20604 Return -1 if OP isn't a valid comparison for some reason. */
20606 int
20607 ccr_bit (rtx op, int scc_p)
20609 enum rtx_code code = GET_CODE (op);
20610 machine_mode cc_mode;
20611 int cc_regnum;
20612 int base_bit;
20613 rtx reg;
20615 if (!COMPARISON_P (op))
20616 return -1;
20618 reg = XEXP (op, 0);
20620 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20622 cc_mode = GET_MODE (reg);
20623 cc_regnum = REGNO (reg);
20624 base_bit = 4 * (cc_regnum - CR0_REGNO);
20626 validate_condition_mode (code, cc_mode);
20628 /* When generating a sCOND operation, only positive conditions are
20629 allowed. */
20630 gcc_assert (!scc_p
20631 || code == EQ || code == GT || code == LT || code == UNORDERED
20632 || code == GTU || code == LTU);
20634 switch (code)
20636 case NE:
20637 return scc_p ? base_bit + 3 : base_bit + 2;
20638 case EQ:
20639 return base_bit + 2;
20640 case GT: case GTU: case UNLE:
20641 return base_bit + 1;
20642 case LT: case LTU: case UNGE:
20643 return base_bit;
20644 case ORDERED: case UNORDERED:
20645 return base_bit + 3;
20647 case GE: case GEU:
20648 /* If scc, we will have done a cror to put the bit in the
20649 unordered position. So test that bit. For integer, this is ! LT
20650 unless this is an scc insn. */
20651 return scc_p ? base_bit + 3 : base_bit;
20653 case LE: case LEU:
20654 return scc_p ? base_bit + 3 : base_bit + 1;
20656 default:
20657 gcc_unreachable ();
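/* Example (illustrative): a GT comparison living in CR field 6 yields
   base_bit + 1 = 4*6 + 1 = 25, i.e. the GT bit of cr6 within the
   32-bit condition register.  */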
20661 /* Return the GOT register. */
20663 rtx
20664 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20666 /* The second flow pass currently (June 1999) can't update
20667 regs_ever_live without disturbing other parts of the compiler, so
20668 update it here to make the prolog/epilogue code happy. */
20669 if (!can_create_pseudo_p ()
20670 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20671 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20673 crtl->uses_pic_offset_table = 1;
20675 return pic_offset_table_rtx;
20678 static rs6000_stack_t stack_info;
20680 /* Function to init struct machine_function.
20681 This will be called, via a pointer variable,
20682 from push_function_context. */
20684 static struct machine_function *
20685 rs6000_init_machine_status (void)
20687 stack_info.reload_completed = 0;
20688 return ggc_cleared_alloc<machine_function> ();
20691 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20693 /* Write out a function code label. */
20695 void
20696 rs6000_output_function_entry (FILE *file, const char *fname)
20698 if (fname[0] != '.')
20700 switch (DEFAULT_ABI)
20702 default:
20703 gcc_unreachable ();
20705 case ABI_AIX:
20706 if (DOT_SYMBOLS)
20707 putc ('.', file);
20708 else
20709 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20710 break;
20712 case ABI_ELFv2:
20713 case ABI_V4:
20714 case ABI_DARWIN:
20715 break;
20719 RS6000_OUTPUT_BASENAME (file, fname);
20722 /* Print an operand. Recognize special options, documented below. */
20724 #if TARGET_ELF
20725 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20726 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20727 #else
20728 #define SMALL_DATA_RELOC "sda21"
20729 #define SMALL_DATA_REG 0
20730 #endif
20732 void
20733 print_operand (FILE *file, rtx x, int code)
20735 int i;
20736 unsigned HOST_WIDE_INT uval;
20738 switch (code)
20740 /* %a is output_address. */
20742 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20743 output_operand. */
20745 case 'D':
20746 /* Like 'J' but get to the GT bit only. */
20747 gcc_assert (REG_P (x));
20749 /* Bit 1 is GT bit. */
20750 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20752 /* Add one for shift count in rlinm for scc. */
20753 fprintf (file, "%d", i + 1);
20754 return;
20756 case 'e':
20757 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20758 if (! INT_P (x))
20760 output_operand_lossage ("invalid %%e value");
20761 return;
20764 uval = INTVAL (x);
20765 if ((uval & 0xffff) == 0 && uval != 0)
20766 putc ('s', file);
20767 return;
20769 case 'E':
20770 /* X is a CR register. Print the number of the EQ bit of the CR */
20771 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20772 output_operand_lossage ("invalid %%E value");
20773 else
20774 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20775 return;
20777 case 'f':
20778 /* X is a CR register. Print the shift count needed to move it
20779 to the high-order four bits. */
20780 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20781 output_operand_lossage ("invalid %%f value");
20782 else
20783 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20784 return;
20786 case 'F':
20787 /* Similar, but print the count for the rotate in the opposite
20788 direction. */
20789 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20790 output_operand_lossage ("invalid %%F value");
20791 else
20792 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20793 return;
20795 case 'G':
20796 /* X is a constant integer. If it is negative, print "m",
20797 otherwise print "z". This is to make an aze or ame insn. */
20798 if (GET_CODE (x) != CONST_INT)
20799 output_operand_lossage ("invalid %%G value");
20800 else if (INTVAL (x) >= 0)
20801 putc ('z', file);
20802 else
20803 putc ('m', file);
20804 return;
20806 case 'h':
20807 /* If constant, output low-order five bits. Otherwise, write
20808 normally. */
20809 if (INT_P (x))
20810 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20811 else
20812 print_operand (file, x, 0);
20813 return;
20815 case 'H':
20816 /* If constant, output low-order six bits. Otherwise, write
20817 normally. */
20818 if (INT_P (x))
20819 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20820 else
20821 print_operand (file, x, 0);
20822 return;
20824 case 'I':
20825 /* Print `i' if this is a constant, else nothing. */
20826 if (INT_P (x))
20827 putc ('i', file);
20828 return;
20830 case 'j':
20831 /* Write the bit number in CCR for jump. */
20832 i = ccr_bit (x, 0);
20833 if (i == -1)
20834 output_operand_lossage ("invalid %%j code");
20835 else
20836 fprintf (file, "%d", i);
20837 return;
20839 case 'J':
20840 /* Similar, but add one for shift count in rlinm for scc and pass
20841 scc flag to `ccr_bit'. */
20842 i = ccr_bit (x, 1);
20843 if (i == -1)
20844 output_operand_lossage ("invalid %%J code");
20845 else
20846 /* If we want bit 31, write a shift count of zero, not 32. */
20847 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20848 return;
20850 case 'k':
20851 /* X must be a constant. Write the 1's complement of the
20852 constant. */
20853 if (! INT_P (x))
20854 output_operand_lossage ("invalid %%k value");
20855 else
20856 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20857 return;
20859 case 'K':
20860 /* X must be a symbolic constant on ELF. Write an
20861 expression suitable for an 'addi' that adds in the low 16
20862 bits of the MEM. */
20863 if (GET_CODE (x) == CONST)
20865 if (GET_CODE (XEXP (x, 0)) != PLUS
20866 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20867 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20868 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20869 output_operand_lossage ("invalid %%K value");
20871 print_operand_address (file, x);
20872 fputs ("@l", file);
20873 return;
20875 /* %l is output_asm_label. */
20877 case 'L':
20878 /* Write second word of DImode or DFmode reference. Works on register
20879 or non-indexed memory only. */
20880 if (REG_P (x))
20881 fputs (reg_names[REGNO (x) + 1], file);
20882 else if (MEM_P (x))
20884 machine_mode mode = GET_MODE (x);
20885 /* Handle possible auto-increment. Since it is pre-increment and
20886 we have already done it, we can just use an offset of word. */
20887 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20888 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20889 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20890 UNITS_PER_WORD));
20891 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20892 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20893 UNITS_PER_WORD));
20894 else
20895 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20896 UNITS_PER_WORD),
20897 0));
20899 if (small_data_operand (x, GET_MODE (x)))
20900 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20901 reg_names[SMALL_DATA_REG]);
20903 return;
20905 case 'N':
20906 /* Write the number of elements in the vector times 4. */
20907 if (GET_CODE (x) != PARALLEL)
20908 output_operand_lossage ("invalid %%N value");
20909 else
20910 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20911 return;
20913 case 'O':
20914 /* Similar, but subtract 1 first. */
20915 if (GET_CODE (x) != PARALLEL)
20916 output_operand_lossage ("invalid %%O value");
20917 else
20918 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20919 return;
20921 case 'p':
20922 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20923 if (! INT_P (x)
20924 || INTVAL (x) < 0
20925 || (i = exact_log2 (INTVAL (x))) < 0)
20926 output_operand_lossage ("invalid %%p value");
20927 else
20928 fprintf (file, "%d", i);
20929 return;
20931 case 'P':
20932 /* The operand must be an indirect memory reference. The result
20933 is the register name. */
20934 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20935 || REGNO (XEXP (x, 0)) >= 32)
20936 output_operand_lossage ("invalid %%P value");
20937 else
20938 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20939 return;
20941 case 'q':
20942 /* This outputs the logical code corresponding to a boolean
20943 expression. The expression may have one or both operands
20944 negated (if one, only the first one). For condition register
20945 logical operations, it will also treat the negated
20946 CR codes as NOTs, but not handle NOTs of them. */
20948 const char *const *t = 0;
20949 const char *s;
20950 enum rtx_code code = GET_CODE (x);
20951 static const char * const tbl[3][3] = {
20952 { "and", "andc", "nor" },
20953 { "or", "orc", "nand" },
20954 { "xor", "eqv", "xor" } };
20956 if (code == AND)
20957 t = tbl[0];
20958 else if (code == IOR)
20959 t = tbl[1];
20960 else if (code == XOR)
20961 t = tbl[2];
20962 else
20963 output_operand_lossage ("invalid %%q value");
20965 if (GET_CODE (XEXP (x, 0)) != NOT)
20966 s = t[0];
20967 else
20969 if (GET_CODE (XEXP (x, 1)) == NOT)
20970 s = t[2];
20971 else
20972 s = t[1];
20975 fputs (s, file);
20977 return;
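/* Examples for the table above (illustrative): (and (not a) b) prints
   "andc", (ior (not a) b) prints "orc", and (ior (not a) (not b))
   prints "nand"; a NOT on the second operand alone is not handled.  */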
20979 case 'Q':
20980 if (! TARGET_MFCRF)
20981 return;
20982 fputc (',', file);
20983 /* FALLTHRU */
20985 case 'R':
20986 /* X is a CR register. Print the mask for `mtcrf'. */
20987 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20988 output_operand_lossage ("invalid %%R value");
20989 else
20990 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20991 return;
20993 case 's':
20994 /* Low 5 bits of 32 - value */
20995 if (! INT_P (x))
20996 output_operand_lossage ("invalid %%s value");
20997 else
20998 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20999 return;
21001 case 't':
21002 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21003 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21005 /* Bit 3 is OV bit. */
21006 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21008 /* If we want bit 31, write a shift count of zero, not 32. */
21009 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21010 return;
21012 case 'T':
21013 /* Print the symbolic name of a branch target register. */
21014 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21015 && REGNO (x) != CTR_REGNO))
21016 output_operand_lossage ("invalid %%T value");
21017 else if (REGNO (x) == LR_REGNO)
21018 fputs ("lr", file);
21019 else
21020 fputs ("ctr", file);
21021 return;
21023 case 'u':
21024 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21025 for use in unsigned operand. */
21026 if (! INT_P (x))
21028 output_operand_lossage ("invalid %%u value");
21029 return;
21032 uval = INTVAL (x);
21033 if ((uval & 0xffff) == 0)
21034 uval >>= 16;
21036 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21037 return;
21039 case 'v':
21040 /* High-order 16 bits of constant for use in signed operand. */
21041 if (! INT_P (x))
21042 output_operand_lossage ("invalid %%v value");
21043 else
21044 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21045 (INTVAL (x) >> 16) & 0xffff);
21046 return;
21048 case 'U':
21049 /* Print `u' if this has an auto-increment or auto-decrement. */
21050 if (MEM_P (x)
21051 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21052 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21053 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21054 putc ('u', file);
21055 return;
21057 case 'V':
21058 /* Print the trap code for this operand. */
21059 switch (GET_CODE (x))
21061 case EQ:
21062 fputs ("eq", file); /* 4 */
21063 break;
21064 case NE:
21065 fputs ("ne", file); /* 24 */
21066 break;
21067 case LT:
21068 fputs ("lt", file); /* 16 */
21069 break;
21070 case LE:
21071 fputs ("le", file); /* 20 */
21072 break;
21073 case GT:
21074 fputs ("gt", file); /* 8 */
21075 break;
21076 case GE:
21077 fputs ("ge", file); /* 12 */
21078 break;
21079 case LTU:
21080 fputs ("llt", file); /* 2 */
21081 break;
21082 case LEU:
21083 fputs ("lle", file); /* 6 */
21084 break;
21085 case GTU:
21086 fputs ("lgt", file); /* 1 */
21087 break;
21088 case GEU:
21089 fputs ("lge", file); /* 5 */
21090 break;
21091 default:
21092 gcc_unreachable ();
21094 break;
21096 case 'w':
21097 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21098 normally. */
21099 if (INT_P (x))
21100 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21101 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21102 else
21103 print_operand (file, x, 0);
21104 return;
21106 case 'x':
21107 /* X is a FPR or Altivec register used in a VSX context. */
21108 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21109 output_operand_lossage ("invalid %%x value");
21110 else
21112 int reg = REGNO (x);
21113 int vsx_reg = (FP_REGNO_P (reg)
21114 ? reg - 32
21115 : reg - FIRST_ALTIVEC_REGNO + 32);
21117 #ifdef TARGET_REGNAMES
21118 if (TARGET_REGNAMES)
21119 fprintf (file, "%%vs%d", vsx_reg);
21120 else
21121 #endif
21122 fprintf (file, "%d", vsx_reg);
21124 return;
21126 case 'X':
21127 if (MEM_P (x)
21128 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21129 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21130 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21131 putc ('x', file);
21132 return;
21134 case 'Y':
21135 /* Like 'L', for third word of TImode/PTImode */
21136 if (REG_P (x))
21137 fputs (reg_names[REGNO (x) + 2], file);
21138 else if (MEM_P (x))
21140 machine_mode mode = GET_MODE (x);
21141 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21142 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21143 output_address (mode, plus_constant (Pmode,
21144 XEXP (XEXP (x, 0), 0), 8));
21145 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21146 output_address (mode, plus_constant (Pmode,
21147 XEXP (XEXP (x, 0), 0), 8));
21148 else
21149 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21150 if (small_data_operand (x, GET_MODE (x)))
21151 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21152 reg_names[SMALL_DATA_REG]);
21154 return;
21156 case 'z':
21157 /* X is a SYMBOL_REF. Write out the name preceded by a
21158 period and without any trailing data in brackets. Used for function
21159 names. If we are configured for System V (or the embedded ABI) on
21160 the PowerPC, do not emit the period, since those systems do not use
21161 TOCs and the like. */
21162 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21164 /* For macho, check to see if we need a stub. */
21165 if (TARGET_MACHO)
21167 const char *name = XSTR (x, 0);
21168 #if TARGET_MACHO
21169 if (darwin_emit_branch_islands
21170 && MACHOPIC_INDIRECT
21171 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21172 name = machopic_indirection_name (x, /*stub_p=*/true);
21173 #endif
21174 assemble_name (file, name);
21176 else if (!DOT_SYMBOLS)
21177 assemble_name (file, XSTR (x, 0));
21178 else
21179 rs6000_output_function_entry (file, XSTR (x, 0));
21180 return;
21182 case 'Z':
21183 /* Like 'L', for last word of TImode/PTImode. */
21184 if (REG_P (x))
21185 fputs (reg_names[REGNO (x) + 3], file);
21186 else if (MEM_P (x))
21188 machine_mode mode = GET_MODE (x);
21189 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21190 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21191 output_address (mode, plus_constant (Pmode,
21192 XEXP (XEXP (x, 0), 0), 12));
21193 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21194 output_address (mode, plus_constant (Pmode,
21195 XEXP (XEXP (x, 0), 0), 12));
21196 else
21197 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21198 if (small_data_operand (x, GET_MODE (x)))
21199 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21200 reg_names[SMALL_DATA_REG]);
21202 return;
21204 /* Print AltiVec or SPE memory operand. */
21205 case 'y':
21207 rtx tmp;
21209 gcc_assert (MEM_P (x));
21211 tmp = XEXP (x, 0);
21213 /* Ugly hack because %y is overloaded. */
21214 if ((TARGET_SPE || TARGET_E500_DOUBLE)
21215 && (GET_MODE_SIZE (GET_MODE (x)) == 8
21216 || FLOAT128_2REG_P (GET_MODE (x))
21217 || GET_MODE (x) == TImode
21218 || GET_MODE (x) == PTImode))
21220 /* Handle [reg]. */
21221 if (REG_P (tmp))
21223 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
21224 break;
21226 /* Handle [reg+UIMM]. */
21227 else if (GET_CODE (tmp) == PLUS &&
21228 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
21230 int x;
21232 gcc_assert (REG_P (XEXP (tmp, 0)));
21234 x = INTVAL (XEXP (tmp, 1));
21235 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
21236 break;
21239 /* Fall through. Must be [reg+reg]. */
21241 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21242 && GET_CODE (tmp) == AND
21243 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21244 && INTVAL (XEXP (tmp, 1)) == -16)
21245 tmp = XEXP (tmp, 0);
21246 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21247 && GET_CODE (tmp) == PRE_MODIFY)
21248 tmp = XEXP (tmp, 1);
21249 if (REG_P (tmp))
21250 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21251 else
21253 if (GET_CODE (tmp) != PLUS
21254 || !REG_P (XEXP (tmp, 0))
21255 || !REG_P (XEXP (tmp, 1)))
21257 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21258 break;
21261 if (REGNO (XEXP (tmp, 0)) == 0)
21262 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21263 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21264 else
21265 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21266 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21268 break;
21271 case 0:
21272 if (REG_P (x))
21273 fprintf (file, "%s", reg_names[REGNO (x)]);
21274 else if (MEM_P (x))
21276 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21277 know the width from the mode. */
21278 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21279 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21280 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21281 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21282 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21283 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21284 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21285 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21286 else
21287 output_address (GET_MODE (x), XEXP (x, 0));
21289 else
21291 if (toc_relative_expr_p (x, false))
21292 /* This hack along with a corresponding hack in
21293 rs6000_output_addr_const_extra arranges to output addends
21294 where the assembler expects to find them. eg.
21295 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21296 without this hack would be output as "x@toc+4". We
21297 want "x+4@toc". */
21298 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
21299 else
21300 output_addr_const (file, x);
21302 return;
21304 case '&':
21305 if (const char *name = get_some_local_dynamic_name ())
21306 assemble_name (file, name);
21307 else
21308 output_operand_lossage ("'%%&' used without any "
21309 "local dynamic TLS references");
21310 return;
21312 default:
21313 output_operand_lossage ("invalid %%xn code");
21317 /* Print the address of an operand. */
21319 void
21320 print_operand_address (FILE *file, rtx x)
21322 if (REG_P (x))
21323 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21324 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21325 || GET_CODE (x) == LABEL_REF)
21327 output_addr_const (file, x);
21328 if (small_data_operand (x, GET_MODE (x)))
21329 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21330 reg_names[SMALL_DATA_REG]);
21331 else
21332 gcc_assert (!TARGET_TOC);
21334 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21335 && REG_P (XEXP (x, 1)))
21337 if (REGNO (XEXP (x, 0)) == 0)
21338 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21339 reg_names[ REGNO (XEXP (x, 0)) ]);
21340 else
21341 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21342 reg_names[ REGNO (XEXP (x, 1)) ]);
21344 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21345 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21346 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21347 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21348 #if TARGET_MACHO
21349 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21350 && CONSTANT_P (XEXP (x, 1)))
21352 fprintf (file, "lo16(");
21353 output_addr_const (file, XEXP (x, 1));
21354 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21356 #endif
21357 #if TARGET_ELF
21358 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21359 && CONSTANT_P (XEXP (x, 1)))
21361 output_addr_const (file, XEXP (x, 1));
21362 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21364 #endif
21365 else if (toc_relative_expr_p (x, false))
21367 /* This hack along with a corresponding hack in
21368 rs6000_output_addr_const_extra arranges to output addends
21369 where the assembler expects to find them. eg.
21370 (lo_sum (reg 9)
21371 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21372 without this hack would be output as "x@toc+8@l(9)". We
21373 want "x+8@toc@l(9)". */
21374 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
21375 if (GET_CODE (x) == LO_SUM)
21376 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21377 else
21378 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
21380 else
21381 gcc_unreachable ();
21384 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
21386 static bool
21387 rs6000_output_addr_const_extra (FILE *file, rtx x)
21389 if (GET_CODE (x) == UNSPEC)
21390 switch (XINT (x, 1))
21392 case UNSPEC_TOCREL:
21393 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21394 && REG_P (XVECEXP (x, 0, 1))
21395 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21396 output_addr_const (file, XVECEXP (x, 0, 0));
21397 if (x == tocrel_base && tocrel_offset != const0_rtx)
21399 if (INTVAL (tocrel_offset) >= 0)
21400 fprintf (file, "+");
21401 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
21403 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21405 putc ('-', file);
21406 assemble_name (file, toc_label_name);
21407 need_toc_init = 1;
21409 else if (TARGET_ELF)
21410 fputs ("@toc", file);
21411 return true;
21413 #if TARGET_MACHO
21414 case UNSPEC_MACHOPIC_OFFSET:
21415 output_addr_const (file, XVECEXP (x, 0, 0));
21416 putc ('-', file);
21417 machopic_output_function_base_name (file);
21418 return true;
21419 #endif
21421 return false;
21424 /* Target hook for assembling integer objects. The PowerPC version has
21425 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21426 is defined. It also needs to handle DI-mode objects on 64-bit
21427 targets. */
21429 static bool
21430 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21432 #ifdef RELOCATABLE_NEEDS_FIXUP
21433 /* Special handling for SI values. */
21434 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21436 static int recurse = 0;
21438 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21439 the .fixup section. Since the TOC section is already relocated, we
21440 don't need to mark it here. We used to skip the text section, but it
21441 should never be valid for relocated addresses to be placed in the text
21442 section. */
21443 if (DEFAULT_ABI == ABI_V4
21444 && (TARGET_RELOCATABLE || flag_pic > 1)
21445 && in_section != toc_section
21446 && !recurse
21447 && !CONST_SCALAR_INT_P (x)
21448 && CONSTANT_P (x))
21450 char buf[256];
21452 recurse = 1;
21453 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21454 fixuplabelno++;
21455 ASM_OUTPUT_LABEL (asm_out_file, buf);
21456 fprintf (asm_out_file, "\t.long\t(");
21457 output_addr_const (asm_out_file, x);
21458 fprintf (asm_out_file, ")@fixup\n");
21459 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21460 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21461 fprintf (asm_out_file, "\t.long\t");
21462 assemble_name (asm_out_file, buf);
21463 fprintf (asm_out_file, "\n\t.previous\n");
21464 recurse = 0;
21465 return true;
21467 /* Remove initial .'s to turn a -mcall-aixdesc function
21468 address into the address of the descriptor, not the function
21469 itself. */
21470 else if (GET_CODE (x) == SYMBOL_REF
21471 && XSTR (x, 0)[0] == '.'
21472 && DEFAULT_ABI == ABI_AIX)
21474 const char *name = XSTR (x, 0);
21475 while (*name == '.')
21476 name++;
21478 fprintf (asm_out_file, "\t.long\t%s\n", name);
21479 return true;
21482 #endif /* RELOCATABLE_NEEDS_FIXUP */
21483 return default_assemble_integer (x, size, aligned_p);
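/* With -mrelocatable the fixup path above emits assembly along these
   lines (illustrative):
	.LCP1:
		.long	(sym)@fixup
		.section ".fixup","aw"
		.align	2
		.long	.LCP1
		.previous
   recording the word's address so startup code can relocate it.  */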
21486 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21487 /* Emit an assembler directive to set symbol visibility for DECL to
21488 VISIBILITY_TYPE. */
21490 static void
21491 rs6000_assemble_visibility (tree decl, int vis)
21493 if (TARGET_XCOFF)
21494 return;
21496 /* Functions need to have their entry point symbol visibility set as
21497 well as their descriptor symbol visibility. */
21498 if (DEFAULT_ABI == ABI_AIX
21499 && DOT_SYMBOLS
21500 && TREE_CODE (decl) == FUNCTION_DECL)
21502 static const char * const visibility_types[] = {
21503 NULL, "internal", "hidden", "protected"
21506 const char *name, *type;
21508 name = ((* targetm.strip_name_encoding)
21509 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21510 type = visibility_types[vis];
21512 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21513 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21515 else
21516 default_assemble_visibility (decl, vis);
21518 #endif
21520 enum rtx_code
21521 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21523 /* Reversal of FP compares takes care -- an ordered compare
21524 becomes an unordered compare and vice versa. */
21525 if (mode == CCFPmode
21526 && (!flag_finite_math_only
21527 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21528 || code == UNEQ || code == LTGT))
21529 return reverse_condition_maybe_unordered (code);
21530 else
21531 return reverse_condition (code);
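/* e.g. reversing (lt ...) on a CCFPmode result must yield UNGE rather
   than GE: when the operands compare unordered, LT is false, so the
   reversed condition has to be true, which a plain GE would not be.  */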
21534 /* Generate a compare for CODE. Return a brand-new rtx that
21535 represents the result of the compare. */
21537 static rtx
21538 rs6000_generate_compare (rtx cmp, machine_mode mode)
21540 machine_mode comp_mode;
21541 rtx compare_result;
21542 enum rtx_code code = GET_CODE (cmp);
21543 rtx op0 = XEXP (cmp, 0);
21544 rtx op1 = XEXP (cmp, 1);
21546 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21547 comp_mode = CCmode;
21548 else if (FLOAT_MODE_P (mode))
21549 comp_mode = CCFPmode;
21550 else if (code == GTU || code == LTU
21551 || code == GEU || code == LEU)
21552 comp_mode = CCUNSmode;
21553 else if ((code == EQ || code == NE)
21554 && unsigned_reg_p (op0)
21555 && (unsigned_reg_p (op1)
21556 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21557 /* These are unsigned values; perhaps there will be a later
21558 ordering compare that can be shared with this one. */
21559 comp_mode = CCUNSmode;
21560 else
21561 comp_mode = CCmode;
21563 /* If we have an unsigned compare, make sure we don't have a signed value as
21564 an immediate. */
21565 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21566 && INTVAL (op1) < 0)
21568 op0 = copy_rtx_if_shared (op0);
21569 op1 = force_reg (GET_MODE (op0), op1);
21570 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21573 /* First, the compare. */
21574 compare_result = gen_reg_rtx (comp_mode);
21576 /* E500 FP compare instructions on the GPRs. Yuck! */
21577 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
21578 && FLOAT_MODE_P (mode))
21580 rtx cmp, or_result, compare_result2;
21581 machine_mode op_mode = GET_MODE (op0);
21582 bool reverse_p;
21584 if (op_mode == VOIDmode)
21585 op_mode = GET_MODE (op1);
21587 /* First reverse the condition codes that aren't directly supported. */
21588 switch (code)
21590 case NE:
21591 case UNLT:
21592 case UNLE:
21593 case UNGT:
21594 case UNGE:
21595 code = reverse_condition_maybe_unordered (code);
21596 reverse_p = true;
21597 break;
21599 case EQ:
21600 case LT:
21601 case LE:
21602 case GT:
21603 case GE:
21604 reverse_p = false;
21605 break;
21607 default:
21608 gcc_unreachable ();
21611 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
21612 This explains the following mess. */
21614 switch (code)
21616 case EQ:
21617 switch (op_mode)
21619 case SFmode:
21620 cmp = (flag_finite_math_only && !flag_trapping_math)
21621 ? gen_tstsfeq_gpr (compare_result, op0, op1)
21622 : gen_cmpsfeq_gpr (compare_result, op0, op1);
21623 break;
21625 case DFmode:
21626 cmp = (flag_finite_math_only && !flag_trapping_math)
21627 ? gen_tstdfeq_gpr (compare_result, op0, op1)
21628 : gen_cmpdfeq_gpr (compare_result, op0, op1);
21629 break;
21631 case TFmode:
21632 case IFmode:
21633 case KFmode:
21634 cmp = (flag_finite_math_only && !flag_trapping_math)
21635 ? gen_tsttfeq_gpr (compare_result, op0, op1)
21636 : gen_cmptfeq_gpr (compare_result, op0, op1);
21637 break;
21639 default:
21640 gcc_unreachable ();
21642 break;
21644 case GT:
21645 case GE:
21646 switch (op_mode)
21648 case SFmode:
21649 cmp = (flag_finite_math_only && !flag_trapping_math)
21650 ? gen_tstsfgt_gpr (compare_result, op0, op1)
21651 : gen_cmpsfgt_gpr (compare_result, op0, op1);
21652 break;
21654 case DFmode:
21655 cmp = (flag_finite_math_only && !flag_trapping_math)
21656 ? gen_tstdfgt_gpr (compare_result, op0, op1)
21657 : gen_cmpdfgt_gpr (compare_result, op0, op1);
21658 break;
21660 case TFmode:
21661 case IFmode:
21662 case KFmode:
21663 cmp = (flag_finite_math_only && !flag_trapping_math)
21664 ? gen_tsttfgt_gpr (compare_result, op0, op1)
21665 : gen_cmptfgt_gpr (compare_result, op0, op1);
21666 break;
21668 default:
21669 gcc_unreachable ();
21671 break;
21673 case LT:
21674 case LE:
21675 switch (op_mode)
21677 case SFmode:
21678 cmp = (flag_finite_math_only && !flag_trapping_math)
21679 ? gen_tstsflt_gpr (compare_result, op0, op1)
21680 : gen_cmpsflt_gpr (compare_result, op0, op1);
21681 break;
21683 case DFmode:
21684 cmp = (flag_finite_math_only && !flag_trapping_math)
21685 ? gen_tstdflt_gpr (compare_result, op0, op1)
21686 : gen_cmpdflt_gpr (compare_result, op0, op1);
21687 break;
21689 case TFmode:
21690 case IFmode:
21691 case KFmode:
21692 cmp = (flag_finite_math_only && !flag_trapping_math)
21693 ? gen_tsttflt_gpr (compare_result, op0, op1)
21694 : gen_cmptflt_gpr (compare_result, op0, op1);
21695 break;
21697 default:
21698 gcc_unreachable ();
21700 break;
21702 default:
21703 gcc_unreachable ();
21706 /* Synthesize LE and GE from LT/GT || EQ. */
21707 if (code == LE || code == GE)
21709 emit_insn (cmp);
21711 compare_result2 = gen_reg_rtx (CCFPmode);
21713 /* Do the EQ. */
21714 switch (op_mode)
21716 case SFmode:
21717 cmp = (flag_finite_math_only && !flag_trapping_math)
21718 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
21719 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
21720 break;
21722 case DFmode:
21723 cmp = (flag_finite_math_only && !flag_trapping_math)
21724 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
21725 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
21726 break;
21728 case TFmode:
21729 case IFmode:
21730 case KFmode:
21731 cmp = (flag_finite_math_only && !flag_trapping_math)
21732 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
21733 : gen_cmptfeq_gpr (compare_result2, op0, op1);
21734 break;
21736 default:
21737 gcc_unreachable ();
21740 emit_insn (cmp);
21742 /* OR them together. */
21743 or_result = gen_reg_rtx (CCFPmode);
21744 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
21745 compare_result2);
21746 compare_result = or_result;
21749 code = reverse_p ? NE : EQ;
21751 emit_insn (cmp);
21754 /* IEEE 128-bit support in VSX registers when we do not have hardware
21755 support. */
21756 else if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21758 rtx libfunc = NULL_RTX;
21759 bool check_nan = false;
21760 rtx dest;
21762 switch (code)
21764 case EQ:
21765 case NE:
21766 libfunc = optab_libfunc (eq_optab, mode);
21767 break;
21769 case GT:
21770 case GE:
21771 libfunc = optab_libfunc (ge_optab, mode);
21772 break;
21774 case LT:
21775 case LE:
21776 libfunc = optab_libfunc (le_optab, mode);
21777 break;
21779 case UNORDERED:
21780 case ORDERED:
21781 libfunc = optab_libfunc (unord_optab, mode);
21782 code = (code == UNORDERED) ? NE : EQ;
21783 break;
21785 case UNGE:
21786 case UNGT:
21787 check_nan = true;
21788 libfunc = optab_libfunc (ge_optab, mode);
21789 code = (code == UNGE) ? GE : GT;
21790 break;
21792 case UNLE:
21793 case UNLT:
21794 check_nan = true;
21795 libfunc = optab_libfunc (le_optab, mode);
21796 code = (code == UNLE) ? LE : LT;
21797 break;
21799 case UNEQ:
21800 case LTGT:
21801 check_nan = true;
21802 libfunc = optab_libfunc (eq_optab, mode);
21803 code = (code == UNEQ) ? EQ : NE;
21804 break;
21806 default:
21807 gcc_unreachable ();
21810 gcc_assert (libfunc);
21812 if (!check_nan)
21813 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21814 SImode, 2, op0, mode, op1, mode);
21816 /* The library signals an exception for signalling NaNs, so we need to
21817 handle isgreater, etc. by first checking isordered. */
21818 else
21820 rtx ne_rtx, normal_dest, unord_dest;
21821 rtx unord_func = optab_libfunc (unord_optab, mode);
21822 rtx join_label = gen_label_rtx ();
21823 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21824 rtx unord_cmp = gen_reg_rtx (comp_mode);
21827 /* Test for either value being a NaN. */
21828 gcc_assert (unord_func);
21829 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21830 SImode, 2, op0, mode, op1,
21831 mode);
21833 /* Set value (1) if either value is a NaN, and jump to the join
21834 label. */
21835 dest = gen_reg_rtx (SImode);
21836 emit_move_insn (dest, const1_rtx);
21837 emit_insn (gen_rtx_SET (unord_cmp,
21838 gen_rtx_COMPARE (comp_mode, unord_dest,
21839 const0_rtx)));
21841 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21842 emit_jump_insn (gen_rtx_SET (pc_rtx,
21843 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21844 join_ref,
21845 pc_rtx)));
21847 /* Do the normal comparison, knowing that the values are not
21848 NaNs. */
21849 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21850 SImode, 2, op0, mode, op1,
21851 mode);
21853 emit_insn (gen_cstoresi4 (dest,
21854 gen_rtx_fmt_ee (code, SImode, normal_dest,
21855 const0_rtx),
21856 normal_dest, const0_rtx));
21858 /* Join NaN and non-NaN paths. Compare dest against 0. */
21859 emit_label (join_label);
21860 code = NE;
21863 emit_insn (gen_rtx_SET (compare_result,
21864 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21867 else
21869 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21870 CLOBBERs to match cmptf_internal2 pattern. */
21871 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21872 && FLOAT128_IBM_P (GET_MODE (op0))
21873 && TARGET_HARD_FLOAT && TARGET_FPRS)
21874 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21875 gen_rtvec (10,
21876 gen_rtx_SET (compare_result,
21877 gen_rtx_COMPARE (comp_mode, op0, op1)),
21878 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21879 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21880 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21881 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21882 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21883 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21884 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21885 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21886 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21887 else if (GET_CODE (op1) == UNSPEC
21888 && XINT (op1, 1) == UNSPEC_SP_TEST)
21890 rtx op1b = XVECEXP (op1, 0, 0);
21891 comp_mode = CCEQmode;
21892 compare_result = gen_reg_rtx (CCEQmode);
21893 if (TARGET_64BIT)
21894 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21895 else
21896 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21898 else
21899 emit_insn (gen_rtx_SET (compare_result,
21900 gen_rtx_COMPARE (comp_mode, op0, op1)));
21903 /* Some kinds of FP comparisons need an OR operation;
21904 under flag_finite_math_only we don't bother. */
21905 if (FLOAT_MODE_P (mode)
21906 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21907 && !flag_finite_math_only
21908 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
21909 && (code == LE || code == GE
21910 || code == UNEQ || code == LTGT
21911 || code == UNGT || code == UNLT))
21913 enum rtx_code or1, or2;
21914 rtx or1_rtx, or2_rtx, compare2_rtx;
21915 rtx or_result = gen_reg_rtx (CCEQmode);
21917 switch (code)
21919 case LE: or1 = LT; or2 = EQ; break;
21920 case GE: or1 = GT; or2 = EQ; break;
21921 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21922 case LTGT: or1 = LT; or2 = GT; break;
21923 case UNGT: or1 = UNORDERED; or2 = GT; break;
21924 case UNLT: or1 = UNORDERED; or2 = LT; break;
21925 default: gcc_unreachable ();
21927 validate_condition_mode (or1, comp_mode);
21928 validate_condition_mode (or2, comp_mode);
21929 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21930 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21931 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21932 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21933 const_true_rtx);
21934 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21936 compare_result = or_result;
21937 code = EQ;
21940 validate_condition_mode (code, GET_MODE (compare_result));
21942 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
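/* Editorial sketch (not part of the original source): for code == LE on
   a CCFP comparison, the OR block above produces RTL of roughly this
   shape (names illustrative):

	(set (reg:CCEQ or_result)
	     (compare:CCEQ
	       (ior:SI (lt:SI (reg:CCFP cc) (const_int 0))
		       (eq:SI (reg:CCFP cc) (const_int 0)))
	       (const_int 1)))

   i.e. a cror of the LT and EQ condition-register bits, after which a
   single EQ test of or_result suffices.  */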
21946 /* Return the diagnostic message string if the binary operation OP is
21947 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21949 static const char*
21950 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21951 const_tree type1,
21952 const_tree type2)
21954 enum machine_mode mode1 = TYPE_MODE (type1);
21955 enum machine_mode mode2 = TYPE_MODE (type2);
21957 /* For complex modes, use the inner type. */
21958 if (COMPLEX_MODE_P (mode1))
21959 mode1 = GET_MODE_INNER (mode1);
21961 if (COMPLEX_MODE_P (mode2))
21962 mode2 = GET_MODE_INNER (mode2);
21964 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21965 double to intermix unless -mfloat128-convert. */
21966 if (mode1 == mode2)
21967 return NULL;
21969 if (!TARGET_FLOAT128_CVT)
21971 if ((mode1 == KFmode && mode2 == IFmode)
21972 || (mode1 == IFmode && mode2 == KFmode))
21973 return N_("__float128 and __ibm128 cannot be used in the same "
21974 "expression");
21976 if (TARGET_IEEEQUAD
21977 && ((mode1 == IFmode && mode2 == TFmode)
21978 || (mode1 == TFmode && mode2 == IFmode)))
21979 return N_("__ibm128 and long double cannot be used in the same "
21980 "expression");
21982 if (!TARGET_IEEEQUAD
21983 && ((mode1 == KFmode && mode2 == TFmode)
21984 || (mode1 == TFmode && mode2 == KFmode)))
21985 return N_("__float128 and long double cannot be used in the same "
21986 "expression");
21989 return NULL;
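/* Editorial example (not part of the original source): user code that
   triggers the checks above when -mfloat128-convert (TARGET_FLOAT128_CVT)
   is not in effect:

	__float128 a;
	__ibm128 b;
	... a + b ...	// "__float128 and __ibm128 cannot be used in
			// the same expression"

   With mode1 == mode2 (both operands in the same format) the function
   returns NULL and the expression is accepted.  */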
21993 /* Expand floating point conversion to/from __float128 and __ibm128. */
21995 void
21996 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21998 machine_mode dest_mode = GET_MODE (dest);
21999 machine_mode src_mode = GET_MODE (src);
22000 convert_optab cvt = unknown_optab;
22001 bool do_move = false;
22002 rtx libfunc = NULL_RTX;
22003 rtx dest2;
22004 typedef rtx (*rtx_2func_t) (rtx, rtx);
22005 rtx_2func_t hw_convert = (rtx_2func_t)0;
22006 size_t kf_or_tf;
22008 struct hw_conv_t {
22009 rtx_2func_t from_df;
22010 rtx_2func_t from_sf;
22011 rtx_2func_t from_si_sign;
22012 rtx_2func_t from_si_uns;
22013 rtx_2func_t from_di_sign;
22014 rtx_2func_t from_di_uns;
22015 rtx_2func_t to_df;
22016 rtx_2func_t to_sf;
22017 rtx_2func_t to_si_sign;
22018 rtx_2func_t to_si_uns;
22019 rtx_2func_t to_di_sign;
22020 rtx_2func_t to_di_uns;
22021 } hw_conversions[2] = {
22022 /* conversions to/from KFmode */
22024 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22025 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22026 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22027 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22028 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22029 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22030 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22031 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22032 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22033 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22034 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22035 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22038 /* conversions to/from TFmode */
22040 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22041 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22042 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22043 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22044 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22045 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22046 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22047 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22048 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22049 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22050 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22051 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22055 if (dest_mode == src_mode)
22056 gcc_unreachable ();
22058 /* Eliminate memory operations. */
22059 if (MEM_P (src))
22060 src = force_reg (src_mode, src);
22062 if (MEM_P (dest))
22064 rtx tmp = gen_reg_rtx (dest_mode);
22065 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22066 rs6000_emit_move (dest, tmp, dest_mode);
22067 return;
22070 /* Convert to IEEE 128-bit floating point. */
22071 if (FLOAT128_IEEE_P (dest_mode))
22073 if (dest_mode == KFmode)
22074 kf_or_tf = 0;
22075 else if (dest_mode == TFmode)
22076 kf_or_tf = 1;
22077 else
22078 gcc_unreachable ();
22080 switch (src_mode)
22082 case DFmode:
22083 cvt = sext_optab;
22084 hw_convert = hw_conversions[kf_or_tf].from_df;
22085 break;
22087 case SFmode:
22088 cvt = sext_optab;
22089 hw_convert = hw_conversions[kf_or_tf].from_sf;
22090 break;
22092 case KFmode:
22093 case IFmode:
22094 case TFmode:
22095 if (FLOAT128_IBM_P (src_mode))
22096 cvt = sext_optab;
22097 else
22098 do_move = true;
22099 break;
22101 case SImode:
22102 if (unsigned_p)
22104 cvt = ufloat_optab;
22105 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22107 else
22109 cvt = sfloat_optab;
22110 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22112 break;
22114 case DImode:
22115 if (unsigned_p)
22117 cvt = ufloat_optab;
22118 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22120 else
22122 cvt = sfloat_optab;
22123 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22125 break;
22127 default:
22128 gcc_unreachable ();
22132 /* Convert from IEEE 128-bit floating point. */
22133 else if (FLOAT128_IEEE_P (src_mode))
22135 if (src_mode == KFmode)
22136 kf_or_tf = 0;
22137 else if (src_mode == TFmode)
22138 kf_or_tf = 1;
22139 else
22140 gcc_unreachable ();
22142 switch (dest_mode)
22144 case DFmode:
22145 cvt = trunc_optab;
22146 hw_convert = hw_conversions[kf_or_tf].to_df;
22147 break;
22149 case SFmode:
22150 cvt = trunc_optab;
22151 hw_convert = hw_conversions[kf_or_tf].to_sf;
22152 break;
22154 case KFmode:
22155 case IFmode:
22156 case TFmode:
22157 if (FLOAT128_IBM_P (dest_mode))
22158 cvt = trunc_optab;
22159 else
22160 do_move = true;
22161 break;
22163 case SImode:
22164 if (unsigned_p)
22166 cvt = ufix_optab;
22167 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22169 else
22171 cvt = sfix_optab;
22172 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22174 break;
22176 case DImode:
22177 if (unsigned_p)
22179 cvt = ufix_optab;
22180 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22182 else
22184 cvt = sfix_optab;
22185 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22187 break;
22189 default:
22190 gcc_unreachable ();
22194 /* Both IBM format. */
22195 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22196 do_move = true;
22198 else
22199 gcc_unreachable ();
22201 /* Handle conversion between TFmode/KFmode. */
22202 if (do_move)
22203 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22205 /* Handle conversion if we have hardware support. */
22206 else if (TARGET_FLOAT128_HW && hw_convert)
22207 emit_insn ((hw_convert) (dest, src));
22209 /* Call an external function to do the conversion. */
22210 else if (cvt != unknown_optab)
22212 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22213 gcc_assert (libfunc != NULL_RTX);
22215 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode, 1, src,
22216 src_mode);
22218 gcc_assert (dest2 != NULL_RTX);
22219 if (!rtx_equal_p (dest, dest2))
22220 emit_move_insn (dest, dest2);
22223 else
22224 gcc_unreachable ();
22226 return;
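/* Editorial sketch (not part of the original source): a DFmode to KFmode
   conversion without TARGET_FLOAT128_HW follows the cvt = sext_optab
   path above, so the whole expansion reduces to one library call,
   presumably the libgcc soft-fp routine (likely __extenddfkf2):

	libfunc = convert_optab_libfunc (sext_optab, KFmode, DFmode);
	dest2 = emit_library_call_value (libfunc, dest, LCT_CONST,
					 KFmode, 1, src, DFmode);

   With ISA 3.0 hardware the same conversion is instead a single insn
   via hw_conversions[0].from_df (gen_extenddfkf2_hw).  */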
22229 /* Split a conversion from __float128 to an integer type into separate insns.
22230 OPERANDS points to the destination, source, and V2DI temporary
22231 register. CODE is either FIX or UNSIGNED_FIX. */
22233 void
22234 convert_float128_to_int (rtx *operands, enum rtx_code code)
22236 rtx dest = operands[0];
22237 rtx src = operands[1];
22238 rtx tmp = operands[2];
22239 rtx cvt;
22240 rtvec cvt_vec;
22241 rtx cvt_unspec;
22242 rtvec move_vec;
22243 rtx move_unspec;
22245 if (GET_CODE (tmp) == SCRATCH)
22246 tmp = gen_reg_rtx (V2DImode);
22248 if (MEM_P (dest))
22249 dest = rs6000_address_for_fpconvert (dest);
22251 /* Generate the actual convert insn of the form:
22252 (set (tmp) (unspec:V2DI [(fix:SI (reg:KF))] UNSPEC_IEEE128_CONVERT)). */
22253 cvt = gen_rtx_fmt_e (code, GET_MODE (dest), src);
22254 cvt_vec = gen_rtvec (1, cvt);
22255 cvt_unspec = gen_rtx_UNSPEC (V2DImode, cvt_vec, UNSPEC_IEEE128_CONVERT);
22256 emit_insn (gen_rtx_SET (tmp, cvt_unspec));
22258 /* Generate the move insn of the form:
22259 (set (dest:SI) (unspec:SI [(tmp:V2DI)] UNSPEC_IEEE128_MOVE)). */
22260 move_vec = gen_rtvec (1, tmp);
22261 move_unspec = gen_rtx_UNSPEC (GET_MODE (dest), move_vec, UNSPEC_IEEE128_MOVE);
22262 emit_insn (gen_rtx_SET (dest, move_unspec));
22265 /* Split a conversion from an integer type to __float128 into separate insns.
22266 OPERANDS points to the destination, source, and V2DI temporary
22267 register. CODE is either FLOAT or UNSIGNED_FLOAT. */
22269 void
22270 convert_int_to_float128 (rtx *operands, enum rtx_code code)
22272 rtx dest = operands[0];
22273 rtx src = operands[1];
22274 rtx tmp = operands[2];
22275 rtx cvt;
22276 rtvec cvt_vec;
22277 rtx cvt_unspec;
22278 rtvec move_vec;
22279 rtx move_unspec;
22280 rtx unsigned_flag;
22282 if (GET_CODE (tmp) == SCRATCH)
22283 tmp = gen_reg_rtx (V2DImode);
22285 if (MEM_P (src))
22286 src = rs6000_address_for_fpconvert (src);
22288 /* Generate the move of the integer into the Altivec register of the form:
22289 (set (tmp:V2DI) (unspec:V2DI [(src:SI)
22290 (const_int 0)] UNSPEC_IEEE128_MOVE)).
22292 or:
22293 (set (tmp:V2DI) (unspec:V2DI [(src:DI)] UNSPEC_IEEE128_MOVE)). */
22295 if (GET_MODE (src) == SImode)
22297 unsigned_flag = (code == UNSIGNED_FLOAT) ? const1_rtx : const0_rtx;
22298 move_vec = gen_rtvec (2, src, unsigned_flag);
22300 else
22301 move_vec = gen_rtvec (1, src);
22303 move_unspec = gen_rtx_UNSPEC (V2DImode, move_vec, UNSPEC_IEEE128_MOVE);
22304 emit_insn (gen_rtx_SET (tmp, move_unspec));
22306 /* Generate the actual convert insn of the form:
22307 (set (dest:KF) (float:KF (unspec:DI [(tmp:V2DI)]
22308 UNSPEC_IEEE128_CONVERT))). */
22309 cvt_vec = gen_rtvec (1, tmp);
22310 cvt_unspec = gen_rtx_UNSPEC (DImode, cvt_vec, UNSPEC_IEEE128_CONVERT);
22311 cvt = gen_rtx_fmt_e (code, GET_MODE (dest), cvt_unspec);
22312 emit_insn (gen_rtx_SET (dest, cvt));
22316 /* Emit the RTL for an sISEL pattern. */
22318 void
22319 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
22321 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
22324 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22325 can be used as that dest register. Return the dest register. */
21327 rtx
21328 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22330 if (op2 == const0_rtx)
22331 return op1;
22333 if (GET_CODE (scratch) == SCRATCH)
22334 scratch = gen_reg_rtx (mode);
22336 if (logical_operand (op2, mode))
22337 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22338 else
22339 emit_insn (gen_rtx_SET (scratch,
22340 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22342 return scratch;
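/* Editorial note (not part of the original source): both arms above
   reduce an equality test to a test against zero.  If op2 satisfies
   logical_operand, scratch = op1 ^ op2 (a single xori/xoris will do);
   otherwise scratch = op1 - op2.  Either way scratch == 0 exactly when
   op1 == op2, so callers compare the returned register with const0_rtx.  */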
22345 void
22346 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22348 rtx condition_rtx;
22349 machine_mode op_mode;
22350 enum rtx_code cond_code;
22351 rtx result = operands[0];
22353 condition_rtx = rs6000_generate_compare (operands[1], mode);
22354 cond_code = GET_CODE (condition_rtx);
22356 if (FLOAT_MODE_P (mode)
22357 && !TARGET_FPRS && TARGET_HARD_FLOAT)
22359 rtx t;
22361 PUT_MODE (condition_rtx, SImode);
22362 t = XEXP (condition_rtx, 0);
22364 gcc_assert (cond_code == NE || cond_code == EQ);
22366 if (cond_code == NE)
22367 emit_insn (gen_e500_flip_gt_bit (t, t));
22369 emit_insn (gen_move_from_CR_gt_bit (result, t));
22370 return;
22373 if (cond_code == NE
22374 || cond_code == GE || cond_code == LE
22375 || cond_code == GEU || cond_code == LEU
22376 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22378 rtx not_result = gen_reg_rtx (CCEQmode);
22379 rtx not_op, rev_cond_rtx;
22380 machine_mode cc_mode;
22382 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22384 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22385 SImode, XEXP (condition_rtx, 0), const0_rtx);
22386 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22387 emit_insn (gen_rtx_SET (not_result, not_op));
22388 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22391 op_mode = GET_MODE (XEXP (operands[1], 0));
22392 if (op_mode == VOIDmode)
22393 op_mode = GET_MODE (XEXP (operands[1], 1));
22395 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22397 PUT_MODE (condition_rtx, DImode);
22398 convert_move (result, condition_rtx, 0);
22400 else
22402 PUT_MODE (condition_rtx, SImode);
22403 emit_insn (gen_rtx_SET (result, condition_rtx));
22407 /* Emit a conditional branch: test the comparison in OPERANDS[0] and branch to the label in OPERANDS[3]. */
22409 void
22410 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22412 rtx condition_rtx, loc_ref;
22414 condition_rtx = rs6000_generate_compare (operands[0], mode);
22415 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22416 emit_jump_insn (gen_rtx_SET (pc_rtx,
22417 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22418 loc_ref, pc_rtx)));
22421 /* Return the string to output a conditional branch to LABEL, which is
22422 the operand template of the label, or NULL if the branch is really a
22423 conditional return.
22425 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22426 condition code register and its mode specifies what kind of
22427 comparison we made.
22429 REVERSED is nonzero if we should reverse the sense of the comparison.
22431 INSN is the insn. */
22433 char *
22434 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22436 static char string[64];
22437 enum rtx_code code = GET_CODE (op);
22438 rtx cc_reg = XEXP (op, 0);
22439 machine_mode mode = GET_MODE (cc_reg);
22440 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22441 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22442 int really_reversed = reversed ^ need_longbranch;
22443 char *s = string;
22444 const char *ccode;
22445 const char *pred;
22446 rtx note;
22448 validate_condition_mode (code, mode);
22450 /* Work out which way this really branches. We could use
22451 reverse_condition_maybe_unordered here always but this
22452 makes the resulting assembler clearer. */
22453 if (really_reversed)
22455 /* Reversal of FP compares needs care -- an ordered compare
22456 becomes an unordered compare and vice versa. */
22457 if (mode == CCFPmode)
22458 code = reverse_condition_maybe_unordered (code);
22459 else
22460 code = reverse_condition (code);
22463 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
22465 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
22466 to the GT bit. */
22467 switch (code)
22469 case EQ:
22470 /* Opposite of GT. */
22471 code = GT;
22472 break;
22474 case NE:
22475 code = UNLE;
22476 break;
22478 default:
22479 gcc_unreachable ();
22483 switch (code)
22485 /* Not all of these are actually distinct opcodes, but
22486 we distinguish them for clarity of the resulting assembler. */
22487 case NE: case LTGT:
22488 ccode = "ne"; break;
22489 case EQ: case UNEQ:
22490 ccode = "eq"; break;
22491 case GE: case GEU:
22492 ccode = "ge"; break;
22493 case GT: case GTU: case UNGT:
22494 ccode = "gt"; break;
22495 case LE: case LEU:
22496 ccode = "le"; break;
22497 case LT: case LTU: case UNLT:
22498 ccode = "lt"; break;
22499 case UNORDERED: ccode = "un"; break;
22500 case ORDERED: ccode = "nu"; break;
22501 case UNGE: ccode = "nl"; break;
22502 case UNLE: ccode = "ng"; break;
22503 default:
22504 gcc_unreachable ();
22507 /* Maybe we have a guess as to how likely the branch is. */
22508 pred = "";
22509 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22510 if (note != NULL_RTX)
22512 /* PROB is the difference from 50%. */
22513 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
22515 /* Only hint for highly probable/improbable branches on newer cpus when
22516 we have real profile data, as static prediction overrides processor
22517 dynamic prediction. For older cpus we may as well always hint, but
22518 assume not taken for branches that are very close to 50% as a
22519 mispredicted taken branch is more expensive than a
22520 mispredicted not-taken branch. */
22521 if (rs6000_always_hint
22522 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22523 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22524 && br_prob_note_reliable_p (note)))
22526 if (abs (prob) > REG_BR_PROB_BASE / 20
22527 && ((prob > 0) ^ need_longbranch))
22528 pred = "+";
22529 else
22530 pred = "-";
22534 if (label == NULL)
22535 s += sprintf (s, "b%slr%s ", ccode, pred);
22536 else
22537 s += sprintf (s, "b%s%s ", ccode, pred);
22539 /* We need to escape any '%' characters in the reg_names string.
22540 Assume they'd only be the first character.... */
22541 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22542 *s++ = '%';
22543 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22545 if (label != NULL)
22547 /* If the branch distance was too far, we may have to use an
22548 unconditional branch to go the distance. */
22549 if (need_longbranch)
22550 s += sprintf (s, ",$+8\n\tb %s", label);
22551 else
22552 s += sprintf (s, ",%s", label);
22555 return string;
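/* Editorial examples (not part of the original source) of strings the
   routine above can return, assuming CR field cr0 and a hypothetical
   label .L5 (the register spelling depends on reg_names):

	beq cr0,.L5	plain conditional branch
	bne+ cr0,.L5	with a "likely taken" static hint
	beqlr cr0	conditional return (label == NULL)
	bne cr0,$+8
	b .L5		long-branch form: the sense is reversed and an
			unconditional branch covers the distance.  */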
22558 /* Return the string to flip the GT bit on a CR. */
22559 char *
22560 output_e500_flip_gt_bit (rtx dst, rtx src)
22562 static char string[64];
22563 int a, b;
22565 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
22566 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
22568 /* GT bit. */
22569 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
22570 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
22572 sprintf (string, "crnot %d,%d", a, b);
22573 return string;
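/* Editorial example (not part of the original source): with dst and src
   both cr1, a = b = 4 * (REGNO - CR0_REGNO) + 1 = 5, so the routine
   returns "crnot 5,5", inverting CR field 1's GT bit in place.  */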
22576 /* Return insn for VSX or Altivec comparisons. */
22578 static rtx
22579 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22581 rtx mask;
22582 machine_mode mode = GET_MODE (op0);
22584 switch (code)
22586 default:
22587 break;
22589 case GE:
22590 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22591 return NULL_RTX;
22593 case EQ:
22594 case GT:
22595 case GTU:
22596 case ORDERED:
22597 case UNORDERED:
22598 case UNEQ:
22599 case LTGT:
22600 mask = gen_reg_rtx (mode);
22601 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22602 return mask;
22605 return NULL_RTX;
22608 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22609 DMODE is expected destination mode. This is a recursive function. */
22611 static rtx
22612 rs6000_emit_vector_compare (enum rtx_code rcode,
22613 rtx op0, rtx op1,
22614 machine_mode dmode)
22616 rtx mask;
22617 bool swap_operands = false;
22618 bool try_again = false;
22620 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22621 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22623 /* See if the comparison works as is. */
22624 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22625 if (mask)
22626 return mask;
22628 switch (rcode)
22630 case LT:
22631 rcode = GT;
22632 swap_operands = true;
22633 try_again = true;
22634 break;
22635 case LTU:
22636 rcode = GTU;
22637 swap_operands = true;
22638 try_again = true;
22639 break;
22640 case NE:
22641 case UNLE:
22642 case UNLT:
22643 case UNGE:
22644 case UNGT:
22645 /* Invert condition and try again.
22646 e.g., A != B becomes ~(A==B). */
22648 enum rtx_code rev_code;
22649 enum insn_code nor_code;
22650 rtx mask2;
22652 rev_code = reverse_condition_maybe_unordered (rcode);
22653 if (rev_code == UNKNOWN)
22654 return NULL_RTX;
22656 nor_code = optab_handler (one_cmpl_optab, dmode);
22657 if (nor_code == CODE_FOR_nothing)
22658 return NULL_RTX;
22660 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22661 if (!mask2)
22662 return NULL_RTX;
22664 mask = gen_reg_rtx (dmode);
22665 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22666 return mask;
22668 break;
22669 case GE:
22670 case GEU:
22671 case LE:
22672 case LEU:
22673 /* Try GT/GTU/LT/LTU OR EQ */
22675 rtx c_rtx, eq_rtx;
22676 enum insn_code ior_code;
22677 enum rtx_code new_code;
22679 switch (rcode)
22681 case GE:
22682 new_code = GT;
22683 break;
22685 case GEU:
22686 new_code = GTU;
22687 break;
22689 case LE:
22690 new_code = LT;
22691 break;
22693 case LEU:
22694 new_code = LTU;
22695 break;
22697 default:
22698 gcc_unreachable ();
22701 ior_code = optab_handler (ior_optab, dmode);
22702 if (ior_code == CODE_FOR_nothing)
22703 return NULL_RTX;
22705 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22706 if (!c_rtx)
22707 return NULL_RTX;
22709 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22710 if (!eq_rtx)
22711 return NULL_RTX;
22713 mask = gen_reg_rtx (dmode);
22714 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22715 return mask;
22717 break;
22718 default:
22719 return NULL_RTX;
22722 if (try_again)
22724 if (swap_operands)
22725 std::swap (op0, op1);
22727 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22728 if (mask)
22729 return mask;
22732 /* You only get two chances. */
22733 return NULL_RTX;
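/* Editorial summary (not part of the original source) of the rewrites
   above for codes the vector unit lacks directly:

	LT (a, b)  ->  GT (b, a)		swap operands, retry
	NE (a, b)  ->  ~EQ (a, b)		reverse, then one_cmpl
	GE (a, b)  ->  GT (a, b) | EQ (a, b)	two compares plus ior

   Each fallback recurses at most one more level, hence the "two
   chances" comment.  */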
22736 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22737 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22738 operands for the relation operation COND. */
22740 int
22741 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22742 rtx cond, rtx cc_op0, rtx cc_op1)
22744 machine_mode dest_mode = GET_MODE (dest);
22745 machine_mode mask_mode = GET_MODE (cc_op0);
22746 enum rtx_code rcode = GET_CODE (cond);
22747 machine_mode cc_mode = CCmode;
22748 rtx mask;
22749 rtx cond2;
22750 rtx tmp;
22751 bool invert_move = false;
22753 if (VECTOR_UNIT_NONE_P (dest_mode))
22754 return 0;
22756 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22757 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22759 switch (rcode)
22761 /* Swap operands if we can, and fall back to doing the operation as
22762 specified, using a NOR to invert the test. */
22763 case NE:
22764 case UNLE:
22765 case UNLT:
22766 case UNGE:
22767 case UNGT:
22768 /* Invert condition and try again.
22769 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22770 invert_move = true;
22771 rcode = reverse_condition_maybe_unordered (rcode);
22772 if (rcode == UNKNOWN)
22773 return 0;
22774 break;
22776 /* Mark unsigned tests with CCUNSmode. */
22777 case GTU:
22778 case GEU:
22779 case LTU:
22780 case LEU:
22781 cc_mode = CCUNSmode;
22782 break;
22784 default:
22785 break;
22788 /* Get the vector mask for the given relational operations. */
22789 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22791 if (!mask)
22792 return 0;
22794 if (invert_move)
22796 tmp = op_true;
22797 op_true = op_false;
22798 op_false = tmp;
22801 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22802 CONST0_RTX (dest_mode));
22803 emit_insn (gen_rtx_SET (dest,
22804 gen_rtx_IF_THEN_ELSE (dest_mode,
22805 cond2,
22806 op_true,
22807 op_false)));
22808 return 1;
22811 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22812 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
22813 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return
22814 0 if the hardware has no such operation. */
22816 static int
22817 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22819 enum rtx_code code = GET_CODE (op);
22820 rtx op0 = XEXP (op, 0);
22821 rtx op1 = XEXP (op, 1);
22822 machine_mode compare_mode = GET_MODE (op0);
22823 machine_mode result_mode = GET_MODE (dest);
22824 bool max_p = false;
22826 if (result_mode != compare_mode)
22827 return 0;
22829 if (code == GE || code == GT)
22830 max_p = true;
22831 else if (code == LE || code == LT)
22832 max_p = false;
22833 else
22834 return 0;
22836 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22839 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22840 max_p = !max_p;
22842 else
22843 return 0;
22845 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22846 return 1;
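/* Editorial example (not part of the original source): for DFmode
   (a >= b ? a : b), code is GE and the operands match true/false
   directly, so max_p stays set and rs6000_emit_minmax emits SMAX,
   which the power9 patterns print as xsmaxcdp.  The mirrored form
   (a >= b ? b : a) flips max_p and ends up as xsmincdp.  */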
22849 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22850 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied to
22851 the operands of the last comparison is nonzero/true, FALSE_COND if it is
22852 zero/false. Return 0 if the hardware has no such operation. */
22854 static int
22855 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22857 enum rtx_code code = GET_CODE (op);
22858 rtx op0 = XEXP (op, 0);
22859 rtx op1 = XEXP (op, 1);
22860 machine_mode result_mode = GET_MODE (dest);
22861 rtx compare_rtx;
22862 rtx cmove_rtx;
22863 rtx clobber_rtx;
22865 if (!can_create_pseudo_p ())
22866 return 0;
22868 switch (code)
22870 case EQ:
22871 case GE:
22872 case GT:
22873 break;
22875 case NE:
22876 case LT:
22877 case LE:
22878 code = swap_condition (code);
22879 std::swap (op0, op1);
22880 break;
22882 default:
22883 return 0;
22886 /* Generate: [(parallel [(set (dest)
22887 (if_then_else (op (cmp1) (cmp2))
22888 (true)
22889 (false)))
22890 (clobber (scratch))])]. */
22892 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22893 cmove_rtx = gen_rtx_SET (dest,
22894 gen_rtx_IF_THEN_ELSE (result_mode,
22895 compare_rtx,
22896 true_cond,
22897 false_cond));
22899 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22900 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22901 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22903 return 1;
22906 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22907 operands of the last comparison is nonzero/true, FALSE_COND if it
22908 is zero/false. Return 0 if the hardware has no such operation. */
22910 int
22911 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22913 enum rtx_code code = GET_CODE (op);
22914 rtx op0 = XEXP (op, 0);
22915 rtx op1 = XEXP (op, 1);
22916 machine_mode compare_mode = GET_MODE (op0);
22917 machine_mode result_mode = GET_MODE (dest);
22918 rtx temp;
22919 bool is_against_zero;
22921 /* These modes should always match. */
22922 if (GET_MODE (op1) != compare_mode
22923 /* In the isel case however, we can use a compare immediate, so
22924 op1 may be a small constant. */
22925 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22926 return 0;
22927 if (GET_MODE (true_cond) != result_mode)
22928 return 0;
22929 if (GET_MODE (false_cond) != result_mode)
22930 return 0;
22932 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22933 if (TARGET_P9_MINMAX
22934 && (compare_mode == SFmode || compare_mode == DFmode)
22935 && (result_mode == SFmode || result_mode == DFmode))
22937 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22938 return 1;
22940 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22941 return 1;
22944 /* Don't allow using floating point comparisons for integer results for
22945 now. */
22946 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22947 return 0;
22949 /* First, work out if the hardware can do this at all, or
22950 if it's too slow.... */
22951 if (!FLOAT_MODE_P (compare_mode))
22953 if (TARGET_ISEL)
22954 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22955 return 0;
22957 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
22958 && SCALAR_FLOAT_MODE_P (compare_mode))
22959 return 0;
22961 is_against_zero = op1 == CONST0_RTX (compare_mode);
22963 /* A floating-point subtract might overflow, underflow, or produce
22964 an inexact result, thus changing the floating-point flags, so it
22965 can't be generated if we care about that. It's safe if one side
22966 of the construct is zero, since then no subtract will be
22967 generated. */
22968 if (SCALAR_FLOAT_MODE_P (compare_mode)
22969 && flag_trapping_math && ! is_against_zero)
22970 return 0;
22972 /* Eliminate half of the comparisons by switching operands, this
22973 makes the remaining code simpler. */
22974 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22975 || code == LTGT || code == LT || code == UNLE)
22977 code = reverse_condition_maybe_unordered (code);
22978 temp = true_cond;
22979 true_cond = false_cond;
22980 false_cond = temp;
22983 /* UNEQ and LTGT take four instructions for a comparison with zero,
22984 it'll probably be faster to use a branch here too. */
22985 if (code == UNEQ && HONOR_NANS (compare_mode))
22986 return 0;
22988 /* We're going to try to implement comparisons by performing
22989 a subtract, then comparing against zero. Unfortunately,
22990 Inf - Inf is NaN which is not zero, and so if we don't
22991 know that the operand is finite and the comparison
22992 would treat EQ differently from UNORDERED, we can't do it.
22993 if (HONOR_INFINITIES (compare_mode)
22994 && code != GT && code != UNGE
22995 && (GET_CODE (op1) != CONST_DOUBLE
22996 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22997 /* Constructs of the form (a OP b ? a : b) are safe. */
22998 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22999 || (! rtx_equal_p (op0, true_cond)
23000 && ! rtx_equal_p (op1, true_cond))))
23001 return 0;
23003 /* At this point we know we can use fsel. */
23005 /* Reduce the comparison to a comparison against zero. */
23006 if (! is_against_zero)
23008 temp = gen_reg_rtx (compare_mode);
23009 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23010 op0 = temp;
23011 op1 = CONST0_RTX (compare_mode);
23014 /* If we don't care about NaNs we can reduce some of the comparisons
23015 down to faster ones. */
23016 if (! HONOR_NANS (compare_mode))
23017 switch (code)
23019 case GT:
23020 code = LE;
23021 temp = true_cond;
23022 true_cond = false_cond;
23023 false_cond = temp;
23024 break;
23025 case UNGE:
23026 code = GE;
23027 break;
23028 case UNEQ:
23029 code = EQ;
23030 break;
23031 default:
23032 break;
23035 /* Now, reduce everything down to a GE. */
23036 switch (code)
23038 case GE:
23039 break;
23041 case LE:
23042 temp = gen_reg_rtx (compare_mode);
23043 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23044 op0 = temp;
23045 break;
23047 case ORDERED:
23048 temp = gen_reg_rtx (compare_mode);
23049 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23050 op0 = temp;
23051 break;
23053 case EQ:
23054 temp = gen_reg_rtx (compare_mode);
23055 emit_insn (gen_rtx_SET (temp,
23056 gen_rtx_NEG (compare_mode,
23057 gen_rtx_ABS (compare_mode, op0))));
23058 op0 = temp;
23059 break;
23061 case UNGE:
23062 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23063 temp = gen_reg_rtx (result_mode);
23064 emit_insn (gen_rtx_SET (temp,
23065 gen_rtx_IF_THEN_ELSE (result_mode,
23066 gen_rtx_GE (VOIDmode,
23067 op0, op1),
23068 true_cond, false_cond)));
23069 false_cond = true_cond;
23070 true_cond = temp;
23072 temp = gen_reg_rtx (compare_mode);
23073 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23074 op0 = temp;
23075 break;
23077 case GT:
23078 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23079 temp = gen_reg_rtx (result_mode);
23080 emit_insn (gen_rtx_SET (temp,
23081 gen_rtx_IF_THEN_ELSE (result_mode,
23082 gen_rtx_GE (VOIDmode,
23083 op0, op1),
23084 true_cond, false_cond)));
23085 true_cond = false_cond;
23086 false_cond = temp;
23088 temp = gen_reg_rtx (compare_mode);
23089 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23090 op0 = temp;
23091 break;
23093 default:
23094 gcc_unreachable ();
23097 emit_insn (gen_rtx_SET (dest,
23098 gen_rtx_IF_THEN_ELSE (result_mode,
23099 gen_rtx_GE (VOIDmode,
23100 op0, op1),
23101 true_cond, false_cond)));
23102 return 1;
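/* Editorial note (not part of the original source): the single GE form
   everything above funnels into matches the fsel pattern.  fsel
   fD,fA,fC,fB computes fD = (fA >= 0.0) ? fC : fB, so (x >= 0.0 ? p : q)
   maps directly, while e.g. EQ first computes fA = -fabs (x), which is
   >= 0.0 exactly when x == 0.0.  */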
23105 /* Same as above, but for ints (isel). */
23107 static int
23108 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23110 rtx condition_rtx, cr;
23111 machine_mode mode = GET_MODE (dest);
23112 enum rtx_code cond_code;
23113 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23114 bool signedp;
23116 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23117 return 0;
23119 /* We still have to do the compare, because isel doesn't do a
23120 compare, it just looks at the CRx bits set by a previous compare
23121 instruction. */
23122 condition_rtx = rs6000_generate_compare (op, mode);
23123 cond_code = GET_CODE (condition_rtx);
23124 cr = XEXP (condition_rtx, 0);
23125 signedp = GET_MODE (cr) == CCmode;
23127 isel_func = (mode == SImode
23128 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23129 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23131 switch (cond_code)
23133 case LT: case GT: case LTU: case GTU: case EQ:
23134 /* isel handles these directly. */
23135 break;
23137 default:
23138 /* We need to swap the sense of the comparison. */
23140 std::swap (false_cond, true_cond);
23141 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23143 break;
23146 false_cond = force_reg (mode, false_cond);
23147 if (true_cond != const0_rtx)
23148 true_cond = force_reg (mode, true_cond);
23150 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23152 return 1;
23155 const char *
23156 output_isel (rtx *operands)
23158 enum rtx_code code;
23160 code = GET_CODE (operands[1]);
23162 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
23164 gcc_assert (GET_CODE (operands[2]) == REG
23165 && GET_CODE (operands[3]) == REG);
23166 PUT_CODE (operands[1], reverse_condition (code));
23167 return "isel %0,%3,%2,%j1";
23170 return "isel %0,%2,%3,%j1";
23173 void
23174 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23176 machine_mode mode = GET_MODE (op0);
23177 enum rtx_code c;
23178 rtx target;
23180 /* VSX/altivec have direct min/max insns. */
23181 if ((code == SMAX || code == SMIN)
23182 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23183 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23185 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23186 return;
23189 if (code == SMAX || code == SMIN)
23190 c = GE;
23191 else
23192 c = GEU;
23194 if (code == SMAX || code == UMAX)
23195 target = emit_conditional_move (dest, c, op0, op1, mode,
23196 op0, op1, mode, 0);
23197 else
23198 target = emit_conditional_move (dest, c, op0, op1, mode,
23199 op1, op0, mode, 0);
23200 gcc_assert (target);
23201 if (target != dest)
23202 emit_move_insn (dest, target);
23205 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23206 the value to come from memory or to be already loaded into a GPR. */
23208 void
23209 rs6000_split_signbit (rtx dest, rtx src)
23211 machine_mode d_mode = GET_MODE (dest);
23212 machine_mode s_mode = GET_MODE (src);
23213 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23214 rtx shift_reg = dest_di;
23216 gcc_assert (REG_P (dest));
23217 gcc_assert (REG_P (src) || MEM_P (src));
23218 gcc_assert (s_mode == KFmode || s_mode == TFmode);
23220 if (MEM_P (src))
23222 rtx mem = (WORDS_BIG_ENDIAN
23223 ? adjust_address (src, DImode, 0)
23224 : adjust_address (src, DImode, 8));
23225 emit_insn (gen_rtx_SET (dest_di, mem));
23228 else
23230 unsigned int r = REGNO (src);
23232 /* If this is a VSX register, generate the special mfvsrd instruction
23233 to get it in a GPR. Until we support SF and DF modes, that will
23234 always be true. */
23235 gcc_assert (VSX_REGNO_P (r));
23237 if (s_mode == KFmode)
23238 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23239 else
23240 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23243 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23244 return;
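/* Editorial note (not part of the original source): the sign of an
   IEEE 128-bit value is bit 0 of its most-significant doubleword, which
   is why both paths above fetch that doubleword (offset 0 for
   big-endian memory, 8 for little-endian; mfvsrd for a VSX register)
   and then shift it right by 63, leaving 0 or 1 in DEST.  */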
23247 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23248 COND is true. Mark the jump as unlikely to be taken. */
23250 static void
23251 emit_unlikely_jump (rtx cond, rtx label)
23253 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
23254 rtx x;
23256 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23257 x = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23258 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
23261 /* A subroutine of the atomic operation splitters. Emit a load-locked
23262 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23263 the zero_extend operation. */
23265 static void
23266 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23268 rtx (*fn) (rtx, rtx) = NULL;
23270 switch (mode)
23272 case QImode:
23273 fn = gen_load_lockedqi;
23274 break;
23275 case HImode:
23276 fn = gen_load_lockedhi;
23277 break;
23278 case SImode:
23279 if (GET_MODE (mem) == QImode)
23280 fn = gen_load_lockedqi_si;
23281 else if (GET_MODE (mem) == HImode)
23282 fn = gen_load_lockedhi_si;
23283 else
23284 fn = gen_load_lockedsi;
23285 break;
23286 case DImode:
23287 fn = gen_load_lockeddi;
23288 break;
23289 case TImode:
23290 fn = gen_load_lockedti;
23291 break;
23292 default:
23293 gcc_unreachable ();
23295 emit_insn (fn (reg, mem));
23298 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23299 instruction in MODE. */
23301 static void
23302 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23304 rtx (*fn) (rtx, rtx, rtx) = NULL;
23306 switch (mode)
23308 case QImode:
23309 fn = gen_store_conditionalqi;
23310 break;
23311 case HImode:
23312 fn = gen_store_conditionalhi;
23313 break;
23314 case SImode:
23315 fn = gen_store_conditionalsi;
23316 break;
23317 case DImode:
23318 fn = gen_store_conditionaldi;
23319 break;
23320 case TImode:
23321 fn = gen_store_conditionalti;
23322 break;
23323 default:
23324 gcc_unreachable ();
23327 /* Emit sync before stwcx. to address PPC405 Erratum. */
23328 if (PPC405_ERRATUM77)
23329 emit_insn (gen_hwsync ());
23331 emit_insn (fn (res, mem, val));
23334 /* Expand barriers before and after a load_locked/store_cond sequence. */
23336 static rtx
23337 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23339 rtx addr = XEXP (mem, 0);
23340 int strict_p = (reload_in_progress || reload_completed);
23342 if (!legitimate_indirect_address_p (addr, strict_p)
23343 && !legitimate_indexed_address_p (addr, strict_p))
23345 addr = force_reg (Pmode, addr);
23346 mem = replace_equiv_address_nv (mem, addr);
23349 switch (model)
23351 case MEMMODEL_RELAXED:
23352 case MEMMODEL_CONSUME:
23353 case MEMMODEL_ACQUIRE:
23354 break;
23355 case MEMMODEL_RELEASE:
23356 case MEMMODEL_ACQ_REL:
23357 emit_insn (gen_lwsync ());
23358 break;
23359 case MEMMODEL_SEQ_CST:
23360 emit_insn (gen_hwsync ());
23361 break;
23362 default:
23363 gcc_unreachable ();
23365 return mem;
23368 static void
23369 rs6000_post_atomic_barrier (enum memmodel model)
23371 switch (model)
23373 case MEMMODEL_RELAXED:
23374 case MEMMODEL_CONSUME:
23375 case MEMMODEL_RELEASE:
23376 break;
23377 case MEMMODEL_ACQUIRE:
23378 case MEMMODEL_ACQ_REL:
23379 case MEMMODEL_SEQ_CST:
23380 emit_insn (gen_isync ());
23381 break;
23382 default:
23383 gcc_unreachable ();
23387 /* A subroutine of the various atomic expanders. For sub-word operations,
23388 we must adjust things to operate on SImode. Given the original MEM,
23389 return a new aligned memory. Also build and return the quantities by
23390 which to shift and mask. */
23392 static rtx
23393 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23395 rtx addr, align, shift, mask, mem;
23396 HOST_WIDE_INT shift_mask;
23397 machine_mode mode = GET_MODE (orig_mem);
23399 /* For smaller modes, we have to implement this via SImode. */
23400 shift_mask = (mode == QImode ? 0x18 : 0x10);
23402 addr = XEXP (orig_mem, 0);
23403 addr = force_reg (GET_MODE (addr), addr);
23405 /* Aligned memory containing subword. Generate a new memory. We
23406 do not want any of the existing MEM_ATTR data, as we're now
23407 accessing memory outside the original object. */
23408 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23409 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23410 mem = gen_rtx_MEM (SImode, align);
23411 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23412 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23413 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23415 /* Shift amount for subword relative to aligned word. */
23416 shift = gen_reg_rtx (SImode);
23417 addr = gen_lowpart (SImode, addr);
23418 rtx tmp = gen_reg_rtx (SImode);
23419 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23420 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23421 if (BYTES_BIG_ENDIAN)
23422 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23423 shift, 1, OPTAB_LIB_WIDEN);
23424 *pshift = shift;
23426 /* Mask for insertion. */
23427 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23428 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23429 *pmask = mask;
23431 return mem;
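/* Editorial worked example (not part of the original source): a QImode
   operand at address 0x1003 on a big-endian target.  align = 0x1003 &
   -4 = 0x1000; shift = ((0x1003 << 3) & 0x18) ^ 0x18 = 0x18 ^ 0x18 = 0,
   which is right because on BE the byte at offset 3 is the
   least-significant byte of the aligned SImode word; and mask =
   0xff << 0 = 0xff.  */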
23434 /* A subroutine of the various atomic expanders. For sub-word operands,
23435 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23437 static rtx
23438 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23440 rtx x;
23442 x = gen_reg_rtx (SImode);
23443 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23444 gen_rtx_NOT (SImode, mask),
23445 oldval)));
23447 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23449 return x;
23452 /* A subroutine of the various atomic expanders. For sub-word operands,
23453 extract WIDE to NARROW via SHIFT. */
23455 static void
23456 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23458 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23459 wide, 1, OPTAB_LIB_WIDEN);
23460 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23463 /* Expand an atomic compare and swap operation. */
23465 void
23466 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23468 rtx boolval, retval, mem, oldval, newval, cond;
23469 rtx label1, label2, x, mask, shift;
23470 machine_mode mode, orig_mode;
23471 enum memmodel mod_s, mod_f;
23472 bool is_weak;
23474 boolval = operands[0];
23475 retval = operands[1];
23476 mem = operands[2];
23477 oldval = operands[3];
23478 newval = operands[4];
23479 is_weak = (INTVAL (operands[5]) != 0);
23480 mod_s = memmodel_base (INTVAL (operands[6]));
23481 mod_f = memmodel_base (INTVAL (operands[7]));
23482 orig_mode = mode = GET_MODE (mem);
23484 mask = shift = NULL_RTX;
23485 if (mode == QImode || mode == HImode)
23487 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23488 lwarx and shift/mask operations. With power8, we need to do the
23489 comparison in SImode, but the store is still done in QI/HImode. */
23490 oldval = convert_modes (SImode, mode, oldval, 1);
23492 if (!TARGET_SYNC_HI_QI)
23494 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23496 /* Shift and mask OLDVAL into position within the word. */
23497 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23498 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23500 /* Shift and mask NEWVAL into position within the word. */
23501 newval = convert_modes (SImode, mode, newval, 1);
23502 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23503 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23506 /* Prepare to adjust the return value. */
23507 retval = gen_reg_rtx (SImode);
23508 mode = SImode;
23510 else if (reg_overlap_mentioned_p (retval, oldval))
23511 oldval = copy_to_reg (oldval);
23513 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23514 oldval = copy_to_mode_reg (mode, oldval);
23516 if (reg_overlap_mentioned_p (retval, newval))
23517 newval = copy_to_reg (newval);
23519 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23521 label1 = NULL_RTX;
23522 if (!is_weak)
23524 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23525 emit_label (XEXP (label1, 0));
23527 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23529 emit_load_locked (mode, retval, mem);
23531 x = retval;
23532 if (mask)
23533 x = expand_simple_binop (SImode, AND, retval, mask,
23534 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23536 cond = gen_reg_rtx (CCmode);
23537 /* If we have TImode, synthesize a comparison. */
23538 if (mode != TImode)
23539 x = gen_rtx_COMPARE (CCmode, x, oldval);
23540 else
23542 rtx xor1_result = gen_reg_rtx (DImode);
23543 rtx xor2_result = gen_reg_rtx (DImode);
23544 rtx or_result = gen_reg_rtx (DImode);
23545 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23546 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23547 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23548 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23550 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23551 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23552 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23553 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23556 emit_insn (gen_rtx_SET (cond, x));
23558 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23559 emit_unlikely_jump (x, label2);
23561 x = newval;
23562 if (mask)
23563 x = rs6000_mask_atomic_subword (retval, newval, mask);
23565 emit_store_conditional (orig_mode, cond, mem, x);
23567 if (!is_weak)
23569 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23570 emit_unlikely_jump (x, label1);
23573 if (!is_mm_relaxed (mod_f))
23574 emit_label (XEXP (label2, 0));
23576 rs6000_post_atomic_barrier (mod_s);
23578 if (is_mm_relaxed (mod_f))
23579 emit_label (XEXP (label2, 0));
23581 if (shift)
23582 rs6000_finish_atomic_subword (operands[1], retval, shift);
23583 else if (mode != GET_MODE (operands[1]))
23584 convert_move (operands[1], retval, 1);
23586 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23587 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23588 emit_insn (gen_rtx_SET (boolval, x));
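/* Editorial sketch (not part of the original source): for a strong
   SImode compare-and-swap with seq_cst ordering, the expansion above
   amounts to the classic larx/stcx. loop:

	hwsync
	1: lwarx  r,0,mem
	   cmpw   cr0,r,oldval
	   bne-   cr0,2f
	   stwcx. newval,0,mem
	   bne-   cr0,1b
	2: isync

   leaving CR0 EQ on success and NE on failure, exactly what the final
   gen_rtx_SET of BOOLVAL tests.  */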
23591 /* Expand an atomic exchange operation. */
23593 void
23594 rs6000_expand_atomic_exchange (rtx operands[])
23596 rtx retval, mem, val, cond;
23597 machine_mode mode;
23598 enum memmodel model;
23599 rtx label, x, mask, shift;
23601 retval = operands[0];
23602 mem = operands[1];
23603 val = operands[2];
23604 model = memmodel_base (INTVAL (operands[3]));
23605 mode = GET_MODE (mem);
23607 mask = shift = NULL_RTX;
23608 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23610 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23612 /* Shift and mask VAL into position within the word. */
23613 val = convert_modes (SImode, mode, val, 1);
23614 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23615 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23617 /* Prepare to adjust the return value. */
23618 retval = gen_reg_rtx (SImode);
23619 mode = SImode;
23622 mem = rs6000_pre_atomic_barrier (mem, model);
23624 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23625 emit_label (XEXP (label, 0));
23627 emit_load_locked (mode, retval, mem);
23629 x = val;
23630 if (mask)
23631 x = rs6000_mask_atomic_subword (retval, val, mask);
23633 cond = gen_reg_rtx (CCmode);
23634 emit_store_conditional (mode, cond, mem, x);
23636 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23637 emit_unlikely_jump (x, label);
23639 rs6000_post_atomic_barrier (model);
23641 if (shift)
23642 rs6000_finish_atomic_subword (operands[0], retval, shift);
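/* For reference (a sketch; SHARED and NEWVAL are hypothetical names), a
   source-level form that reaches this expander is:

     old = __atomic_exchange_n (&shared, newval, __ATOMIC_SEQ_CST);

   which becomes a load-locked/store-conditional loop that stores NEWVAL
   unconditionally, without comparing the old contents first.  */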
23645 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23646 to perform. MEM is the memory on which to operate. VAL is the second
23647 operand of the binary operator. BEFORE and AFTER are optional locations to
23648 return the value of MEM either before or after the operation. MODEL_RTX
23649 is a CONST_INT containing the memory model to use. */
23651 void
23652 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23653 rtx orig_before, rtx orig_after, rtx model_rtx)
23655 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23656 machine_mode mode = GET_MODE (mem);
23657 machine_mode store_mode = mode;
23658 rtx label, x, cond, mask, shift;
23659 rtx before = orig_before, after = orig_after;
23661 mask = shift = NULL_RTX;
23662 /* On power8, we want to use SImode for the operation. On previous systems,
23663 perform the operation on a subword and shift/mask to get the proper byte or
23664 halfword. */
23665 if (mode == QImode || mode == HImode)
23667 if (TARGET_SYNC_HI_QI)
23669 val = convert_modes (SImode, mode, val, 1);
23671 /* Prepare to adjust the return value. */
23672 before = gen_reg_rtx (SImode);
23673 if (after)
23674 after = gen_reg_rtx (SImode);
23675 mode = SImode;
23677 else
23679 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23681 /* Shift and mask VAL into position within the word. */
23682 val = convert_modes (SImode, mode, val, 1);
23683 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23684 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23686 switch (code)
23688 case IOR:
23689 case XOR:
23690 /* We've already zero-extended VAL. That is sufficient to
23691 make certain that it does not affect other bits. */
23692 mask = NULL;
23693 break;
23695 case AND:
23696 /* If we make certain that all of the other bits in VAL are
23697 set, that will be sufficient to not affect other bits. */
23698 x = gen_rtx_NOT (SImode, mask);
23699 x = gen_rtx_IOR (SImode, x, val);
23700 emit_insn (gen_rtx_SET (val, x));
23701 mask = NULL;
23702 break;
23704 case NOT:
23705 case PLUS:
23706 case MINUS:
23707 /* These will all affect bits outside the field and need
23708 adjustment via MASK within the loop. */
23709 break;
23711 default:
23712 gcc_unreachable ();
23715 /* Prepare to adjust the return value. */
23716 before = gen_reg_rtx (SImode);
23717 if (after)
23718 after = gen_reg_rtx (SImode);
23719 store_mode = mode = SImode;
23723 mem = rs6000_pre_atomic_barrier (mem, model);
23725 label = gen_label_rtx ();
23726 emit_label (label);
23727 label = gen_rtx_LABEL_REF (VOIDmode, label);
23729 if (before == NULL_RTX)
23730 before = gen_reg_rtx (mode);
23732 emit_load_locked (mode, before, mem);
23734 if (code == NOT)
23736 x = expand_simple_binop (mode, AND, before, val,
23737 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23738 after = expand_simple_unop (mode, NOT, x, after, 1);
23740 else
23742 after = expand_simple_binop (mode, code, before, val,
23743 after, 1, OPTAB_LIB_WIDEN);
23746 x = after;
23747 if (mask)
23749 x = expand_simple_binop (SImode, AND, after, mask,
23750 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23751 x = rs6000_mask_atomic_subword (before, x, mask);
23753 else if (store_mode != mode)
23754 x = convert_modes (store_mode, mode, x, 1);
23756 cond = gen_reg_rtx (CCmode);
23757 emit_store_conditional (store_mode, cond, mem, x);
23759 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23760 emit_unlikely_jump (x, label);
23762 rs6000_post_atomic_barrier (model);
23764 if (shift)
23766 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23767 then do the calculations in a SImode register. */
23768 if (orig_before)
23769 rs6000_finish_atomic_subword (orig_before, before, shift);
23770 if (orig_after)
23771 rs6000_finish_atomic_subword (orig_after, after, shift);
23773 else if (store_mode != mode)
23775 /* QImode/HImode on machines with lbarx/lharx where we do the native
23776 operation and then do the calculations in a SImode register. */
23777 if (orig_before)
23778 convert_move (orig_before, before, 1);
23779 if (orig_after)
23780 convert_move (orig_after, after, 1);
23782 else if (orig_after && after != orig_after)
23783 emit_move_insn (orig_after, after);
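/* For reference (a sketch; COUNTER is a hypothetical name), builtins
   such as

     before = __atomic_fetch_add (&counter, 1, __ATOMIC_ACQ_REL);
     after  = __atomic_add_fetch (&counter, 1, __ATOMIC_ACQ_REL);

   funnel into this expander with CODE == PLUS; the same loop computes
   both values, and only which register is copied back through
   ORIG_BEFORE/ORIG_AFTER differs.  NAND uses CODE == NOT, handled by
   the AND-then-NOT sequence above.  */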
23786 /* Emit instructions to move SRC to DST. Called by splitters for
23787 multi-register moves. It will emit at most one instruction for
23788 each register that is accessed; that is, it won't emit li/lis pairs
23789 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23790 register. */
23792 void
23793 rs6000_split_multireg_move (rtx dst, rtx src)
23795 /* The register number of the first register being moved. */
23796 int reg;
23797 /* The mode that is to be moved. */
23798 machine_mode mode;
23799 /* The mode that the move is being done in, and its size. */
23800 machine_mode reg_mode;
23801 int reg_mode_size;
23802 /* The number of registers that will be moved. */
23803 int nregs;
23805 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23806 mode = GET_MODE (dst);
23807 nregs = hard_regno_nregs[reg][mode];
23808 if (FP_REGNO_P (reg))
23809 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23810 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23811 else if (ALTIVEC_REGNO_P (reg))
23812 reg_mode = V16QImode;
23813 else if (TARGET_E500_DOUBLE && FLOAT128_2REG_P (mode))
23814 reg_mode = DFmode;
23815 else
23816 reg_mode = word_mode;
23817 reg_mode_size = GET_MODE_SIZE (reg_mode);
23819 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23821 /* TDmode residing in FP registers is special, since the ISA requires that
23822 the lower-numbered word of a register pair is always the most significant
23823 word, even in little-endian mode. This does not match the usual subreg
23824 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23825 the appropriate constituent registers "by hand" in little-endian mode.
23827 Note we do not need to check for destructive overlap here since TDmode
23828 can only reside in even/odd register pairs. */
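/* For example (illustrative): a TDmode value held in fr10:fr11 keeps
   its most significant word in fr10 even on a little-endian target, so
   the loop below indexes the register pair in reverse rather than going
   through simplify_gen_subreg.  */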
23829 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23831 rtx p_src, p_dst;
23832 int i;
23834 for (i = 0; i < nregs; i++)
23836 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23837 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23838 else
23839 p_src = simplify_gen_subreg (reg_mode, src, mode,
23840 i * reg_mode_size);
23842 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23843 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23844 else
23845 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23846 i * reg_mode_size);
23848 emit_insn (gen_rtx_SET (p_dst, p_src));
23851 return;
23854 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23856 /* Move register range backwards, if we might have destructive
23857 overlap. */
23858 int i;
23859 for (i = nregs - 1; i >= 0; i--)
23860 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23861 i * reg_mode_size),
23862 simplify_gen_subreg (reg_mode, src, mode,
23863 i * reg_mode_size)));
23865 else
23867 int i;
23868 int j = -1;
23869 bool used_update = false;
23870 rtx restore_basereg = NULL_RTX;
23872 if (MEM_P (src) && INT_REGNO_P (reg))
23874 rtx breg;
23876 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23877 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23879 rtx delta_rtx;
23880 breg = XEXP (XEXP (src, 0), 0);
23881 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23882 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23883 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23884 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23885 src = replace_equiv_address (src, breg);
23887 else if (! rs6000_offsettable_memref_p (src, reg_mode))
23889 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23891 rtx basereg = XEXP (XEXP (src, 0), 0);
23892 if (TARGET_UPDATE)
23894 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23895 emit_insn (gen_rtx_SET (ndst,
23896 gen_rtx_MEM (reg_mode,
23897 XEXP (src, 0))));
23898 used_update = true;
23900 else
23901 emit_insn (gen_rtx_SET (basereg,
23902 XEXP (XEXP (src, 0), 1)));
23903 src = replace_equiv_address (src, basereg);
23905 else
23907 rtx basereg = gen_rtx_REG (Pmode, reg);
23908 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23909 src = replace_equiv_address (src, basereg);
23913 breg = XEXP (src, 0);
23914 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23915 breg = XEXP (breg, 0);
23917 /* If the base register we are using to address memory is
23918 also a destination reg, then change that register last. */
23919 if (REG_P (breg)
23920 && REGNO (breg) >= REGNO (dst)
23921 && REGNO (breg) < REGNO (dst) + nregs)
23922 j = REGNO (breg) - REGNO (dst);
23924 else if (MEM_P (dst) && INT_REGNO_P (reg))
23926 rtx breg;
23928 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23929 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23931 rtx delta_rtx;
23932 breg = XEXP (XEXP (dst, 0), 0);
23933 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23934 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23935 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23937 /* We have to update the breg before doing the store.
23938 Use store with update, if available. */
23940 if (TARGET_UPDATE)
23942 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23943 emit_insn (TARGET_32BIT
23944 ? (TARGET_POWERPC64
23945 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23946 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23947 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23948 used_update = true;
23950 else
23951 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23952 dst = replace_equiv_address (dst, breg);
23954 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
23955 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23957 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23959 rtx basereg = XEXP (XEXP (dst, 0), 0);
23960 if (TARGET_UPDATE)
23962 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23963 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23964 XEXP (dst, 0)),
23965 nsrc));
23966 used_update = true;
23968 else
23969 emit_insn (gen_rtx_SET (basereg,
23970 XEXP (XEXP (dst, 0), 1)));
23971 dst = replace_equiv_address (dst, basereg);
23973 else
23975 rtx basereg = XEXP (XEXP (dst, 0), 0);
23976 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23977 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23978 && REG_P (basereg)
23979 && REG_P (offsetreg)
23980 && REGNO (basereg) != REGNO (offsetreg));
23981 if (REGNO (basereg) == 0)
23983 rtx tmp = offsetreg;
23984 offsetreg = basereg;
23985 basereg = tmp;
23987 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23988 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23989 dst = replace_equiv_address (dst, basereg);
23992 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23993 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
23996 for (i = 0; i < nregs; i++)
23998 /* Calculate index to next subword. */
23999 ++j;
24000 if (j == nregs)
24001 j = 0;
24003 /* If the compiler already emitted the move of the first word by
24004 store with update, there is no need to do anything. */
24005 if (j == 0 && used_update)
24006 continue;
24008 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24009 j * reg_mode_size),
24010 simplify_gen_subreg (reg_mode, src, mode,
24011 j * reg_mode_size)));
24013 if (restore_basereg != NULL_RTX)
24014 emit_insn (restore_basereg);
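/* For reference, a sketch (illustrative) of the overlap handling above:
   moving a TImode value from r4:r5 into r5:r6 on a 64-bit target must
   emit

	mr 6,5
	mr 5,4

   i.e. the highest-numbered register first, since copying r4 into r5
   first would clobber the second source register.  */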
24019 /* This page contains routines that are used to determine what the
24020 function prologue and epilogue code will do and write them out. */
24022 static inline bool
24023 save_reg_p (int r)
24025 return !call_used_regs[r] && df_regs_ever_live_p (r);
24028 /* Determine whether the gp REG is really used. */
24030 static bool
24031 rs6000_reg_live_or_pic_offset_p (int reg)
24033 /* We need to mark the PIC offset register live under the same conditions
24034 in which it is set up; otherwise it won't be saved before we clobber it. */
24036 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24038 if (TARGET_TOC && TARGET_MINIMAL_TOC
24039 && (crtl->calls_eh_return
24040 || df_regs_ever_live_p (reg)
24041 || get_pool_size ()))
24042 return true;
24044 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24045 && flag_pic)
24046 return true;
24049 /* If the function calls eh_return, claim as used all the registers that
24050 would otherwise be checked for liveness. */
24052 return ((crtl->calls_eh_return || df_regs_ever_live_p (reg))
24053 && !call_used_regs[reg]);
24056 /* Return the first fixed-point register that is required to be
24057 saved. 32 if none. */
24059 static int
24060 first_reg_to_save (void)
24062 int first_reg;
24064 /* Find lowest numbered live register. */
24065 for (first_reg = 13; first_reg <= 31; first_reg++)
24066 if (save_reg_p (first_reg))
24067 break;
24069 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
24070 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
24071 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24072 || (TARGET_TOC && TARGET_MINIMAL_TOC))
24073 && rs6000_reg_live_or_pic_offset_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
24074 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
24076 #if TARGET_MACHO
24077 if (flag_pic
24078 && crtl->uses_pic_offset_table
24079 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24080 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24081 #endif
24083 return first_reg;
24086 /* Similar, for FP regs. */
24088 static int
24089 first_fp_reg_to_save (void)
24091 int first_reg;
24093 /* Find lowest numbered live register. */
24094 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24095 if (save_reg_p (first_reg))
24096 break;
24098 return first_reg;
24101 /* Similar, for AltiVec regs. */
24103 static int
24104 first_altivec_reg_to_save (void)
24106 int i;
24108 /* Stack frame remains as is unless we are in the AltiVec ABI. */
24109 if (! TARGET_ALTIVEC_ABI)
24110 return LAST_ALTIVEC_REGNO + 1;
24112 /* On Darwin, the unwind routines are compiled without
24113 TARGET_ALTIVEC, and use save_world to save/restore the
24114 altivec registers when necessary. */
24115 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24116 && ! TARGET_ALTIVEC)
24117 return FIRST_ALTIVEC_REGNO + 20;
24119 /* Find lowest numbered live register. */
24120 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24121 if (save_reg_p (i))
24122 break;
24124 return i;
24127 /* Return a 32-bit mask of the AltiVec registers we need to set in
24128 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24129 the 32-bit word is 0. */
24131 static unsigned int
24132 compute_vrsave_mask (void)
24134 unsigned int i, mask = 0;
24136 /* On Darwin, the unwind routines are compiled without
24137 TARGET_ALTIVEC, and use save_world to save/restore the
24138 call-saved altivec registers when necessary. */
24139 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24140 && ! TARGET_ALTIVEC)
24141 mask |= 0xFFF;
24143 /* First, find out if we use _any_ altivec registers. */
24144 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24145 if (df_regs_ever_live_p (i))
24146 mask |= ALTIVEC_REG_BIT (i);
24148 if (mask == 0)
24149 return mask;
24151 /* Next, remove the argument registers from the set. These must
24152 be in the VRSAVE mask set by the caller, so we don't need to add
24153 them in again. More importantly, the mask we compute here is
24154 used to generate CLOBBERs in the set_vrsave insn, and we do not
24155 wish the argument registers to die. */
24156 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24157 mask &= ~ALTIVEC_REG_BIT (i);
24159 /* Similarly, remove the return value from the set. */
24161 bool yes = false;
24162 diddle_return_value (is_altivec_return_reg, &yes);
24163 if (yes)
24164 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24167 return mask;
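/* For example (illustrative): if only v20 and v31 are ever live, the
   mask computed above is 0x00000801 -- v20 is bit 20 counting from the
   MSB (the value 0x800) and v31 is bit 31 (the value 0x1).  */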
24170 /* For a very restricted set of circumstances, we can cut down the
24171 size of prologues/epilogues by calling our own save/restore-the-world
24172 routines. */
24174 static void
24175 compute_save_world_info (rs6000_stack_t *info)
24177 info->world_save_p = 1;
24178 info->world_save_p
24179 = (WORLD_SAVE_P (info)
24180 && DEFAULT_ABI == ABI_DARWIN
24181 && !cfun->has_nonlocal_label
24182 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24183 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24184 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24185 && info->cr_save_p);
24187 /* This will not work in conjunction with sibcalls. Make sure there
24188 are none. (This check is expensive, but seldom executed.) */
24189 if (WORLD_SAVE_P (info))
24191 rtx_insn *insn;
24192 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24193 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24195 info->world_save_p = 0;
24196 break;
24200 if (WORLD_SAVE_P (info))
24202 /* Even if we're not touching VRsave, make sure there's room on the
24203 stack for it, if it looks like we're calling SAVE_WORLD, which
24204 will attempt to save it. */
24205 info->vrsave_size = 4;
24207 /* If we are going to save the world, we need to save the link register too. */
24208 info->lr_save_p = 1;
24210 /* "Save" the VRsave register too if we're saving the world. */
24211 if (info->vrsave_mask == 0)
24212 info->vrsave_mask = compute_vrsave_mask ();
24214 /* Because the Darwin register save/restore routines only handle
24215 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24216 check. */
24217 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24218 && (info->first_altivec_reg_save
24219 >= FIRST_SAVED_ALTIVEC_REGNO));
24222 return;
24226 static void
24227 is_altivec_return_reg (rtx reg, void *xyes)
24229 bool *yes = (bool *) xyes;
24230 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24231 *yes = true;
24235 /* Return whether REG is a global user reg or has been specified by
24236 -ffixed-REG. We should not restore these, and so cannot use
24237 lmw or out-of-line restore functions if there are any. We also
24238 can't save them (well, emit frame notes for them), because frame
24239 unwinding during exception handling will restore saved registers. */
24241 static bool
24242 fixed_reg_p (int reg)
24244 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24245 backend sets it, overriding anything the user might have given. */
24246 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24247 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24248 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24249 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24250 return false;
24252 return fixed_regs[reg];
24255 /* Determine the strategy for saving/restoring registers. */
24257 enum {
24258 SAVE_MULTIPLE = 0x1,
24259 SAVE_INLINE_GPRS = 0x2,
24260 SAVE_INLINE_FPRS = 0x4,
24261 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24262 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24263 SAVE_INLINE_VRS = 0x20,
24264 REST_MULTIPLE = 0x100,
24265 REST_INLINE_GPRS = 0x200,
24266 REST_INLINE_FPRS = 0x400,
24267 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24268 REST_INLINE_VRS = 0x1000
24271 static int
24272 rs6000_savres_strategy (rs6000_stack_t *info,
24273 bool using_static_chain_p)
24275 int strategy = 0;
24277 /* Select between in-line and out-of-line save and restore of regs.
24278 First, all the obvious cases where we don't use out-of-line. */
24279 if (crtl->calls_eh_return
24280 || cfun->machine->ra_need_lr)
24281 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24282 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24283 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24285 if (info->first_gp_reg_save == 32)
24286 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24288 if (info->first_fp_reg_save == 64
24289 /* The out-of-line FP routines use double-precision stores;
24290 we can't use those routines if we don't have such stores. */
24291 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24292 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24294 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24295 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24297 /* Define cutoff for using out-of-line functions to save registers. */
24298 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24300 if (!optimize_size)
24302 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24303 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24304 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24306 else
24308 /* Prefer out-of-line restore if it will exit. */
24309 if (info->first_fp_reg_save > 61)
24310 strategy |= SAVE_INLINE_FPRS;
24311 if (info->first_gp_reg_save > 29)
24313 if (info->first_fp_reg_save == 64)
24314 strategy |= SAVE_INLINE_GPRS;
24315 else
24316 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24318 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24319 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24322 else if (DEFAULT_ABI == ABI_DARWIN)
24324 if (info->first_fp_reg_save > 60)
24325 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24326 if (info->first_gp_reg_save > 29)
24327 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24328 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24330 else
24332 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24333 if (info->first_fp_reg_save > 61)
24334 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24335 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24336 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24339 /* Don't bother to try to save things out-of-line if r11 is occupied
24340 by the static chain. It would require too much fiddling and the
24341 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24342 pointer on Darwin, and AIX uses r1 or r12. */
24343 if (using_static_chain_p
24344 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24345 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24346 | SAVE_INLINE_GPRS
24347 | SAVE_INLINE_VRS);
24349 /* Saving CR interferes with the exit routines used on the SPE, so
24350 just punt here. */
24351 if (TARGET_SPE_ABI
24352 && info->spe_64bit_regs_used
24353 && info->cr_save_p)
24354 strategy |= REST_INLINE_GPRS;
24356 /* We can only use the out-of-line routines to restore fprs if we've
24357 saved all the registers from first_fp_reg_save in the prologue.
24358 Otherwise, we risk loading garbage. Of course, if we have saved
24359 out-of-line then we know we haven't skipped any fprs. */
24360 if ((strategy & SAVE_INLINE_FPRS)
24361 && !(strategy & REST_INLINE_FPRS))
24363 int i;
24365 for (i = info->first_fp_reg_save; i < 64; i++)
24366 if (fixed_regs[i] || !save_reg_p (i))
24368 strategy |= REST_INLINE_FPRS;
24369 break;
24373 /* Similarly, for altivec regs. */
24374 if ((strategy & SAVE_INLINE_VRS)
24375 && !(strategy & REST_INLINE_VRS))
24377 int i;
24379 for (i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24380 if (fixed_regs[i] || !save_reg_p (i))
24382 strategy |= REST_INLINE_VRS;
24383 break;
24387 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24388 saved is an out-of-line save or restore. Set up the value for
24389 the next test (excluding out-of-line gprs). */
24390 bool lr_save_p = (info->lr_save_p
24391 || !(strategy & SAVE_INLINE_FPRS)
24392 || !(strategy & SAVE_INLINE_VRS)
24393 || !(strategy & REST_INLINE_FPRS)
24394 || !(strategy & REST_INLINE_VRS));
24396 if (TARGET_MULTIPLE
24397 && !TARGET_POWERPC64
24398 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
24399 && info->first_gp_reg_save < 31)
24401 /* Prefer store multiple for saves over out-of-line routines,
24402 since the store-multiple instruction will always be smaller. */
24403 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24405 /* The situation is more complicated with load multiple. We'd
24406 prefer to use the out-of-line routines for restores, since the
24407 "exit" out-of-line routines can handle the restore of LR and the
24408 frame teardown. However it doesn't make sense to use the
24409 out-of-line routine if that is the only reason we'd need to save
24410 LR, and we can't use the "exit" out-of-line gpr restore if we
24411 have saved some fprs; in those cases it is advantageous to use
24412 load multiple when available. */
24413 if (info->first_fp_reg_save != 64 || !lr_save_p)
24414 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24417 /* Using the "exit" out-of-line routine does not improve code size
24418 if using it would require lr to be saved and if only saving one
24419 or two gprs. */
24420 else if (!lr_save_p && info->first_gp_reg_save > 29)
24421 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24423 /* We can only use load multiple or the out-of-line routines to
24424 restore gprs if we've saved all the registers from
24425 first_gp_reg_save. Otherwise, we risk loading garbage.
24426 Of course, if we have saved out-of-line or used stmw then we know
24427 we haven't skipped any gprs. */
24428 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24429 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24431 int i;
24433 for (i = info->first_gp_reg_save; i < 32; i++)
24434 if (fixed_reg_p (i) || !save_reg_p (i))
24436 strategy |= REST_INLINE_GPRS;
24437 strategy &= ~REST_MULTIPLE;
24438 break;
24442 if (TARGET_ELF && TARGET_64BIT)
24444 if (!(strategy & SAVE_INLINE_FPRS))
24445 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24446 else if (!(strategy & SAVE_INLINE_GPRS)
24447 && info->first_fp_reg_save == 64)
24448 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24450 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24451 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24453 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24454 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24456 return strategy;
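/* For example (illustrative): an -Os V.4 function that saves r28-r31
   but no FPRs or AltiVec registers, compiled without store-multiple,
   typically ends up with SAVE_INLINE_FPRS, REST_INLINE_FPRS,
   SAVE_INLINE_VRS and REST_INLINE_VRS set and the GPR bits clear, so
   the GPRs go through the out-of-line save/restore routines (whose
   exact names vary by ABI).  */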
24459 /* Calculate the stack information for the current function. This is
24460 complicated by having two separate calling sequences, the AIX calling
24461 sequence and the V.4 calling sequence.
24463 AIX (and Darwin/Mac OS X) stack frames look like:
24464 32-bit 64-bit
24465 SP----> +---------------------------------------+
24466 | back chain to caller | 0 0
24467 +---------------------------------------+
24468 | saved CR | 4 8 (8-11)
24469 +---------------------------------------+
24470 | saved LR | 8 16
24471 +---------------------------------------+
24472 | reserved for compilers | 12 24
24473 +---------------------------------------+
24474 | reserved for binders | 16 32
24475 +---------------------------------------+
24476 | saved TOC pointer | 20 40
24477 +---------------------------------------+
24478 | Parameter save area (P) | 24 48
24479 +---------------------------------------+
24480 | Alloca space (A) | 24+P etc.
24481 +---------------------------------------+
24482 | Local variable space (L) | 24+P+A
24483 +---------------------------------------+
24484 | Float/int conversion temporary (X) | 24+P+A+L
24485 +---------------------------------------+
24486 | Save area for AltiVec registers (W) | 24+P+A+L+X
24487 +---------------------------------------+
24488 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24489 +---------------------------------------+
24490 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24491 +---------------------------------------+
24492 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24493 +---------------------------------------+
24494 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24495 +---------------------------------------+
24496 old SP->| back chain to caller's caller |
24497 +---------------------------------------+
24499 The required alignment for AIX configurations is two words (i.e., 8
24500 or 16 bytes).
24502 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24504 SP----> +---------------------------------------+
24505 | Back chain to caller | 0
24506 +---------------------------------------+
24507 | Save area for CR | 8
24508 +---------------------------------------+
24509 | Saved LR | 16
24510 +---------------------------------------+
24511 | Saved TOC pointer | 24
24512 +---------------------------------------+
24513 | Parameter save area (P) | 32
24514 +---------------------------------------+
24515 | Alloca space (A) | 32+P
24516 +---------------------------------------+
24517 | Local variable space (L) | 32+P+A
24518 +---------------------------------------+
24519 | Save area for AltiVec registers (W) | 32+P+A+L
24520 +---------------------------------------+
24521 | AltiVec alignment padding (Y) | 32+P+A+L+W
24522 +---------------------------------------+
24523 | Save area for GP registers (G) | 32+P+A+L+W+Y
24524 +---------------------------------------+
24525 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24526 +---------------------------------------+
24527 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24528 +---------------------------------------+
24531 V.4 stack frames look like:
24533 SP----> +---------------------------------------+
24534 | back chain to caller | 0
24535 +---------------------------------------+
24536 | caller's saved LR | 4
24537 +---------------------------------------+
24538 | Parameter save area (P) | 8
24539 +---------------------------------------+
24540 | Alloca space (A) | 8+P
24541 +---------------------------------------+
24542 | Varargs save area (V) | 8+P+A
24543 +---------------------------------------+
24544 | Local variable space (L) | 8+P+A+V
24545 +---------------------------------------+
24546 | Float/int conversion temporary (X) | 8+P+A+V+L
24547 +---------------------------------------+
24548 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24549 +---------------------------------------+
24550 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24551 +---------------------------------------+
24552 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24553 +---------------------------------------+
24554 | SPE: area for 64-bit GP registers |
24555 +---------------------------------------+
24556 | SPE alignment padding |
24557 +---------------------------------------+
24558 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24559 +---------------------------------------+
24560 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24561 +---------------------------------------+
24562 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24563 +---------------------------------------+
24564 old SP->| back chain to caller's caller |
24565 +---------------------------------------+
24567 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24568 given. (But note below and in sysv4.h that we require only 8 and
24569 may round up the size of our stack frame anyway. The historical
24570 reason is early versions of powerpc-linux which didn't properly
24571 align the stack at program startup. A happy side-effect is that
24572 -mno-eabi libraries can be used with -meabi programs.)
24574 The EABI configuration defaults to the V.4 layout. However,
24575 the stack alignment requirements may differ. If -mno-eabi is not
24576 given, the required stack alignment is 8 bytes; if -mno-eabi is
24577 given, the required alignment is 16 bytes. (But see V.4 comment
24578 above.) */
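/* As a worked example (illustrative): a 32-bit V.4 function with 40
   bytes of locals that saves r30, r31 and LR, assuming no AltiVec or
   SPE save areas (8-byte save alignment) and the 16-byte ABI stack
   boundary, has fixed_size 8, vars_size 40 and save_size 8, so
   total_size is RS6000_ALIGN (56, 16) = 64, with gp_save_offset -8 and
   lr_save_offset 4.  */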
24580 #ifndef ABI_STACK_BOUNDARY
24581 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24582 #endif
24584 static rs6000_stack_t *
24585 rs6000_stack_info (void)
24587 /* We should never be called for thunks, we are not set up for that. */
24588 gcc_assert (!cfun->is_thunk);
24590 rs6000_stack_t *info = &stack_info;
24591 int reg_size = TARGET_32BIT ? 4 : 8;
24592 int ehrd_size;
24593 int ehcr_size;
24594 int save_align;
24595 int first_gp;
24596 HOST_WIDE_INT non_fixed_size;
24597 bool using_static_chain_p;
24599 if (reload_completed && info->reload_completed)
24600 return info;
24602 memset (info, 0, sizeof (*info));
24603 info->reload_completed = reload_completed;
24605 if (TARGET_SPE)
24607 /* Cache value so we don't rescan instruction chain over and over. */
24608 if (cfun->machine->spe_insn_chain_scanned_p == 0)
24609 cfun->machine->spe_insn_chain_scanned_p
24610 = spe_func_has_64bit_regs_p () + 1;
24611 info->spe_64bit_regs_used = cfun->machine->spe_insn_chain_scanned_p - 1;
24614 /* Select which calling sequence. */
24615 info->abi = DEFAULT_ABI;
24617 /* Calculate which registers need to be saved & save area size. */
24618 info->first_gp_reg_save = first_reg_to_save ();
24619 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24620 even if it currently looks like we won't. Reload may need it to
24621 get at a constant; if so, it will have already created a constant
24622 pool entry for it. */
24623 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24624 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24625 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24626 && crtl->uses_const_pool
24627 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24628 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24629 else
24630 first_gp = info->first_gp_reg_save;
24632 info->gp_size = reg_size * (32 - first_gp);
24634 /* For the SPE, we have an additional upper 32-bits on each GPR.
24635 Ideally we should save the entire 64-bits only when the upper
24636 half is used in SIMD instructions. Since we only record
24637 registers live (not the size they are used in), this proves
24638 difficult because we'd have to traverse the instruction chain at
24639 the right time, taking reload into account. This is a real pain,
24640 so we opt to save the GPRs in 64 bits whenever even one register
24641 gets used in 64 bits. Otherwise, all the registers in the frame
24642 get saved in 32 bits.
24644 So, when we save all GPRs (except the SP) in 64 bits, the
24645 traditional GP save area will be empty. */
24646 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24647 info->gp_size = 0;
24649 info->first_fp_reg_save = first_fp_reg_to_save ();
24650 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24652 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24653 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24654 - info->first_altivec_reg_save);
24656 /* Does this function call anything? */
24657 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24659 /* Determine if we need to save the condition code registers. */
24660 if (save_reg_p (CR2_REGNO)
24661 || save_reg_p (CR3_REGNO)
24662 || save_reg_p (CR4_REGNO))
24664 info->cr_save_p = 1;
24665 if (DEFAULT_ABI == ABI_V4)
24666 info->cr_size = reg_size;
24669 /* If the current function calls __builtin_eh_return, then we need
24670 to allocate stack space for registers that will hold data for
24671 the exception handler. */
24672 if (crtl->calls_eh_return)
24674 unsigned int i;
24675 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24676 continue;
24678 /* SPE saves EH registers in 64-bits. */
24679 ehrd_size = i * (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0
24680 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
24682 else
24683 ehrd_size = 0;
24685 /* In the ELFv2 ABI, we also need to allocate space for separate
24686 CR field save areas if the function calls __builtin_eh_return. */
24687 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24689 /* This hard-codes that we have three call-saved CR fields. */
24690 ehcr_size = 3 * reg_size;
24691 /* We do *not* use the regular CR save mechanism. */
24692 info->cr_save_p = 0;
24694 else
24695 ehcr_size = 0;
24697 /* Determine various sizes. */
24698 info->reg_size = reg_size;
24699 info->fixed_size = RS6000_SAVE_AREA;
24700 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24701 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24702 TARGET_ALTIVEC ? 16 : 8);
24703 if (FRAME_GROWS_DOWNWARD)
24704 info->vars_size
24705 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24706 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24707 - (info->fixed_size + info->vars_size + info->parm_size);
24709 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24710 info->spe_gp_size = 8 * (32 - first_gp);
24712 if (TARGET_ALTIVEC_ABI)
24713 info->vrsave_mask = compute_vrsave_mask ();
24715 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24716 info->vrsave_size = 4;
24718 compute_save_world_info (info);
24720 /* Calculate the offsets. */
24721 switch (DEFAULT_ABI)
24723 case ABI_NONE:
24724 default:
24725 gcc_unreachable ();
24727 case ABI_AIX:
24728 case ABI_ELFv2:
24729 case ABI_DARWIN:
24730 info->fp_save_offset = -info->fp_size;
24731 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24733 if (TARGET_ALTIVEC_ABI)
24735 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24737 /* Align stack so vector save area is on a quadword boundary.
24738 The padding goes above the vectors. */
24739 if (info->altivec_size != 0)
24740 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24742 info->altivec_save_offset = info->vrsave_save_offset
24743 - info->altivec_padding_size
24744 - info->altivec_size;
24745 gcc_assert (info->altivec_size == 0
24746 || info->altivec_save_offset % 16 == 0);
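/* For instance (illustrative): a vrsave_save_offset of -292 has low
   nibble 0xC, so 12 bytes of padding go above the vector save area and
   altivec_save_offset lands on a 16-byte boundary.  */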
24748 /* Adjust for AltiVec case. */
24749 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24751 else
24752 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24754 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24755 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24756 info->lr_save_offset = 2*reg_size;
24757 break;
24759 case ABI_V4:
24760 info->fp_save_offset = -info->fp_size;
24761 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24762 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24764 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24766 /* Align stack so SPE GPR save area is aligned on a
24767 double-word boundary. */
24768 if (info->spe_gp_size != 0 && info->cr_save_offset != 0)
24769 info->spe_padding_size = 8 - (-info->cr_save_offset % 8);
24770 else
24771 info->spe_padding_size = 0;
24773 info->spe_gp_save_offset = info->cr_save_offset
24774 - info->spe_padding_size
24775 - info->spe_gp_size;
24777 /* Adjust for SPE case. */
24778 info->ehrd_offset = info->spe_gp_save_offset;
24780 else if (TARGET_ALTIVEC_ABI)
24782 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24784 /* Align stack so vector save area is on a quadword boundary. */
24785 if (info->altivec_size != 0)
24786 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24788 info->altivec_save_offset = info->vrsave_save_offset
24789 - info->altivec_padding_size
24790 - info->altivec_size;
24792 /* Adjust for AltiVec case. */
24793 info->ehrd_offset = info->altivec_save_offset;
24795 else
24796 info->ehrd_offset = info->cr_save_offset;
24798 info->ehrd_offset -= ehrd_size;
24799 info->lr_save_offset = reg_size;
24802 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24803 info->save_size = RS6000_ALIGN (info->fp_size
24804 + info->gp_size
24805 + info->altivec_size
24806 + info->altivec_padding_size
24807 + info->spe_gp_size
24808 + info->spe_padding_size
24809 + ehrd_size
24810 + ehcr_size
24811 + info->cr_size
24812 + info->vrsave_size,
24813 save_align);
24815 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24817 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24818 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24820 /* Determine if we need to save the link register. */
24821 if (info->calls_p
24822 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24823 && crtl->profile
24824 && !TARGET_PROFILE_KERNEL)
24825 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24826 #ifdef TARGET_RELOCATABLE
24827 || (DEFAULT_ABI == ABI_V4
24828 && (TARGET_RELOCATABLE || flag_pic > 1)
24829 && get_pool_size () != 0)
24830 #endif
24831 || rs6000_ra_ever_killed ())
24832 info->lr_save_p = 1;
24834 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24835 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24836 && call_used_regs[STATIC_CHAIN_REGNUM]);
24837 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24839 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24840 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24841 || !(info->savres_strategy & SAVE_INLINE_VRS)
24842 || !(info->savres_strategy & REST_INLINE_GPRS)
24843 || !(info->savres_strategy & REST_INLINE_FPRS)
24844 || !(info->savres_strategy & REST_INLINE_VRS))
24845 info->lr_save_p = 1;
24847 if (info->lr_save_p)
24848 df_set_regs_ever_live (LR_REGNO, true);
24850 /* Determine if we need to allocate any stack frame:
24852 For AIX we need to push the stack if a frame pointer is needed
24853 (because the stack might be dynamically adjusted), if we are
24854 debugging, if we make calls, or if the sum of fp_save, gp_save,
24855 and local variables is more than the space needed to save all
24856 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24857 + 18*8 = 288 (GPR13 reserved).
24859 For V.4 we don't have the stack cushion that AIX uses, but assume
24860 that the debugger can handle stackless frames. */
24862 if (info->calls_p)
24863 info->push_p = 1;
24865 else if (DEFAULT_ABI == ABI_V4)
24866 info->push_p = non_fixed_size != 0;
24868 else if (frame_pointer_needed)
24869 info->push_p = 1;
24871 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24872 info->push_p = 1;
24874 else
24875 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24877 return info;
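/* For example (illustrative): on 64-bit AIX a leaf function with 200
   bytes of locals, no saved registers and no debug info has a
   non_fixed_size of 200 <= 288, so push_p stays clear and no stack
   frame is allocated.  */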
24880 /* Return true if the current function uses any GPRs in 64-bit SIMD
24881 mode. */
24883 static bool
24884 spe_func_has_64bit_regs_p (void)
24886 rtx_insn *insns, *insn;
24888 /* Functions that save and restore all the call-saved registers will
24889 need to save/restore the registers in 64-bits. */
24890 if (crtl->calls_eh_return
24891 || cfun->calls_setjmp
24892 || crtl->has_nonlocal_goto)
24893 return true;
24895 insns = get_insns ();
24897 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
24899 if (INSN_P (insn))
24901 rtx i;
24903 /* FIXME: This should be implemented with attributes...
24905 (set_attr "spe64" "true")....then,
24906 if (get_spe64(insn)) return true;
24908 It's the only reliable way to do the stuff below. */
24910 i = PATTERN (insn);
24911 if (GET_CODE (i) == SET)
24913 machine_mode mode = GET_MODE (SET_SRC (i));
24915 if (SPE_VECTOR_MODE (mode))
24916 return true;
24917 if (TARGET_E500_DOUBLE
24918 && (mode == DFmode || FLOAT128_2REG_P (mode)))
24919 return true;
24924 return false;
24927 static void
24928 debug_stack_info (rs6000_stack_t *info)
24930 const char *abi_string;
24932 if (! info)
24933 info = rs6000_stack_info ();
24935 fprintf (stderr, "\nStack information for function %s:\n",
24936 ((current_function_decl && DECL_NAME (current_function_decl))
24937 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24938 : "<unknown>"));
24940 switch (info->abi)
24942 default: abi_string = "Unknown"; break;
24943 case ABI_NONE: abi_string = "NONE"; break;
24944 case ABI_AIX: abi_string = "AIX"; break;
24945 case ABI_ELFv2: abi_string = "ELFv2"; break;
24946 case ABI_DARWIN: abi_string = "Darwin"; break;
24947 case ABI_V4: abi_string = "V.4"; break;
24950 fprintf (stderr, "\tABI = %5s\n", abi_string);
24952 if (TARGET_ALTIVEC_ABI)
24953 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24955 if (TARGET_SPE_ABI)
24956 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
24958 if (info->first_gp_reg_save != 32)
24959 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24961 if (info->first_fp_reg_save != 64)
24962 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24964 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24965 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24966 info->first_altivec_reg_save);
24968 if (info->lr_save_p)
24969 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24971 if (info->cr_save_p)
24972 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24974 if (info->vrsave_mask)
24975 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24977 if (info->push_p)
24978 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24980 if (info->calls_p)
24981 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24983 if (info->gp_size)
24984 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24986 if (info->fp_size)
24987 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24989 if (info->altivec_size)
24990 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24991 info->altivec_save_offset);
24993 if (info->spe_gp_size)
24994 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
24995 info->spe_gp_save_offset);
24997 if (info->vrsave_size)
24998 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24999 info->vrsave_save_offset);
25001 if (info->lr_save_p)
25002 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
25004 if (info->cr_save_p)
25005 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
25007 if (info->varargs_save_offset)
25008 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
25010 if (info->total_size)
25011 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25012 info->total_size);
25014 if (info->vars_size)
25015 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25016 info->vars_size);
25018 if (info->parm_size)
25019 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25021 if (info->fixed_size)
25022 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25024 if (info->gp_size)
25025 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25027 if (info->spe_gp_size)
25028 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
25030 if (info->fp_size)
25031 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25033 if (info->altivec_size)
25034 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25036 if (info->vrsave_size)
25037 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25039 if (info->altivec_padding_size)
25040 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25041 info->altivec_padding_size);
25043 if (info->spe_padding_size)
25044 fprintf (stderr, "\tspe_padding_size = %5d\n",
25045 info->spe_padding_size);
25047 if (info->cr_size)
25048 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25050 if (info->save_size)
25051 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25053 if (info->reg_size != 4)
25054 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25056 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25058 fprintf (stderr, "\n");
25061 rtx
25062 rs6000_return_addr (int count, rtx frame)
25064 /* Currently we don't optimize very well between prolog and body
25065 code, and for PIC code the result can actually be quite bad, so
25066 don't try to be too clever here. */
25067 if (count != 0
25068 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25070 cfun->machine->ra_needs_full_frame = 1;
25072 return
25073 gen_rtx_MEM
25074 (Pmode,
25075 memory_address
25076 (Pmode,
25077 plus_constant (Pmode,
25078 copy_to_reg
25079 (gen_rtx_MEM (Pmode,
25080 memory_address (Pmode, frame))),
25081 RETURN_ADDRESS_OFFSET)));
25084 cfun->machine->ra_need_lr = 1;
25085 return get_hard_reg_initial_val (Pmode, LR_REGNO);
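/* For reference: __builtin_return_address (0) arrives here with
   COUNT == 0 and, in the common non-PIC case, simply records
   ra_need_lr and hands back the incoming LR value via
   get_hard_reg_initial_val.  */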
25088 /* Say whether a function is a candidate for sibcall handling or not. */
25090 static bool
25091 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25093 tree fntype;
25095 if (decl)
25096 fntype = TREE_TYPE (decl);
25097 else
25098 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25100 /* We can't do it if the called function has more vector parameters
25101 than the current function; there's nowhere to put the VRsave code. */
25102 if (TARGET_ALTIVEC_ABI
25103 && TARGET_ALTIVEC_VRSAVE
25104 && !(decl && decl == current_function_decl))
25106 function_args_iterator args_iter;
25107 tree type;
25108 int nvreg = 0;
25110 /* Functions with vector parameters are required to have a
25111 prototype, so the argument type info must be available
25112 here. */
25113 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25114 if (TREE_CODE (type) == VECTOR_TYPE
25115 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25116 nvreg++;
25118 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25119 if (TREE_CODE (type) == VECTOR_TYPE
25120 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25121 nvreg--;
25123 if (nvreg > 0)
25124 return false;
25127 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25128 functions, because the callee may have a different TOC pointer to
25129 the caller and there's no way to ensure we restore the TOC when
25130 we return. With the secure-plt SYSV ABI we can't make non-local
25131 calls when -fpic/PIC because the plt call stubs use r30. */
25132 if (DEFAULT_ABI == ABI_DARWIN
25133 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25134 && decl
25135 && !DECL_EXTERNAL (decl)
25136 && !DECL_WEAK (decl)
25137 && (*targetm.binds_local_p) (decl))
25138 || (DEFAULT_ABI == ABI_V4
25139 && (!TARGET_SECURE_PLT
25140 || !flag_pic
25141 || (decl
25142 && (*targetm.binds_local_p) (decl)))))
25144 tree attr_list = TYPE_ATTRIBUTES (fntype);
25146 if (!lookup_attribute ("longcall", attr_list)
25147 || lookup_attribute ("shortcall", attr_list))
25148 return true;
25151 return false;
25154 static int
25155 rs6000_ra_ever_killed (void)
25157 rtx_insn *top;
25158 rtx reg;
25159 rtx_insn *insn;
25161 if (cfun->is_thunk)
25162 return 0;
25164 if (cfun->machine->lr_save_state)
25165 return cfun->machine->lr_save_state - 1;
25167 /* regs_ever_live has LR marked as used if any sibcalls are present,
25168 but this should not force saving and restoring in the
25169 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25170 clobbers LR, so that is inappropriate. */
25172 /* Also, the prologue can generate a store into LR that
25173 doesn't really count, like this:
25175 move LR->R0
25176 bcl to set PIC register
25177 move LR->R31
25178 move R0->LR
25180 When we're called from the epilogue, we need to avoid counting
25181 this as a store. */
25183 push_topmost_sequence ();
25184 top = get_insns ();
25185 pop_topmost_sequence ();
25186 reg = gen_rtx_REG (Pmode, LR_REGNO);
25188 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25190 if (INSN_P (insn))
25192 if (CALL_P (insn))
25194 if (!SIBLING_CALL_P (insn))
25195 return 1;
25197 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25198 return 1;
25199 else if (set_of (reg, insn) != NULL_RTX
25200 && !prologue_epilogue_contains (insn))
25201 return 1;
25204 return 0;
25207 /* Emit instructions needed to load the TOC register.
25208 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25209 a constant pool; or for SVR4 -fpic. */
25211 void
25212 rs6000_emit_load_toc_table (int fromprolog)
25214 rtx dest;
25215 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25217 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25219 char buf[30];
25220 rtx lab, tmp1, tmp2, got;
25222 lab = gen_label_rtx ();
25223 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25224 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25225 if (flag_pic == 2)
25227 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25228 need_toc_init = 1;
25230 else
25231 got = rs6000_got_sym ();
25232 tmp1 = tmp2 = dest;
25233 if (!fromprolog)
25235 tmp1 = gen_reg_rtx (Pmode);
25236 tmp2 = gen_reg_rtx (Pmode);
25238 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25239 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25240 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25241 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25243 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25245 emit_insn (gen_load_toc_v4_pic_si ());
25246 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25248 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25250 char buf[30];
25251 rtx temp0 = (fromprolog
25252 ? gen_rtx_REG (Pmode, 0)
25253 : gen_reg_rtx (Pmode));
25255 if (fromprolog)
25257 rtx symF, symL;
25259 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25260 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25262 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25263 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25265 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25266 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25267 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25269 else
25271 rtx tocsym, lab;
25273 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25274 need_toc_init = 1;
25275 lab = gen_label_rtx ();
25276 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25277 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25278 if (TARGET_LINK_STACK)
25279 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25280 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25282 emit_insn (gen_addsi3 (dest, temp0, dest));
25284 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25286 /* This is for AIX code running in non-PIC ELF32. */
25287 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25289 need_toc_init = 1;
25290 emit_insn (gen_elf_high (dest, realsym));
25291 emit_insn (gen_elf_low (dest, dest, realsym));
25293 else
25295 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25297 if (TARGET_32BIT)
25298 emit_insn (gen_load_toc_aix_si (dest));
25299 else
25300 emit_insn (gen_load_toc_aix_di (dest));
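/* For reference, a sketch (label names illustrative) of the secure-plt
   PIC sequence emitted by the first branch above, with r30 as the PIC
   register:

	bcl	20,31,.LCF0
   .LCF0:
	mflr	30
	addis	30,30,.Lgot-.LCF0@ha
	addi	30,30,.Lgot-.LCF0@l
   */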
25304 /* Emit instructions to restore the link register after determining where
25305 its value has been stored. */
25307 void
25308 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25310 rs6000_stack_t *info = rs6000_stack_info ();
25311 rtx operands[2];
25313 operands[0] = source;
25314 operands[1] = scratch;
25316 if (info->lr_save_p)
25318 rtx frame_rtx = stack_pointer_rtx;
25319 HOST_WIDE_INT sp_offset = 0;
25320 rtx tmp;
25322 if (frame_pointer_needed
25323 || cfun->calls_alloca
25324 || info->total_size > 32767)
25326 tmp = gen_frame_mem (Pmode, frame_rtx);
25327 emit_move_insn (operands[1], tmp);
25328 frame_rtx = operands[1];
25330 else if (info->push_p)
25331 sp_offset = info->total_size;
25333 tmp = plus_constant (Pmode, frame_rtx,
25334 info->lr_save_offset + sp_offset);
25335 tmp = gen_frame_mem (Pmode, tmp);
25336 emit_move_insn (tmp, operands[0]);
25338 else
25339 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25341 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25342 state of lr_save_p so any change from here on would be a bug. In
25343 particular, stop rs6000_ra_ever_killed from considering the SET
25344 of lr we may have added just above. */
25345 cfun->machine->lr_save_state = info->lr_save_p + 1;
25348 static GTY(()) alias_set_type set = -1;
25350 alias_set_type
25351 get_TOC_alias_set (void)
25353 if (set == -1)
25354 set = new_alias_set ();
25355 return set;
25358 /* This returns nonzero if the current function uses the TOC. This is
25359 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25360 is generated by the ABI_V4 load_toc_* patterns. */
25361 #if TARGET_ELF
25362 static int
25363 uses_TOC (void)
25365 rtx_insn *insn;
25367 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25368 if (INSN_P (insn))
25370 rtx pat = PATTERN (insn);
25371 int i;
25373 if (GET_CODE (pat) == PARALLEL)
25374 for (i = 0; i < XVECLEN (pat, 0); i++)
25376 rtx sub = XVECEXP (pat, 0, i);
25377 if (GET_CODE (sub) == USE)
25379 sub = XEXP (sub, 0);
25380 if (GET_CODE (sub) == UNSPEC
25381 && XINT (sub, 1) == UNSPEC_TOC)
25382 return 1;
25386 return 0;
25388 #endif
25390 rtx
25391 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25393 rtx tocrel, tocreg, hi;
25395 if (TARGET_DEBUG_ADDR)
25397 if (GET_CODE (symbol) == SYMBOL_REF)
25398 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25399 XSTR (symbol, 0));
25400 else
25402 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25403 GET_RTX_NAME (GET_CODE (symbol)));
25404 debug_rtx (symbol);
25408 if (!can_create_pseudo_p ())
25409 df_set_regs_ever_live (TOC_REGISTER, true);
25411 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25412 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25413 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25414 return tocrel;
25416 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25417 if (largetoc_reg != NULL)
25419 emit_move_insn (largetoc_reg, hi);
25420 hi = largetoc_reg;
25422 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
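/* Illustrative note (an addition): for the medium and large code models
   the HIGH/LO_SUM pair built above typically becomes a two-instruction
   TOC-relative address on 64-bit ELF:

     addis rX,r2,sym@toc@ha
     ld    rY,sym@toc@l(rX)

   whereas for CMODEL_SMALL the bare UNSPEC_TOCREL suffices, e.g.
   "ld rY,sym@toc(r2)".  */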
25425 /* Issue assembly directives that create a reference to the given DWARF
25426 FRAME_TABLE_LABEL from the current function section. */
25427 void
25428 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25430 fprintf (asm_out_file, "\t.ref %s\n",
25431 (* targetm.strip_name_encoding) (frame_table_label));
25434 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25435 and the change to the stack pointer. */
25437 static void
25438 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25440 rtvec p;
25441 int i;
25442 rtx regs[3];
25444 i = 0;
25445 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25446 if (hard_frame_needed)
25447 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25448 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25449 || (hard_frame_needed
25450 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25451 regs[i++] = fp;
25453 p = rtvec_alloc (i);
25454 while (--i >= 0)
25456 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25457 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25460 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
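/* Illustrative note (an addition): the tie emitted above is a set of
   dummy stores to BLKmode frame memory, roughly

     (parallel [(set (mem:BLK (reg 1)) (const_int 0))
                (set (mem:BLK (reg 31)) (const_int 0))])

   Since these MEMs carry the frame alias set, the scheduler cannot
   reorder frame saves and restores across the stack pointer (or frame
   pointer) update they are tied to.  */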
25463 /* Emit the correct code for allocating stack space, as insns.
25464 If COPY_REG, make sure a copy of the old frame is left there.
25465 The generated code may use hard register 0 as a temporary. */
25467 static rtx_insn *
25468 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25470 rtx_insn *insn;
25471 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25472 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25473 rtx todec = gen_int_mode (-size, Pmode);
25474 rtx par, set, mem;
25476 if (INTVAL (todec) != -size)
25478 warning (0, "stack frame too large");
25479 emit_insn (gen_trap ());
25480 return 0;
25483 if (crtl->limit_stack)
25485 if (REG_P (stack_limit_rtx)
25486 && REGNO (stack_limit_rtx) > 1
25487 && REGNO (stack_limit_rtx) <= 31)
25489 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
25490 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25491 const0_rtx));
25493 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25494 && TARGET_32BIT
25495 && DEFAULT_ABI == ABI_V4)
25497 rtx toload = gen_rtx_CONST (VOIDmode,
25498 gen_rtx_PLUS (Pmode,
25499 stack_limit_rtx,
25500 GEN_INT (size)));
25502 emit_insn (gen_elf_high (tmp_reg, toload));
25503 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25504 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25505 const0_rtx));
25507 else
25508 warning (0, "stack limit expression is not supported");
25511 if (copy_reg)
25513 if (copy_off != 0)
25514 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25515 else
25516 emit_move_insn (copy_reg, stack_reg);
25519 if (size > 32767)
25521 /* Need a note here so that try_split doesn't get confused. */
25522 if (get_last_insn () == NULL_RTX)
25523 emit_note (NOTE_INSN_DELETED);
25524 insn = emit_move_insn (tmp_reg, todec);
25525 try_split (PATTERN (insn), insn, 0);
25526 todec = tmp_reg;
25529 insn = emit_insn (TARGET_32BIT
25530 ? gen_movsi_update_stack (stack_reg, stack_reg,
25531 todec, stack_reg)
25532 : gen_movdi_di_update_stack (stack_reg, stack_reg,
25533 todec, stack_reg));
25534 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25535 it now and set the alias set/attributes. The above gen_*_update
25536 calls will generate a PARALLEL with the MEM set being the first
25537 operation. */
25538 par = PATTERN (insn);
25539 gcc_assert (GET_CODE (par) == PARALLEL);
25540 set = XVECEXP (par, 0, 0);
25541 gcc_assert (GET_CODE (set) == SET);
25542 mem = SET_DEST (set);
25543 gcc_assert (MEM_P (mem));
25544 MEM_NOTRAP_P (mem) = 1;
25545 set_mem_alias_set (mem, get_frame_alias_set ());
25547 RTX_FRAME_RELATED_P (insn) = 1;
25548 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25549 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
25550 GEN_INT (-size))));
25551 return insn;
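/* Illustrative note (an addition): for sizes up to 32767 the update
   above is a single store-with-update,

     stdu r1,-size(r1)        (64-bit)
     stwu r1,-size(r1)        (32-bit)

   while larger sizes first materialize -size in r0 and allocate with
   "stdux r1,r1,r0" (or "stwux" on 32-bit).  */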
25554 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25556 #if PROBE_INTERVAL > 32768
25557 #error Cannot use indexed addressing mode for stack probing
25558 #endif
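/* Illustrative note (an addition): STACK_CHECK_PROBE_INTERVAL_EXP
   defaults to 12, making PROBE_INTERVAL 4096 bytes; the #error above
   guards the 16-bit signed displacements used by the probe stores.  */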
25560 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25561 inclusive. These are offsets from the current stack pointer. */
25563 static void
25564 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25566 /* See if we have a constant small number of probes to generate. If so,
25567 that's the easy case. */
25568 if (first + size <= 32768)
25570 HOST_WIDE_INT i;
25572 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25573 it exceeds SIZE. If only one probe is needed, this will not
25574 generate any code. Then probe at FIRST + SIZE. */
25575 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25576 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25577 -(first + i)));
25579 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25580 -(first + size)));
25583 /* Otherwise, do the same as above, but in a loop. Note that we must be
25584 extra careful with variables wrapping around because we might be at
25585 the very top (or the very bottom) of the address space and we have
25586 to be able to handle this case properly; in particular, we use an
25587 equality test for the loop condition. */
25588 else
25590 HOST_WIDE_INT rounded_size;
25591 rtx r12 = gen_rtx_REG (Pmode, 12);
25592 rtx r0 = gen_rtx_REG (Pmode, 0);
25594 /* Sanity check for the addressing mode we're going to use. */
25595 gcc_assert (first <= 32768);
25597 /* Step 1: round SIZE to the previous multiple of the interval. */
25599 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25602 /* Step 2: compute initial and final value of the loop counter. */
25604 /* TEST_ADDR = SP + FIRST. */
25605 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25606 -first)));
25608 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25609 if (rounded_size > 32768)
25611 emit_move_insn (r0, GEN_INT (-rounded_size));
25612 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25614 else
25615 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25616 -rounded_size)));
25619 /* Step 3: the loop
25623 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25624 probe at TEST_ADDR
25626 while (TEST_ADDR != LAST_ADDR)
25628 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25629 until it is equal to ROUNDED_SIZE. */
25631 if (TARGET_64BIT)
25632 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
25633 else
25634 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
25637 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25638 that SIZE is equal to ROUNDED_SIZE. */
25640 if (size != rounded_size)
25641 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
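/* Worked example (an addition, with hypothetical arguments): assuming
   the default 4 KB interval, rs6000_emit_probe_stack_range (16384, 12288)
   takes the constant branch above and emits probes at sp-20480,
   sp-24576 and sp-28672, i.e. at FIRST + N * PROBE_INTERVAL for N = 1, 2
   and finally at FIRST + SIZE.  */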
25645 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25646 absolute addresses. */
25648 const char *
25649 output_probe_stack_range (rtx reg1, rtx reg2)
25651 static int labelno = 0;
25652 char loop_lab[32];
25653 rtx xops[2];
25655 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25657 /* Loop. */
25658 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25660 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25661 xops[0] = reg1;
25662 xops[1] = GEN_INT (-PROBE_INTERVAL);
25663 output_asm_insn ("addi %0,%0,%1", xops);
25665 /* Probe at TEST_ADDR. */
25666 xops[1] = gen_rtx_REG (Pmode, 0);
25667 output_asm_insn ("stw %1,0(%0)", xops);
25669 /* Test if TEST_ADDR == LAST_ADDR. */
25670 xops[1] = reg2;
25671 if (TARGET_64BIT)
25672 output_asm_insn ("cmpd 0,%0,%1", xops);
25673 else
25674 output_asm_insn ("cmpw 0,%0,%1", xops);
25676 /* Branch. */
25677 fputs ("\tbne 0,", asm_out_file);
25678 assemble_name_raw (asm_out_file, loop_lab);
25679 fputc ('\n', asm_out_file);
25681 return "";
25684 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25685 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25686 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25687 deduce these equivalences by itself so it wasn't necessary to hold
25688 its hand so much. Don't be tempted to always supply d2_f_d_e with
25689 the actual CFA register, i.e. r31 when we are using a hard frame
25690 pointer. That fails when saving regs off r1, and sched moves the
25691 r31 setup past the reg saves. */
25693 static rtx
25694 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
25695 rtx reg2, rtx repl2)
25697 rtx repl;
25699 if (REGNO (reg) == STACK_POINTER_REGNUM)
25701 gcc_checking_assert (val == 0);
25702 repl = NULL_RTX;
25704 else
25705 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25706 GEN_INT (val));
25708 rtx pat = PATTERN (insn);
25709 if (!repl && !reg2)
25711 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25712 if (GET_CODE (pat) == PARALLEL)
25713 for (int i = 0; i < XVECLEN (pat, 0); i++)
25714 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25716 rtx set = XVECEXP (pat, 0, i);
25718 /* If this PARALLEL has been emitted for out-of-line
25719 register save functions, or store multiple, then omit
25720 eh_frame info for any user-defined global regs. If
25721 eh_frame info is supplied, frame unwinding will
25722 restore a user reg. */
25723 if (!REG_P (SET_SRC (set))
25724 || !fixed_reg_p (REGNO (SET_SRC (set))))
25725 RTX_FRAME_RELATED_P (set) = 1;
25727 RTX_FRAME_RELATED_P (insn) = 1;
25728 return insn;
25731 /* We expect that 'pat' is either a SET or a PARALLEL containing
25732 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25733 are important so they all have to be marked RTX_FRAME_RELATED_P.
25734 Call simplify_replace_rtx on the SETs rather than the whole insn
25735 so as to leave the other stuff alone (for example USE of r12). */
25737 if (GET_CODE (pat) == SET)
25739 if (repl)
25740 pat = simplify_replace_rtx (pat, reg, repl);
25741 if (reg2)
25742 pat = simplify_replace_rtx (pat, reg2, repl2);
25744 else if (GET_CODE (pat) == PARALLEL)
25746 pat = shallow_copy_rtx (pat);
25747 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25749 for (int i = 0; i < XVECLEN (pat, 0); i++)
25750 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25752 rtx set = XVECEXP (pat, 0, i);
25754 if (repl)
25755 set = simplify_replace_rtx (set, reg, repl);
25756 if (reg2)
25757 set = simplify_replace_rtx (set, reg2, repl2);
25758 XVECEXP (pat, 0, i) = set;
25760 /* Omit eh_frame info for any user-defined global regs. */
25761 if (!REG_P (SET_SRC (set))
25762 || !fixed_reg_p (REGNO (SET_SRC (set))))
25763 RTX_FRAME_RELATED_P (set) = 1;
25766 else
25767 gcc_unreachable ();
25769 RTX_FRAME_RELATED_P (insn) = 1;
25770 if (repl || reg2)
25771 add_reg_note (insn, REG_FRAME_RELATED_EXPR, pat);
25773 return insn;
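/* Illustrative note (an addition): the REG_FRAME_RELATED_EXPR attached
   above is what dwarf2out reads when emitting CFI.  For example, a save
   done relative to a frame register set up as r11 = r1 + 128,

     (set (mem (plus (reg 11) (const_int -16))) (reg 31))

   has REG replaced by (plus (reg 1) (const_int 128)), so the note
   records the save at r1+112 and the CFA stays described in terms of
   the stack pointer.  */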
25776 /* Returns an insn that has a vrsave set operation with the
25777 appropriate CLOBBERs. */
25779 static rtx
25780 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25782 int nclobs, i;
25783 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25784 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25786 clobs[0]
25787 = gen_rtx_SET (vrsave,
25788 gen_rtx_UNSPEC_VOLATILE (SImode,
25789 gen_rtvec (2, reg, vrsave),
25790 UNSPECV_SET_VRSAVE));
25792 nclobs = 1;
25794 /* We need to clobber the registers in the mask so the scheduler
25795 does not move sets to VRSAVE before sets of AltiVec registers.
25797 However, if the function receives nonlocal gotos, reload will set
25798 all call-saved registers live. We will end up with:
25800 (set (reg 999) (mem))
25801 (parallel [ (set (reg vrsave) (unspec blah))
25802 (clobber (reg 999))])
25804 The clobber will cause the store into reg 999 to be dead, and
25805 flow will attempt to delete an epilogue insn. In this case, we
25806 need an unspec use/set of the register. */
25808 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25809 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25811 if (!epiloguep || call_used_regs [i])
25812 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25813 gen_rtx_REG (V4SImode, i));
25814 else
25816 rtx reg = gen_rtx_REG (V4SImode, i);
25818 clobs[nclobs++]
25819 = gen_rtx_SET (reg,
25820 gen_rtx_UNSPEC (V4SImode,
25821 gen_rtvec (1, reg), 27));
25825 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25827 for (i = 0; i < nclobs; ++i)
25828 XVECEXP (insn, 0, i) = clobs[i];
25830 return insn;
25833 static rtx
25834 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25836 rtx addr, mem;
25838 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25839 mem = gen_frame_mem (GET_MODE (reg), addr);
25840 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25843 static rtx
25844 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25846 return gen_frame_set (reg, frame_reg, offset, false);
25849 static rtx
25850 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25852 return gen_frame_set (reg, frame_reg, offset, true);
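/* Illustrative note (an addition): gen_frame_set builds the plain RTL

     (set (mem (plus FRAME_REG (const_int OFFSET))) REG)   for a store
     (set REG (mem (plus FRAME_REG (const_int OFFSET))))   for a load

   with the MEM marked as frame memory, so the two helpers above are
   just direction-selecting wrappers.  */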
25855 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25856 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25858 static rtx
25859 emit_frame_save (rtx frame_reg, machine_mode mode,
25860 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25862 rtx reg, insn;
25864 /* Assert that we are not given one of the cases that would need register indexed addressing. */
25865 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25866 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
25867 || (TARGET_E500_DOUBLE && mode == DFmode)
25868 || (TARGET_SPE_ABI
25869 && SPE_VECTOR_MODE (mode)
25870 && !SPE_CONST_OFFSET_OK (offset))));
25872 reg = gen_rtx_REG (mode, regno);
25873 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25874 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25875 NULL_RTX, NULL_RTX);
25878 /* Emit an offset memory reference suitable for a frame store, while
25879 converting to a valid addressing mode. */
25881 static rtx
25882 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25884 rtx int_rtx, offset_rtx;
25886 int_rtx = GEN_INT (offset);
25888 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
25889 || (TARGET_E500_DOUBLE && mode == DFmode))
25891 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
25892 emit_move_insn (offset_rtx, int_rtx);
25894 else
25895 offset_rtx = int_rtx;
25897 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
25900 #ifndef TARGET_FIX_AND_CONTINUE
25901 #define TARGET_FIX_AND_CONTINUE 0
25902 #endif
25904 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
25905 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25906 #define LAST_SAVRES_REGISTER 31
25907 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25909 enum {
25910 SAVRES_LR = 0x1,
25911 SAVRES_SAVE = 0x2,
25912 SAVRES_REG = 0x0c,
25913 SAVRES_GPR = 0,
25914 SAVRES_FPR = 4,
25915 SAVRES_VR = 8
25916 };
25918 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25920 /* Temporary holding space for an out-of-line register save/restore
25921 routine name. */
25922 static char savres_routine_name[30];
25924 /* Return the name for an out-of-line register save/restore routine.
25925 We are saving/restoring GPRs if GPR is true. */
25927 static char *
25928 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
25930 const char *prefix = "";
25931 const char *suffix = "";
25933 /* Different targets are supposed to define
25934 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25935 routine name could be defined with:
25937 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25939 This is a nice idea in theory, but in reality, things are
25940 complicated in several ways:
25942 - ELF targets have save/restore routines for GPRs.
25944 - SPE targets use different prefixes for 32/64-bit registers, and
25945 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
25947 - PPC64 ELF targets have routines for save/restore of GPRs that
25948 differ in what they do with the link register, so having a set
25949 prefix doesn't work. (We only use one of the save routines at
25950 the moment, though.)
25952 - PPC32 ELF targets have "exit" versions of the restore routines
25953 that restore the link register and can save some extra space.
25954 These require an extra suffix. (There are also "tail" versions
25955 of the restore routines and "GOT" versions of the save routines,
25956 but we don't generate those at present. Same problems apply,
25957 though.)
25959 We deal with all this by synthesizing our own prefix/suffix and
25960 using that for the simple sprintf call shown above. */
25961 if (TARGET_SPE)
25963 /* No floating point saves on the SPE. */
25964 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
25966 if ((sel & SAVRES_SAVE))
25967 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
25968 else
25969 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
25971 if ((sel & SAVRES_LR))
25972 suffix = "_x";
25974 else if (DEFAULT_ABI == ABI_V4)
25976 if (TARGET_64BIT)
25977 goto aix_names;
25979 if ((sel & SAVRES_REG) == SAVRES_GPR)
25980 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25981 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25982 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25983 else if ((sel & SAVRES_REG) == SAVRES_VR)
25984 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25985 else
25986 abort ();
25988 if ((sel & SAVRES_LR))
25989 suffix = "_x";
25991 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25993 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25994 /* No out-of-line save/restore routines for GPRs on AIX. */
25995 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25996 #endif
25998 aix_names:
25999 if ((sel & SAVRES_REG) == SAVRES_GPR)
26000 prefix = ((sel & SAVRES_SAVE)
26001 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26002 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26003 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26005 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26006 if ((sel & SAVRES_LR))
26007 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26008 else
26009 #endif
26011 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26012 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26015 else if ((sel & SAVRES_REG) == SAVRES_VR)
26016 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26017 else
26018 abort ();
26021 if (DEFAULT_ABI == ABI_DARWIN)
26023 /* The Darwin approach is (slightly) different, in order to be
26024 compatible with code generated by the system toolchain. There is a
26025 single symbol for the start of save sequence, and the code here
26026 embeds an offset into that code on the basis of the first register
26027 to be saved. */
26028 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26029 if ((sel & SAVRES_REG) == SAVRES_GPR)
26030 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26031 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26032 (regno - 13) * 4, prefix, regno);
26033 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26034 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26035 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26036 else if ((sel & SAVRES_REG) == SAVRES_VR)
26037 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26038 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26039 else
26040 abort ();
26042 else
26043 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26045 return savres_routine_name;
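/* Example names produced above (an addition): saving GPRs starting at
   r29 on 64-bit ELF gives "_savegpr0_29" when the routine also saves LR
   and "_savegpr1_29" when it does not, while the 32-bit SVR4 "exit"
   variant of a GPR restore from r29 is "_restgpr_29_x".  */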
26048 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26049 We are saving/restoring GPRs if GPR is true. */
26051 static rtx
26052 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26054 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26055 ? info->first_gp_reg_save
26056 : (sel & SAVRES_REG) == SAVRES_FPR
26057 ? info->first_fp_reg_save - 32
26058 : (sel & SAVRES_REG) == SAVRES_VR
26059 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26060 : -1);
26061 rtx sym;
26062 int select = sel;
26064 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
26065 versions of the gpr routines. */
26066 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
26067 && info->spe_64bit_regs_used)
26068 select ^= SAVRES_FPR ^ SAVRES_GPR;
26070 /* Don't generate bogus routine names. */
26071 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26072 && regno <= LAST_SAVRES_REGISTER
26073 && select >= 0 && select <= 12);
26075 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26077 if (sym == NULL)
26079 char *name;
26081 name = rs6000_savres_routine_name (info, regno, sel);
26083 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26084 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26085 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26088 return sym;
26091 /* Emit a sequence of insns, including a stack tie if needed, for
26092 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26093 reset the stack pointer, but move the base of the frame into
26094 reg UPDT_REGNO for use by out-of-line register restore routines. */
26096 static rtx
26097 rs6000_emit_stack_reset (rs6000_stack_t *info,
26098 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26099 unsigned updt_regno)
26101 rtx updt_reg_rtx;
26103 /* This blockage is needed so that sched doesn't decide to move
26104 the sp change before the register restores. */
26105 if (DEFAULT_ABI == ABI_V4
26106 || (TARGET_SPE_ABI
26107 && info->spe_64bit_regs_used != 0
26108 && info->first_gp_reg_save != 32))
26109 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
26111 /* If we are restoring registers out-of-line, we will be using the
26112 "exit" variants of the restore routines, which will reset the
26113 stack for us. But we do need to point updt_reg into the
26114 right place for those routines. */
26115 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26117 if (frame_off != 0)
26118 return emit_insn (gen_add3_insn (updt_reg_rtx,
26119 frame_reg_rtx, GEN_INT (frame_off)));
26120 else if (REGNO (frame_reg_rtx) != updt_regno)
26121 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26123 return NULL_RTX;
26126 /* Return the register number used as a pointer by out-of-line
26127 save/restore functions. */
26129 static inline unsigned
26130 ptr_regno_for_savres (int sel)
26132 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26133 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26134 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
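/* Illustrative note (an addition): this picks r1 for AIX/ELFv2 FPR
   routines and for any AIX/ELFv2 routine that handles LR, r12 for the
   remaining AIX/ELFv2 routines, r1 for Darwin FPR routines, and r11
   everywhere else, matching the pointer register each family of
   out-of-line routines expects.  */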
26137 /* Construct a parallel rtx describing the effect of a call to an
26138 out-of-line register save/restore routine, and emit the insn
26139 or jump_insn as appropriate. */
26141 static rtx
26142 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26143 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26144 machine_mode reg_mode, int sel)
26146 int i;
26147 int offset, start_reg, end_reg, n_regs, use_reg;
26148 int reg_size = GET_MODE_SIZE (reg_mode);
26149 rtx sym;
26150 rtvec p;
26151 rtx par, insn;
26153 offset = 0;
26154 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26155 ? info->first_gp_reg_save
26156 : (sel & SAVRES_REG) == SAVRES_FPR
26157 ? info->first_fp_reg_save
26158 : (sel & SAVRES_REG) == SAVRES_VR
26159 ? info->first_altivec_reg_save
26160 : -1);
26161 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26162 ? 32
26163 : (sel & SAVRES_REG) == SAVRES_FPR
26164 ? 64
26165 : (sel & SAVRES_REG) == SAVRES_VR
26166 ? LAST_ALTIVEC_REGNO + 1
26167 : -1);
26168 n_regs = end_reg - start_reg;
26169 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26170 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26171 + n_regs);
26173 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26174 RTVEC_ELT (p, offset++) = ret_rtx;
26176 RTVEC_ELT (p, offset++)
26177 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26179 sym = rs6000_savres_routine_sym (info, sel);
26180 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26182 use_reg = ptr_regno_for_savres (sel);
26183 if ((sel & SAVRES_REG) == SAVRES_VR)
26185 /* Vector regs are saved/restored using [reg+reg] addressing. */
26186 RTVEC_ELT (p, offset++)
26187 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26188 RTVEC_ELT (p, offset++)
26189 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26191 else
26192 RTVEC_ELT (p, offset++)
26193 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26195 for (i = 0; i < end_reg - start_reg; i++)
26196 RTVEC_ELT (p, i + offset)
26197 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26198 frame_reg_rtx, save_area_offset + reg_size * i,
26199 (sel & SAVRES_SAVE) != 0);
26201 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26202 RTVEC_ELT (p, i + offset)
26203 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26205 par = gen_rtx_PARALLEL (VOIDmode, p);
26207 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26209 insn = emit_jump_insn (par);
26210 JUMP_LABEL (insn) = ret_rtx;
26212 else
26213 insn = emit_insn (par);
26214 return insn;
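/* Illustrative note (an addition): the PARALLEL built above pairs a
   call-like USE of the routine symbol with one frame SET per register;
   a GPR save from r29 through r11 looks roughly like

     (parallel [(clobber (reg LR))
                (use (symbol_ref "_savegpr1_29"))
                (use (reg 11))
                (set (mem (plus (reg 11) ...)) (reg 29))
                ...])

   Restores that also restore LR become a jump_insn whose JUMP_LABEL is
   ret_rtx, i.e. a return.  */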
26217 /* Emit code to store CR fields that need to be saved into REG. */
26219 static void
26220 rs6000_emit_move_from_cr (rtx reg)
26222 /* Only the ELFv2 ABI allows storing only selected fields. */
26223 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26225 int i, cr_reg[8], count = 0;
26227 /* Collect CR fields that must be saved. */
26228 for (i = 0; i < 8; i++)
26229 if (save_reg_p (CR0_REGNO + i))
26230 cr_reg[count++] = i;
26232 /* If it's just a single one, use mfcrf. */
26233 if (count == 1)
26235 rtvec p = rtvec_alloc (1);
26236 rtvec r = rtvec_alloc (2);
26237 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26238 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26239 RTVEC_ELT (p, 0)
26240 = gen_rtx_SET (reg,
26241 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26243 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26244 return;
26247 /* ??? It might be better to handle the count == 2 or 3 cases here
26248 as well, using logical operations to combine the values. */
26251 emit_insn (gen_movesi_from_cr (reg));
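/* Illustrative note (an addition): the single-field case above assembles
   to one "mfocrf rD,mask" with only that field's bit set in the mask,
   which is why it is gated on TARGET_MFCRF; the fallback is a full
   "mfcr rD".  */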
26254 /* Return whether the split-stack arg pointer (r12) is used. */
26256 static bool
26257 split_stack_arg_pointer_used_p (void)
26259 /* If the pseudo holding the arg pointer is no longer a pseudo,
26260 then the arg pointer is used. */
26261 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26262 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26263 || (REGNO (cfun->machine->split_stack_arg_pointer)
26264 < FIRST_PSEUDO_REGISTER)))
26265 return true;
26267 /* Unfortunately we also need to do some code scanning, since
26268 r12 may have been substituted for the pseudo. */
26269 rtx_insn *insn;
26270 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26271 FOR_BB_INSNS (bb, insn)
26272 if (NONDEBUG_INSN_P (insn))
26274 /* A call destroys r12. */
26275 if (CALL_P (insn))
26276 return false;
26278 df_ref use;
26279 FOR_EACH_INSN_USE (use, insn)
26281 rtx x = DF_REF_REG (use);
26282 if (REG_P (x) && REGNO (x) == 12)
26283 return true;
26285 df_ref def;
26286 FOR_EACH_INSN_DEF (def, insn)
26288 rtx x = DF_REF_REG (def);
26289 if (REG_P (x) && REGNO (x) == 12)
26290 return false;
26293 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26296 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26298 static bool
26299 rs6000_global_entry_point_needed_p (void)
26301 /* Only needed for the ELFv2 ABI. */
26302 if (DEFAULT_ABI != ABI_ELFv2)
26303 return false;
26305 /* With -msingle-pic-base, we assume the whole program shares the same
26306 TOC, so no global entry point prologues are needed anywhere. */
26307 if (TARGET_SINGLE_PIC_BASE)
26308 return false;
26310 /* Ensure we have a global entry point for thunks. ??? We could
26311 avoid that if the target routine doesn't need a global entry point,
26312 but we do not know whether this is the case at this point. */
26313 if (cfun->is_thunk)
26314 return true;
26316 /* For regular functions, rs6000_emit_prologue sets this flag if the
26317 routine ever uses the TOC pointer. */
26318 return cfun->machine->r2_setup_needed;
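/* Illustrative note (an addition): when this returns true, the ELFv2
   global entry point emitted later conventionally derives the TOC
   pointer from the function address in r12,

     addis r2,r12,(.TOC.-entry)@ha
     addi  r2,r2,(.TOC.-entry)@l

   followed by the local entry point that TOC-sharing callers branch to
   directly.  */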
26321 /* Emit function prologue as insns. */
26323 void
26324 rs6000_emit_prologue (void)
26326 rs6000_stack_t *info = rs6000_stack_info ();
26327 machine_mode reg_mode = Pmode;
26328 int reg_size = TARGET_32BIT ? 4 : 8;
26329 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26330 rtx frame_reg_rtx = sp_reg_rtx;
26331 unsigned int cr_save_regno;
26332 rtx cr_save_rtx = NULL_RTX;
26333 rtx insn;
26334 int strategy;
26335 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26336 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26337 && call_used_regs[STATIC_CHAIN_REGNUM]);
26338 int using_split_stack = (flag_split_stack
26339 && (lookup_attribute ("no_split_stack",
26340 DECL_ATTRIBUTES (cfun->decl))
26341 == NULL));
26343 /* Offset to top of frame for frame_reg and sp respectively. */
26344 HOST_WIDE_INT frame_off = 0;
26345 HOST_WIDE_INT sp_off = 0;
26346 /* sp_adjust is the stack adjusting instruction, tracked so that the
26347 insn setting up the split-stack arg pointer can be emitted just
26348 prior to it, when r12 is not used here for other purposes. */
26349 rtx_insn *sp_adjust = 0;
26351 #if CHECKING_P
26352 /* Track and check usage of r0, r11, r12. */
26353 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26354 #define START_USE(R) do \
26356 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26357 reg_inuse |= 1 << (R); \
26358 } while (0)
26359 #define END_USE(R) do \
26361 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26362 reg_inuse &= ~(1 << (R)); \
26363 } while (0)
26364 #define NOT_INUSE(R) do \
26366 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26367 } while (0)
26368 #else
26369 #define START_USE(R) do {} while (0)
26370 #define END_USE(R) do {} while (0)
26371 #define NOT_INUSE(R) do {} while (0)
26372 #endif
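/* Illustrative note (an addition): with checking enabled, bracketing a
   temporary use as

     START_USE (0);
     ... code clobbering r0 ...
     END_USE (0);

   asserts that r0, r11 and r12 are never claimed for two overlapping
   purposes while the prologue is emitted; the macros compile away when
   CHECKING_P is zero.  */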
26374 if (DEFAULT_ABI == ABI_ELFv2
26375 && !TARGET_SINGLE_PIC_BASE)
26377 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26379 /* With -mminimal-toc we may generate an extra use of r2 below. */
26380 if (TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
26381 cfun->machine->r2_setup_needed = true;
26385 if (flag_stack_usage_info)
26386 current_function_static_stack_size = info->total_size;
26388 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26390 HOST_WIDE_INT size = info->total_size;
26392 if (crtl->is_leaf && !cfun->calls_alloca)
26394 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
26395 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
26396 size - STACK_CHECK_PROTECT);
26398 else if (size > 0)
26399 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
26402 if (TARGET_FIX_AND_CONTINUE)
26404 /* GDB on Darwin arranges to forward a function from the old
26405 address by modifying the first 5 instructions of the function
26406 to branch to the overriding function. This is necessary to
26407 permit function pointers that point to the old function to
26408 actually forward to the new function. */
26409 emit_insn (gen_nop ());
26410 emit_insn (gen_nop ());
26411 emit_insn (gen_nop ());
26412 emit_insn (gen_nop ());
26413 emit_insn (gen_nop ());
26416 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
26418 reg_mode = V2SImode;
26419 reg_size = 8;
26422 /* Handle world saves specially here. */
26423 if (WORLD_SAVE_P (info))
26425 int i, j, sz;
26426 rtx treg;
26427 rtvec p;
26428 rtx reg0;
26430 /* save_world expects lr in r0. */
26431 reg0 = gen_rtx_REG (Pmode, 0);
26432 if (info->lr_save_p)
26434 insn = emit_move_insn (reg0,
26435 gen_rtx_REG (Pmode, LR_REGNO));
26436 RTX_FRAME_RELATED_P (insn) = 1;
26439 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26440 assumptions about the offsets of various bits of the stack
26441 frame. */
26442 gcc_assert (info->gp_save_offset == -220
26443 && info->fp_save_offset == -144
26444 && info->lr_save_offset == 8
26445 && info->cr_save_offset == 4
26446 && info->push_p
26447 && info->lr_save_p
26448 && (!crtl->calls_eh_return
26449 || info->ehrd_offset == -432)
26450 && info->vrsave_save_offset == -224
26451 && info->altivec_save_offset == -416);
26453 treg = gen_rtx_REG (SImode, 11);
26454 emit_move_insn (treg, GEN_INT (-info->total_size));
26456 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26457 in R11. It also clobbers R12, so beware! */
26459 /* Preserve CR2 for save_world prologues. */
26460 sz = 5;
26461 sz += 32 - info->first_gp_reg_save;
26462 sz += 64 - info->first_fp_reg_save;
26463 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26464 p = rtvec_alloc (sz);
26465 j = 0;
26466 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26467 gen_rtx_REG (SImode,
26468 LR_REGNO));
26469 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26470 gen_rtx_SYMBOL_REF (Pmode,
26471 "*save_world"));
26472 /* We do floats first so that the instruction pattern matches
26473 properly. */
26474 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26475 RTVEC_ELT (p, j++)
26476 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26477 ? DFmode : SFmode,
26478 info->first_fp_reg_save + i),
26479 frame_reg_rtx,
26480 info->fp_save_offset + frame_off + 8 * i);
26481 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26482 RTVEC_ELT (p, j++)
26483 = gen_frame_store (gen_rtx_REG (V4SImode,
26484 info->first_altivec_reg_save + i),
26485 frame_reg_rtx,
26486 info->altivec_save_offset + frame_off + 16 * i);
26487 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26488 RTVEC_ELT (p, j++)
26489 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26490 frame_reg_rtx,
26491 info->gp_save_offset + frame_off + reg_size * i);
26493 /* CR register traditionally saved as CR2. */
26494 RTVEC_ELT (p, j++)
26495 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26496 frame_reg_rtx, info->cr_save_offset + frame_off);
26497 /* Explain the use of R0. */
26498 if (info->lr_save_p)
26499 RTVEC_ELT (p, j++)
26500 = gen_frame_store (reg0,
26501 frame_reg_rtx, info->lr_save_offset + frame_off);
26502 /* Explain what happens to the stack pointer. */
26504 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26505 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26508 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26509 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26510 treg, GEN_INT (-info->total_size));
26511 sp_off = frame_off = info->total_size;
26514 strategy = info->savres_strategy;
26516 /* For V.4, update stack before we do any saving and set back pointer. */
26517 if (! WORLD_SAVE_P (info)
26518 && info->push_p
26519 && (DEFAULT_ABI == ABI_V4
26520 || crtl->calls_eh_return))
26522 bool need_r11 = (TARGET_SPE
26523 ? (!(strategy & SAVE_INLINE_GPRS)
26524 && info->spe_64bit_regs_used == 0)
26525 : (!(strategy & SAVE_INLINE_FPRS)
26526 || !(strategy & SAVE_INLINE_GPRS)
26527 || !(strategy & SAVE_INLINE_VRS)));
26528 int ptr_regno = -1;
26529 rtx ptr_reg = NULL_RTX;
26530 int ptr_off = 0;
26532 if (info->total_size < 32767)
26533 frame_off = info->total_size;
26534 else if (need_r11)
26535 ptr_regno = 11;
26536 else if (info->cr_save_p
26537 || info->lr_save_p
26538 || info->first_fp_reg_save < 64
26539 || info->first_gp_reg_save < 32
26540 || info->altivec_size != 0
26541 || info->vrsave_size != 0
26542 || crtl->calls_eh_return)
26543 ptr_regno = 12;
26544 else
26546 /* The prologue won't be saving any regs so there is no need
26547 to set up a frame register to access any frame save area.
26548 We also won't be using frame_off anywhere below, but set
26549 the correct value anyway to protect against future
26550 changes to this function. */
26551 frame_off = info->total_size;
26553 if (ptr_regno != -1)
26555 /* Set up the frame offset to that needed by the first
26556 out-of-line save function. */
26557 START_USE (ptr_regno);
26558 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26559 frame_reg_rtx = ptr_reg;
26560 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26561 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26562 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26563 ptr_off = info->gp_save_offset + info->gp_size;
26564 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26565 ptr_off = info->altivec_save_offset + info->altivec_size;
26566 frame_off = -ptr_off;
26568 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26569 ptr_reg, ptr_off);
26570 if (REGNO (frame_reg_rtx) == 12)
26571 sp_adjust = 0;
26572 sp_off = info->total_size;
26573 if (frame_reg_rtx != sp_reg_rtx)
26574 rs6000_emit_stack_tie (frame_reg_rtx, false);
26577 /* If we use the link register, get it into r0. */
26578 if (!WORLD_SAVE_P (info) && info->lr_save_p)
26580 rtx addr, reg, mem;
26582 reg = gen_rtx_REG (Pmode, 0);
26583 START_USE (0);
26584 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26585 RTX_FRAME_RELATED_P (insn) = 1;
26587 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26588 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26590 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26591 GEN_INT (info->lr_save_offset + frame_off));
26592 mem = gen_rtx_MEM (Pmode, addr);
26593 /* This should not be of rs6000_sr_alias_set, because of
26594 __builtin_return_address. */
26596 insn = emit_move_insn (mem, reg);
26597 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26598 NULL_RTX, NULL_RTX);
26599 END_USE (0);
26603 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26604 r12 will be needed by out-of-line gpr save. */
26605 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26606 && !(strategy & (SAVE_INLINE_GPRS
26607 | SAVE_NOINLINE_GPRS_SAVES_LR))
26608 ? 11 : 12);
26609 if (!WORLD_SAVE_P (info)
26610 && info->cr_save_p
26611 && REGNO (frame_reg_rtx) != cr_save_regno
26612 && !(using_static_chain_p && cr_save_regno == 11)
26613 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26615 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26616 START_USE (cr_save_regno);
26617 rs6000_emit_move_from_cr (cr_save_rtx);
26620 /* Do any required saving of fpr's. If only one or two to save, do
26621 it ourselves. Otherwise, call function. */
26622 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26624 int i;
26625 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26626 if (save_reg_p (info->first_fp_reg_save + i))
26627 emit_frame_save (frame_reg_rtx,
26628 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26629 ? DFmode : SFmode),
26630 info->first_fp_reg_save + i,
26631 info->fp_save_offset + frame_off + 8 * i,
26632 sp_off - frame_off);
26634 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26636 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26637 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26638 unsigned ptr_regno = ptr_regno_for_savres (sel);
26639 rtx ptr_reg = frame_reg_rtx;
26641 if (REGNO (frame_reg_rtx) == ptr_regno)
26642 gcc_checking_assert (frame_off == 0);
26643 else
26645 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26646 NOT_INUSE (ptr_regno);
26647 emit_insn (gen_add3_insn (ptr_reg,
26648 frame_reg_rtx, GEN_INT (frame_off)));
26650 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26651 info->fp_save_offset,
26652 info->lr_save_offset,
26653 DFmode, sel);
26654 rs6000_frame_related (insn, ptr_reg, sp_off,
26655 NULL_RTX, NULL_RTX);
26656 if (lr)
26657 END_USE (0);
26660 /* Save GPRs. This is done as a PARALLEL if we are using
26661 the store-multiple instructions. */
26662 if (!WORLD_SAVE_P (info)
26663 && TARGET_SPE_ABI
26664 && info->spe_64bit_regs_used != 0
26665 && info->first_gp_reg_save != 32)
26667 int i;
26668 rtx spe_save_area_ptr;
26669 HOST_WIDE_INT save_off;
26670 int ool_adjust = 0;
26672 /* Determine whether we can address all of the registers that need
26673 to be saved with an offset from frame_reg_rtx that fits in
26674 the small const field for SPE memory instructions. */
26675 int spe_regs_addressable
26676 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
26677 + reg_size * (32 - info->first_gp_reg_save - 1))
26678 && (strategy & SAVE_INLINE_GPRS));
26680 if (spe_regs_addressable)
26682 spe_save_area_ptr = frame_reg_rtx;
26683 save_off = frame_off;
26685 else
26687 /* Make r11 point to the start of the SPE save area. We need
26688 to be careful here if r11 is holding the static chain. If
26689 it is, then temporarily save it in r0. */
26690 HOST_WIDE_INT offset;
26692 if (!(strategy & SAVE_INLINE_GPRS))
26693 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
26694 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
26695 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
26696 save_off = frame_off - offset;
26698 if (using_static_chain_p)
26700 rtx r0 = gen_rtx_REG (Pmode, 0);
26702 START_USE (0);
26703 gcc_assert (info->first_gp_reg_save > 11);
26705 emit_move_insn (r0, spe_save_area_ptr);
26707 else if (REGNO (frame_reg_rtx) != 11)
26708 START_USE (11);
26710 emit_insn (gen_addsi3 (spe_save_area_ptr,
26711 frame_reg_rtx, GEN_INT (offset)));
26712 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
26713 frame_off = -info->spe_gp_save_offset + ool_adjust;
26716 if ((strategy & SAVE_INLINE_GPRS))
26718 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26719 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
26720 emit_frame_save (spe_save_area_ptr, reg_mode,
26721 info->first_gp_reg_save + i,
26722 (info->spe_gp_save_offset + save_off
26723 + reg_size * i),
26724 sp_off - save_off);
26726 else
26728 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
26729 info->spe_gp_save_offset + save_off,
26730 0, reg_mode,
26731 SAVRES_SAVE | SAVRES_GPR);
26733 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
26734 NULL_RTX, NULL_RTX);
26737 /* Move the static chain pointer back. */
26738 if (!spe_regs_addressable)
26740 if (using_static_chain_p)
26742 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
26743 END_USE (0);
26745 else if (REGNO (frame_reg_rtx) != 11)
26746 END_USE (11);
26749 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26751 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26752 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26753 unsigned ptr_regno = ptr_regno_for_savres (sel);
26754 rtx ptr_reg = frame_reg_rtx;
26755 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26756 int end_save = info->gp_save_offset + info->gp_size;
26757 int ptr_off;
26759 if (ptr_regno == 12)
26760 sp_adjust = 0;
26761 if (!ptr_set_up)
26762 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26764 /* Need to adjust r11 (r12) if we saved any FPRs. */
26765 if (end_save + frame_off != 0)
26767 rtx offset = GEN_INT (end_save + frame_off);
26769 if (ptr_set_up)
26770 frame_off = -end_save;
26771 else
26772 NOT_INUSE (ptr_regno);
26773 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26775 else if (!ptr_set_up)
26777 NOT_INUSE (ptr_regno);
26778 emit_move_insn (ptr_reg, frame_reg_rtx);
26780 ptr_off = -end_save;
26781 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26782 info->gp_save_offset + ptr_off,
26783 info->lr_save_offset + ptr_off,
26784 reg_mode, sel);
26785 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26786 NULL_RTX, NULL_RTX);
26787 if (lr)
26788 END_USE (0);
26790 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26792 rtvec p;
26793 int i;
26794 p = rtvec_alloc (32 - info->first_gp_reg_save);
26795 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26796 RTVEC_ELT (p, i)
26797 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26798 frame_reg_rtx,
26799 info->gp_save_offset + frame_off + reg_size * i);
26800 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26801 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26802 NULL_RTX, NULL_RTX);
26804 else if (!WORLD_SAVE_P (info))
26806 int i;
26807 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26808 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
26809 emit_frame_save (frame_reg_rtx, reg_mode,
26810 info->first_gp_reg_save + i,
26811 info->gp_save_offset + frame_off + reg_size * i,
26812 sp_off - frame_off);
26815 if (crtl->calls_eh_return)
26817 unsigned int i;
26818 rtvec p;
26820 for (i = 0; ; ++i)
26822 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26823 if (regno == INVALID_REGNUM)
26824 break;
26827 p = rtvec_alloc (i);
26829 for (i = 0; ; ++i)
26831 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26832 if (regno == INVALID_REGNUM)
26833 break;
26835 insn
26836 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
26837 sp_reg_rtx,
26838 info->ehrd_offset + sp_off + reg_size * (int) i);
26839 RTVEC_ELT (p, i) = insn;
26840 RTX_FRAME_RELATED_P (insn) = 1;
26843 insn = emit_insn (gen_blockage ());
26844 RTX_FRAME_RELATED_P (insn) = 1;
26845 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
26848 /* In the AIX ABI we need to make sure r2 is really saved. */
26849 if (TARGET_AIX && crtl->calls_eh_return)
26851 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
26852 rtx save_insn, join_insn, note;
26853 long toc_restore_insn;
26855 tmp_reg = gen_rtx_REG (Pmode, 11);
26856 tmp_reg_si = gen_rtx_REG (SImode, 11);
26857 if (using_static_chain_p)
26859 START_USE (0);
26860 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
26862 else
26863 START_USE (11);
26864 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
26865 /* Peek at instruction to which this function returns. If it's
26866 restoring r2, then we know we've already saved r2. We can't
26867 unconditionally save r2 because the value we have will already
26868 be updated if we arrived at this function via a plt call or
26869 toc adjusting stub. */
26870 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
26871 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
26872 + RS6000_TOC_SAVE_SLOT);
26873 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
26874 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
26875 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
26876 validate_condition_mode (EQ, CCUNSmode);
26877 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
26878 emit_insn (gen_rtx_SET (compare_result,
26879 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
26880 toc_save_done = gen_label_rtx ();
26881 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26882 gen_rtx_EQ (VOIDmode, compare_result,
26883 const0_rtx),
26884 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
26885 pc_rtx);
26886 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26887 JUMP_LABEL (jump) = toc_save_done;
26888 LABEL_NUSES (toc_save_done) += 1;
26890 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
26891 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
26892 sp_off - frame_off);
26894 emit_label (toc_save_done);
26896 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
26897 have a CFG that has different saves along different paths.
26898 Move the note to a dummy blockage insn, which describes that
26899 R2 is unconditionally saved after the label. */
26900 /* ??? An alternate representation might be a special insn pattern
26901 containing both the branch and the store. That might give the
26902 code that minimizes the number of DW_CFA_advance opcodes more
26903 freedom in placing the annotations. */
26904 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
26905 if (note)
26906 remove_note (save_insn, note);
26907 else
26908 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
26909 copy_rtx (PATTERN (save_insn)), NULL_RTX);
26910 RTX_FRAME_RELATED_P (save_insn) = 0;
26912 join_insn = emit_insn (gen_blockage ());
26913 REG_NOTES (join_insn) = note;
26914 RTX_FRAME_RELATED_P (join_insn) = 1;
26916 if (using_static_chain_p)
26918 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
26919 END_USE (0);
26921 else
26922 END_USE (11);
26925 /* Save CR if we use any that must be preserved. */
26926 if (!WORLD_SAVE_P (info) && info->cr_save_p)
26928 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26929 GEN_INT (info->cr_save_offset + frame_off));
26930 rtx mem = gen_frame_mem (SImode, addr);
26932 /* If we didn't copy cr before, do so now using r0. */
26933 if (cr_save_rtx == NULL_RTX)
26935 START_USE (0);
26936 cr_save_rtx = gen_rtx_REG (SImode, 0);
26937 rs6000_emit_move_from_cr (cr_save_rtx);
26940 /* Saving CR requires a two-instruction sequence: one instruction
26941 to move the CR to a general-purpose register, and a second
26942 instruction that stores the GPR to memory.
26944 We do not emit any DWARF CFI records for the first of these,
26945 because we cannot properly represent the fact that CR is saved in
26946 a register. One reason is that we cannot express that multiple
26947 CR fields are saved; another reason is that on 64-bit, the size
26948 of the CR register in DWARF (4 bytes) differs from the size of
26949 a general-purpose register.
26951 This means if any intervening instruction were to clobber one of
26952 the call-saved CR fields, we'd have incorrect CFI. To prevent
26953 this from happening, we mark the store to memory as a use of
26954 those CR fields, which prevents any such instruction from being
26955 scheduled in between the two instructions. */
26956 rtx crsave_v[9];
26957 int n_crsave = 0;
26958 int i;
26960 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
26961 for (i = 0; i < 8; i++)
26962 if (save_reg_p (CR0_REGNO + i))
26963 crsave_v[n_crsave++]
26964 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
26966 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
26967 gen_rtvec_v (n_crsave, crsave_v)));
26968 END_USE (REGNO (cr_save_rtx));
26970 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
26971 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
26972 so we need to construct a frame expression manually. */
26973 RTX_FRAME_RELATED_P (insn) = 1;
26975 /* Update address to be stack-pointer relative, like
26976 rs6000_frame_related would do. */
26977 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26978 GEN_INT (info->cr_save_offset + sp_off));
26979 mem = gen_frame_mem (SImode, addr);
26981 if (DEFAULT_ABI == ABI_ELFv2)
26983 /* In the ELFv2 ABI we generate separate CFI records for each
26984 CR field that was actually saved. They all point to the
26985 same 32-bit stack slot. */
26986 rtx crframe[8];
26987 int n_crframe = 0;
26989 for (i = 0; i < 8; i++)
26990 if (save_reg_p (CR0_REGNO + i))
26992 crframe[n_crframe]
26993 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
26995 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
26996 n_crframe++;
26999 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27000 gen_rtx_PARALLEL (VOIDmode,
27001 gen_rtvec_v (n_crframe, crframe)));
27003 else
27005 /* In other ABIs, by convention, we use a single CR regnum to
27006 represent the fact that all call-saved CR fields are saved.
27007 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27008 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27009 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27013 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27014 *separate* slots if the routine calls __builtin_eh_return, so
27015 that they can be independently restored by the unwinder. */
27016 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27018 int i, cr_off = info->ehcr_offset;
27019 rtx crsave;
27021 /* ??? We might get better performance by using multiple mfocrf
27022 instructions. */
27023 crsave = gen_rtx_REG (SImode, 0);
27024 emit_insn (gen_movesi_from_cr (crsave));
27026 for (i = 0; i < 8; i++)
27027 if (!call_used_regs[CR0_REGNO + i])
27029 rtvec p = rtvec_alloc (2);
27030 RTVEC_ELT (p, 0)
27031 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27032 RTVEC_ELT (p, 1)
27033 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27035 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27037 RTX_FRAME_RELATED_P (insn) = 1;
27038 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27039 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27040 sp_reg_rtx, cr_off + sp_off));
27042 cr_off += reg_size;
27046 /* Update stack and set back pointer unless this is V.4,
27047 for which it was done previously. */
27048 if (!WORLD_SAVE_P (info) && info->push_p
27049 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27051 rtx ptr_reg = NULL;
27052 int ptr_off = 0;
27054 /* If saving altivec regs we need to be able to address all save
27055 locations using a 16-bit offset. */
27056 if ((strategy & SAVE_INLINE_VRS) == 0
27057 || (info->altivec_size != 0
27058 && (info->altivec_save_offset + info->altivec_size - 16
27059 + info->total_size - frame_off) > 32767)
27060 || (info->vrsave_size != 0
27061 && (info->vrsave_save_offset
27062 + info->total_size - frame_off) > 32767))
27064 int sel = SAVRES_SAVE | SAVRES_VR;
27065 unsigned ptr_regno = ptr_regno_for_savres (sel);
27067 if (using_static_chain_p
27068 && ptr_regno == STATIC_CHAIN_REGNUM)
27069 ptr_regno = 12;
27070 if (REGNO (frame_reg_rtx) != ptr_regno)
27071 START_USE (ptr_regno);
27072 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27073 frame_reg_rtx = ptr_reg;
27074 ptr_off = info->altivec_save_offset + info->altivec_size;
27075 frame_off = -ptr_off;
27077 else if (REGNO (frame_reg_rtx) == 1)
27078 frame_off = info->total_size;
27079 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27080 ptr_reg, ptr_off);
27081 if (REGNO (frame_reg_rtx) == 12)
27082 sp_adjust = 0;
27083 sp_off = info->total_size;
27084 if (frame_reg_rtx != sp_reg_rtx)
27085 rs6000_emit_stack_tie (frame_reg_rtx, false);
27088 /* Set frame pointer, if needed. */
27089 if (frame_pointer_needed)
27091 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27092 sp_reg_rtx);
27093 RTX_FRAME_RELATED_P (insn) = 1;
27096 /* Save AltiVec registers if needed. Save here because the red zone does
27097 not always include AltiVec registers. */
27098 if (!WORLD_SAVE_P (info)
27099 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27101 int end_save = info->altivec_save_offset + info->altivec_size;
27102 int ptr_off;
27103 /* Oddly, the vector save/restore functions point r0 at the end
27104 of the save area, then use r11 or r12 to load offsets for
27105 [reg+reg] addressing. */
27106 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27107 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27108 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27110 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27111 NOT_INUSE (0);
27112 if (scratch_regno == 12)
27113 sp_adjust = 0;
27114 if (end_save + frame_off != 0)
27116 rtx offset = GEN_INT (end_save + frame_off);
27118 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27120 else
27121 emit_move_insn (ptr_reg, frame_reg_rtx);
27123 ptr_off = -end_save;
27124 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27125 info->altivec_save_offset + ptr_off,
27126 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27127 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27128 NULL_RTX, NULL_RTX);
27129 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27131 /* The oddity mentioned above clobbered our frame reg. */
27132 emit_move_insn (frame_reg_rtx, ptr_reg);
27133 frame_off = ptr_off;
27136 else if (!WORLD_SAVE_P (info)
27137 && info->altivec_size != 0)
27139 int i;
27141 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27142 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27144 rtx areg, savereg, mem;
27145 HOST_WIDE_INT offset;
27147 offset = (info->altivec_save_offset + frame_off
27148 + 16 * (i - info->first_altivec_reg_save));
27150 savereg = gen_rtx_REG (V4SImode, i);
27152 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
27154 mem = gen_frame_mem (V4SImode,
27155 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27156 GEN_INT (offset)));
27157 insn = emit_insn (gen_rtx_SET (mem, savereg));
27158 areg = NULL_RTX;
27160 else
27162 NOT_INUSE (0);
27163 areg = gen_rtx_REG (Pmode, 0);
27164 emit_move_insn (areg, GEN_INT (offset));
27166 /* AltiVec addressing mode is [reg+reg]. */
27167 mem = gen_frame_mem (V4SImode,
27168 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27170 /* Rather than emitting a generic move, force use of the stvx
27171 instruction, which we always want on ISA 2.07 (power8) systems.
27172 In particular we don't want xxpermdi/stxvd2x for little
27173 endian. */
27174 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27177 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27178 areg, GEN_INT (offset));
27182 /* VRSAVE is a bit vector representing which AltiVec registers
27183 are used. The OS uses this to determine which vector
27184 registers to save on a context switch. We need to save
27185 VRSAVE on the stack frame, add whatever AltiVec registers we
27186 used in this function, and do the corresponding magic in the
27187 epilogue. */
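/* The code below boils down to this schematic sequence (register
   numbers and the exact immediate forms are illustrative):
       mfvrsave rN             # or mfspr rN,256
       stw  rN,VRSAVE_OFF(frame)
       oris/ori rN,rN,MASK     # add this function's vector regs
       mtvrsave rN  */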
27189 if (!WORLD_SAVE_P (info)
27190 && info->vrsave_size != 0)
27192 rtx reg, vrsave;
27193 int offset;
27194 int save_regno;
27196 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
27197 be using r12 as frame_reg_rtx and r11 as the static chain
27198 pointer for nested functions. */
27199 save_regno = 12;
27200 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27201 && !using_static_chain_p)
27202 save_regno = 11;
27203 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27205 save_regno = 11;
27206 if (using_static_chain_p)
27207 save_regno = 0;
27210 NOT_INUSE (save_regno);
27211 reg = gen_rtx_REG (SImode, save_regno);
27212 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
27213 if (TARGET_MACHO)
27214 emit_insn (gen_get_vrsave_internal (reg));
27215 else
27216 emit_insn (gen_rtx_SET (reg, vrsave));
27218 /* Save VRSAVE. */
27219 offset = info->vrsave_save_offset + frame_off;
27220 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
27222 /* Include the registers in the mask. */
27223 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
27225 insn = emit_insn (generate_set_vrsave (reg, info, 0));
27228 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27229 if (!TARGET_SINGLE_PIC_BASE
27230 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
27231 || (DEFAULT_ABI == ABI_V4
27232 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27233 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27235 /* If emit_load_toc_table will use the link register, we need to save
27236 it. We use R12 for this purpose because emit_load_toc_table
27237 can use register 0. This allows us to use a plain 'blr' to return
27238 from the procedure more often. */
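/* Schematically, when the save is needed:
       mflr r12                # stash LR
       <TOC setup>             # may clobber LR, e.g. via bcl 20,31
       mtlr r12                # restore, with a REG_CFA_RESTORE note
   leaving LR intact without forcing a stack save.  */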
27239 int save_LR_around_toc_setup = (TARGET_ELF
27240 && DEFAULT_ABI == ABI_V4
27241 && flag_pic
27242 && ! info->lr_save_p
27243 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27244 if (save_LR_around_toc_setup)
27246 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27247 rtx tmp = gen_rtx_REG (Pmode, 12);
27249 sp_adjust = 0;
27250 insn = emit_move_insn (tmp, lr);
27251 RTX_FRAME_RELATED_P (insn) = 1;
27253 rs6000_emit_load_toc_table (TRUE);
27255 insn = emit_move_insn (lr, tmp);
27256 add_reg_note (insn, REG_CFA_RESTORE, lr);
27257 RTX_FRAME_RELATED_P (insn) = 1;
27259 else
27260 rs6000_emit_load_toc_table (TRUE);
27263 #if TARGET_MACHO
27264 if (!TARGET_SINGLE_PIC_BASE
27265 && DEFAULT_ABI == ABI_DARWIN
27266 && flag_pic && crtl->uses_pic_offset_table)
27268 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27269 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27271 /* Save and restore LR locally around this call (in R0). */
27272 if (!info->lr_save_p)
27273 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27275 emit_insn (gen_load_macho_picbase (src));
27277 emit_move_insn (gen_rtx_REG (Pmode,
27278 RS6000_PIC_OFFSET_TABLE_REGNUM),
27279 lr);
27281 if (!info->lr_save_p)
27282 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27284 #endif
27286 /* If we need to, save the TOC register after doing the stack setup.
27287 Do not emit eh frame info for this save. The unwinder wants info,
27288 conceptually attached to instructions in this function, about
27289 register values in the caller of this function. This R2 may have
27290 already been changed from the value in the caller.
27291 We don't attempt to write accurate DWARF EH frame info for R2
27292 because code emitted by gcc for a (non-pointer) function call
27293 doesn't save and restore R2. Instead, R2 is managed out-of-line
27294 by a linker generated plt call stub when the function resides in
27295 a shared library. This behavior is costly to describe in DWARF,
27296 both in terms of the size of DWARF info and the time taken in the
27297 unwinder to interpret it. R2 changes, apart from the
27298 calls_eh_return case earlier in this function, are handled by
27299 linux-unwind.h frob_update_context. */
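/* The slot written below is the ABI-defined TOC save doubleword,
   e.g. sp+40 under 64-bit ELFv1/AIX and sp+24 under ELFv2 (see
   RS6000_TOC_SAVE_SLOT), so the store is typically std 2,24(1)
   or std 2,40(1).  */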
27300 if (rs6000_save_toc_in_prologue_p ())
27302 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27303 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27306 if (using_split_stack && split_stack_arg_pointer_used_p ())
27308 /* Set up the arg pointer (r12) for -fsplit-stack code. If
27309 __morestack was called, it left the arg pointer to the old
27310 stack in r29. Otherwise, the arg pointer is the top of the
27311 current frame. */
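/* Three cases below: (1) the stack adjustment insn is still visible
   (sp_adjust) -- copy the pre-adjustment sp into r12 just before it;
   (2) otherwise compute top-of-frame into r12 from frame_reg_rtx
   plus frame_off; (3) if the frame was pushed, test cr7 (set by the
   split-stack check) and pick up r29 when __morestack actually ran.  */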
27312 cfun->machine->split_stack_argp_used = true;
27313 if (sp_adjust)
27315 rtx r12 = gen_rtx_REG (Pmode, 12);
27316 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
27317 emit_insn_before (set_r12, sp_adjust);
27319 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
27321 rtx r12 = gen_rtx_REG (Pmode, 12);
27322 if (frame_off == 0)
27323 emit_move_insn (r12, frame_reg_rtx);
27324 else
27325 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
27327 if (info->push_p)
27329 rtx r12 = gen_rtx_REG (Pmode, 12);
27330 rtx r29 = gen_rtx_REG (Pmode, 29);
27331 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
27332 rtx not_more = gen_label_rtx ();
27333 rtx jump;
27335 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27336 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
27337 gen_rtx_LABEL_REF (VOIDmode, not_more),
27338 pc_rtx);
27339 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27340 JUMP_LABEL (jump) = not_more;
27341 LABEL_NUSES (not_more) += 1;
27342 emit_move_insn (r12, r29);
27343 emit_label (not_more);
27348 /* Output .extern statements for the save/restore routines we use. */
27350 static void
27351 rs6000_output_savres_externs (FILE *file)
27353 rs6000_stack_t *info = rs6000_stack_info ();
27355 if (TARGET_DEBUG_STACK)
27356 debug_stack_info (info);
27358 /* Write .extern for any function we will call to save and restore
27359 fp values. */
27360 if (info->first_fp_reg_save < 64
27361 && !TARGET_MACHO
27362 && !TARGET_ELF)
27364 char *name;
27365 int regno = info->first_fp_reg_save - 32;
27367 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27369 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27370 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27371 name = rs6000_savres_routine_name (info, regno, sel);
27372 fprintf (file, "\t.extern %s\n", name);
27374 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27376 bool lr = (info->savres_strategy
27377 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27378 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27379 name = rs6000_savres_routine_name (info, regno, sel);
27380 fprintf (file, "\t.extern %s\n", name);
27385 /* Write function prologue. */
27387 static void
27388 rs6000_output_function_prologue (FILE *file,
27389 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
27391 if (!cfun->is_thunk)
27392 rs6000_output_savres_externs (file);
27394 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27395 immediately after the global entry point label. */
27396 if (rs6000_global_entry_point_needed_p ())
27398 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27400 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27402 if (TARGET_CMODEL != CMODEL_LARGE)
27404 /* In the small and medium code models, we assume the TOC is less
27405 than 2 GB away from the text section, so it can be computed via the
27406 following two-instruction sequence. */
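/* With the internal label rendered here as .LCF0, the emitted text
   is, concretely:
   0:   addis 2,12,.TOC.-.LCF0@ha
        addi  2,2,.TOC.-.LCF0@l
   i.e. r2 = r12 + (.TOC. - entry), r12 holding the global entry
   address on entry as the ELFv2 ABI requires.  */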
27407 char buf[256];
27409 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27410 fprintf (file, "0:\taddis 2,12,.TOC.-");
27411 assemble_name (file, buf);
27412 fprintf (file, "@ha\n");
27413 fprintf (file, "\taddi 2,2,.TOC.-");
27414 assemble_name (file, buf);
27415 fprintf (file, "@l\n");
27417 else
27419 /* In the large code model, we allow arbitrary offsets between the
27420 TOC and the text section, so we have to load the offset from
27421 memory. The data field is emitted directly before the global
27422 entry point in rs6000_elf_declare_function_name. */
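/* Schematically (label spellings illustrative), with a data word
   holding .TOC.-entry emitted just before the global entry point:
   0:   ld  2,.LCL0-.LCF0(12)   # load that offset word
        add 2,2,12  */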
27423 char buf[256];
27425 #ifdef HAVE_AS_ENTRY_MARKERS
27426 /* If supported by the linker, emit a marker relocation. If the
27427 total code size of the final executable or shared library
27428 happens to fit into 2 GB after all, the linker will replace
27429 this code sequence with the sequence for the small or medium
27430 code model. */
27431 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27432 #endif
27433 fprintf (file, "\tld 2,");
27434 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27435 assemble_name (file, buf);
27436 fprintf (file, "-");
27437 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27438 assemble_name (file, buf);
27439 fprintf (file, "(12)\n");
27440 fprintf (file, "\tadd 2,2,12\n");
27443 fputs ("\t.localentry\t", file);
27444 assemble_name (file, name);
27445 fputs (",.-", file);
27446 assemble_name (file, name);
27447 fputs ("\n", file);
27450 /* Output -mprofile-kernel code. This needs to be done here instead of
27451 in output_function_profile since it must go after the ELFv2 ABI
27452 local entry point. */
27453 if (TARGET_PROFILE_KERNEL && crtl->profile)
27455 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27456 gcc_assert (!TARGET_32BIT);
27458 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27460 /* In the ELFv2 ABI we have no compiler stack word. It must be
27461 the responsibility of _mcount to preserve the static chain
27462 register if required. */
27463 if (DEFAULT_ABI != ABI_ELFv2
27464 && cfun->static_chain_decl != NULL)
27466 asm_fprintf (file, "\tstd %s,24(%s)\n",
27467 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27468 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27469 asm_fprintf (file, "\tld %s,24(%s)\n",
27470 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27472 else
27473 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
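/* For a nested function under ELFv1/AIX the text emitted above is,
   schematically (r11 being the static chain on ELF targets):
       mflr 0
       std  11,24(1)
       bl   _mcount
       ld   11,24(1)
   under ELFv2 the static-chain spill is omitted and left to
   _mcount itself.  */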
27476 rs6000_pic_labelno++;
27479 /* -mprofile-kernel code calls mcount before the function prologue,
27480 so a profiled leaf function should stay a leaf function. */
27481 static bool
27482 rs6000_keep_leaf_when_profiled ()
27484 return TARGET_PROFILE_KERNEL;
27487 /* Non-zero if vmx regs are restored before the frame pop, zero if
27488 we restore after the pop when possible. */
27489 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27491 /* Restoring cr is a two step process: loading a reg from the frame
27492 save, then moving the reg to cr. For ABI_V4 we must let the
27493 unwinder know that the stack location is no longer valid at or
27494 before the stack deallocation, but we can't emit a cfa_restore for
27495 cr at the stack deallocation like we do for other registers.
27496 The trouble is that it is possible for the move to cr to be
27497 scheduled after the stack deallocation. So say exactly where cr
27498 is located on each of the two insns. */
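/* Sketch of the two insns and their notes:
       lwz   rN,CR_OFF(frame)   # load_cr_save: REG_CFA_REGISTER,
                                # "cr2 currently lives in rN"
       mtcrf MASK,rN            # restore_saved_cr: REG_CFA_RESTORE,
                                # cr restored as of this insn
   so cr's location is described exactly even if the mtcrf is
   scheduled past the stack deallocation.  */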
27500 static rtx
27501 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27503 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27504 rtx reg = gen_rtx_REG (SImode, regno);
27505 rtx_insn *insn = emit_move_insn (reg, mem);
27507 if (!exit_func && DEFAULT_ABI == ABI_V4)
27509 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27510 rtx set = gen_rtx_SET (reg, cr);
27512 add_reg_note (insn, REG_CFA_REGISTER, set);
27513 RTX_FRAME_RELATED_P (insn) = 1;
27515 return reg;
27518 /* Reload CR from REG. */
27520 static void
27521 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27523 int count = 0;
27524 int i;
27526 if (using_mfcr_multiple)
27528 for (i = 0; i < 8; i++)
27529 if (save_reg_p (CR0_REGNO + i))
27530 count++;
27531 gcc_assert (count);
27534 if (using_mfcr_multiple && count > 1)
27536 rtx_insn *insn;
27537 rtvec p;
27538 int ndx;
27540 p = rtvec_alloc (count);
27542 ndx = 0;
27543 for (i = 0; i < 8; i++)
27544 if (save_reg_p (CR0_REGNO + i))
27546 rtvec r = rtvec_alloc (2);
27547 RTVEC_ELT (r, 0) = reg;
27548 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27549 RTVEC_ELT (p, ndx) =
27550 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27551 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27552 ndx++;
27554 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27555 gcc_assert (ndx == count);
27557 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27558 CR field separately. */
27559 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27561 for (i = 0; i < 8; i++)
27562 if (save_reg_p (CR0_REGNO + i))
27563 add_reg_note (insn, REG_CFA_RESTORE,
27564 gen_rtx_REG (SImode, CR0_REGNO + i));
27566 RTX_FRAME_RELATED_P (insn) = 1;
27569 else
27570 for (i = 0; i < 8; i++)
27571 if (save_reg_p (CR0_REGNO + i))
27573 rtx insn = emit_insn (gen_movsi_to_cr_one
27574 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27576 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27577 CR field separately, attached to the insn that in fact
27578 restores this particular CR field. */
27579 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27581 add_reg_note (insn, REG_CFA_RESTORE,
27582 gen_rtx_REG (SImode, CR0_REGNO + i));
27584 RTX_FRAME_RELATED_P (insn) = 1;
27588 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27589 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27590 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27592 rtx_insn *insn = get_last_insn ();
27593 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27595 add_reg_note (insn, REG_CFA_RESTORE, cr);
27596 RTX_FRAME_RELATED_P (insn) = 1;
27600 /* Like cr, the move to lr instruction can be scheduled after the
27601 stack deallocation, but unlike cr, its stack frame save is still
27602 valid. So we only need to emit the cfa_restore on the correct
27603 instruction. */
27605 static void
27606 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27608 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27609 rtx reg = gen_rtx_REG (Pmode, regno);
27611 emit_move_insn (reg, mem);
27614 static void
27615 restore_saved_lr (int regno, bool exit_func)
27617 rtx reg = gen_rtx_REG (Pmode, regno);
27618 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27619 rtx_insn *insn = emit_move_insn (lr, reg);
27621 if (!exit_func && flag_shrink_wrap)
27623 add_reg_note (insn, REG_CFA_RESTORE, lr);
27624 RTX_FRAME_RELATED_P (insn) = 1;
27628 static rtx
27629 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27631 if (DEFAULT_ABI == ABI_ELFv2)
27633 int i;
27634 for (i = 0; i < 8; i++)
27635 if (save_reg_p (CR0_REGNO + i))
27637 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27638 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27639 cfa_restores);
27642 else if (info->cr_save_p)
27643 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27644 gen_rtx_REG (SImode, CR2_REGNO),
27645 cfa_restores);
27647 if (info->lr_save_p)
27648 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27649 gen_rtx_REG (Pmode, LR_REGNO),
27650 cfa_restores);
27651 return cfa_restores;
27654 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27655 V.4 doesn't have any stack cushion; the AIX ABIs keep 220 or 288 bytes
27656 below the stack pointer not clobbered by signals. */
27658 static inline bool
27659 offset_below_red_zone_p (HOST_WIDE_INT offset)
27661 return offset < (DEFAULT_ABI == ABI_V4
27662 ? 0
27663 : TARGET_32BIT ? -220 : -288);
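/* Worked example: on 64-bit AIX/ELF the cushion is 288 bytes, so a
   slot at offset -300 from the incoming stack pointer can be
   clobbered by a signal handler once the frame is popped, while a
   slot at -200 remains safe afterwards.  */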
27666 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27668 static void
27669 emit_cfa_restores (rtx cfa_restores)
27671 rtx_insn *insn = get_last_insn ();
27672 rtx *loc = &REG_NOTES (insn);
27674 while (*loc)
27675 loc = &XEXP (*loc, 1);
27676 *loc = cfa_restores;
27677 RTX_FRAME_RELATED_P (insn) = 1;
27680 /* Emit function epilogue as insns. */
27682 void
27683 rs6000_emit_epilogue (int sibcall)
27685 rs6000_stack_t *info;
27686 int restoring_GPRs_inline;
27687 int restoring_FPRs_inline;
27688 int using_load_multiple;
27689 int using_mtcr_multiple;
27690 int use_backchain_to_restore_sp;
27691 int restore_lr;
27692 int strategy;
27693 HOST_WIDE_INT frame_off = 0;
27694 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27695 rtx frame_reg_rtx = sp_reg_rtx;
27696 rtx cfa_restores = NULL_RTX;
27697 rtx insn;
27698 rtx cr_save_reg = NULL_RTX;
27699 machine_mode reg_mode = Pmode;
27700 int reg_size = TARGET_32BIT ? 4 : 8;
27701 int i;
27702 bool exit_func;
27703 unsigned ptr_regno;
27705 info = rs6000_stack_info ();
27707 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
27709 reg_mode = V2SImode;
27710 reg_size = 8;
27713 strategy = info->savres_strategy;
27714 using_load_multiple = strategy & REST_MULTIPLE;
27715 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27716 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27717 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
27718 || rs6000_cpu == PROCESSOR_PPC603
27719 || rs6000_cpu == PROCESSOR_PPC750
27720 || optimize_size);
27721 /* Restore via the backchain when we have a large frame, since this
27722 is more efficient than an addis, addi pair. The second condition
27723 here will not trigger at the moment; we don't actually need a
27724 frame pointer for alloca, but the generic parts of the compiler
27725 give us one anyway. */
27726 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27727 ? info->lr_save_offset
27728 : 0) > 32767
27729 || (cfun->calls_alloca
27730 && !frame_pointer_needed));
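/* Illustrative arithmetic: a D-form displacement is 16-bit signed,
   reaching 32767.  For total_size == 40000 an epilogue add would
   need two insns, e.g.
       addis r11,r1,1 ; addi r11,r11,-25536
   whereas one load of the backchain, ld r11,0(r1), recovers the
   old stack pointer directly.  */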
27731 restore_lr = (info->lr_save_p
27732 && (restoring_FPRs_inline
27733 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27734 && (restoring_GPRs_inline
27735 || info->first_fp_reg_save < 64));
27737 if (WORLD_SAVE_P (info))
27739 int i, j;
27740 char rname[30];
27741 const char *alloc_rname;
27742 rtvec p;
27744 /* eh_rest_world_r10 will return to the location saved in the LR
27745 stack slot (which is not likely to be our caller).
27746 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27747 rest_world is similar, except any R10 parameter is ignored.
27748 The exception-handling stuff that was here in 2.95 is no
27749 longer necessary. */
27751 p = rtvec_alloc (9
27753 + 32 - info->first_gp_reg_save
27754 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27755 + 63 + 1 - info->first_fp_reg_save);
27757 strcpy (rname, ((crtl->calls_eh_return) ?
27758 "*eh_rest_world_r10" : "*rest_world"));
27759 alloc_rname = ggc_strdup (rname);
27761 j = 0;
27762 RTVEC_ELT (p, j++) = ret_rtx;
27763 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27764 gen_rtx_REG (Pmode,
27765 LR_REGNO));
27766 RTVEC_ELT (p, j++)
27767 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27768 /* The instruction pattern requires a clobber here;
27769 it is shared with the restVEC helper. */
27770 RTVEC_ELT (p, j++)
27771 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27774 /* CR register traditionally saved as CR2. */
27775 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27776 RTVEC_ELT (p, j++)
27777 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27778 if (flag_shrink_wrap)
27780 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27781 gen_rtx_REG (Pmode, LR_REGNO),
27782 cfa_restores);
27783 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27787 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27789 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27790 RTVEC_ELT (p, j++)
27791 = gen_frame_load (reg,
27792 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27793 if (flag_shrink_wrap)
27794 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27796 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27798 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27799 RTVEC_ELT (p, j++)
27800 = gen_frame_load (reg,
27801 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27802 if (flag_shrink_wrap)
27803 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27805 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27807 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27808 ? DFmode : SFmode),
27809 info->first_fp_reg_save + i);
27810 RTVEC_ELT (p, j++)
27811 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27812 if (flag_shrink_wrap)
27813 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27815 RTVEC_ELT (p, j++)
27816 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27817 RTVEC_ELT (p, j++)
27818 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27819 RTVEC_ELT (p, j++)
27820 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27821 RTVEC_ELT (p, j++)
27822 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27823 RTVEC_ELT (p, j++)
27824 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27825 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27827 if (flag_shrink_wrap)
27829 REG_NOTES (insn) = cfa_restores;
27830 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27831 RTX_FRAME_RELATED_P (insn) = 1;
27833 return;
27836 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27837 if (info->push_p)
27838 frame_off = info->total_size;
27840 /* Restore AltiVec registers if we must do so before adjusting the
27841 stack. */
27842 if (info->altivec_size != 0
27843 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27844 || (DEFAULT_ABI != ABI_V4
27845 && offset_below_red_zone_p (info->altivec_save_offset))))
27847 int i;
27848 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27850 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27851 if (use_backchain_to_restore_sp)
27853 int frame_regno = 11;
27855 if ((strategy & REST_INLINE_VRS) == 0)
27857 /* Of r11 and r12, select the one not clobbered by an
27858 out-of-line restore function for the frame register. */
27859 frame_regno = 11 + 12 - scratch_regno;
27861 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27862 emit_move_insn (frame_reg_rtx,
27863 gen_rtx_MEM (Pmode, sp_reg_rtx));
27864 frame_off = 0;
27866 else if (frame_pointer_needed)
27867 frame_reg_rtx = hard_frame_pointer_rtx;
27869 if ((strategy & REST_INLINE_VRS) == 0)
27871 int end_save = info->altivec_save_offset + info->altivec_size;
27872 int ptr_off;
27873 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27874 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27876 if (end_save + frame_off != 0)
27878 rtx offset = GEN_INT (end_save + frame_off);
27880 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27882 else
27883 emit_move_insn (ptr_reg, frame_reg_rtx);
27885 ptr_off = -end_save;
27886 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27887 info->altivec_save_offset + ptr_off,
27888 0, V4SImode, SAVRES_VR);
27890 else
27892 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27893 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27895 rtx addr, areg, mem, insn;
27896 rtx reg = gen_rtx_REG (V4SImode, i);
27897 HOST_WIDE_INT offset
27898 = (info->altivec_save_offset + frame_off
27899 + 16 * (i - info->first_altivec_reg_save));
27901 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
27903 mem = gen_frame_mem (V4SImode,
27904 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27905 GEN_INT (offset)));
27906 insn = gen_rtx_SET (reg, mem);
27908 else
27910 areg = gen_rtx_REG (Pmode, 0);
27911 emit_move_insn (areg, GEN_INT (offset));
27913 /* AltiVec addressing mode is [reg+reg]. */
27914 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27915 mem = gen_frame_mem (V4SImode, addr);
27917 /* Rather than emitting a generic move, force use of the
27918 lvx instruction, which we always want. In particular we
27919 don't want lxvd2x/xxpermdi for little endian. */
27920 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27923 (void) emit_insn (insn);
27927 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27928 if (((strategy & REST_INLINE_VRS) == 0
27929 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27930 && (flag_shrink_wrap
27931 || (offset_below_red_zone_p
27932 (info->altivec_save_offset
27933 + 16 * (i - info->first_altivec_reg_save)))))
27935 rtx reg = gen_rtx_REG (V4SImode, i);
27936 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27940 /* Restore VRSAVE if we must do so before adjusting the stack. */
27941 if (info->vrsave_size != 0
27942 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27943 || (DEFAULT_ABI != ABI_V4
27944 && offset_below_red_zone_p (info->vrsave_save_offset))))
27946 rtx reg;
27948 if (frame_reg_rtx == sp_reg_rtx)
27950 if (use_backchain_to_restore_sp)
27952 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27953 emit_move_insn (frame_reg_rtx,
27954 gen_rtx_MEM (Pmode, sp_reg_rtx));
27955 frame_off = 0;
27957 else if (frame_pointer_needed)
27958 frame_reg_rtx = hard_frame_pointer_rtx;
27961 reg = gen_rtx_REG (SImode, 12);
27962 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27963 info->vrsave_save_offset + frame_off));
27965 emit_insn (generate_set_vrsave (reg, info, 1));
27968 insn = NULL_RTX;
27969 /* If we have a large stack frame, restore the old stack pointer
27970 using the backchain. */
27971 if (use_backchain_to_restore_sp)
27973 if (frame_reg_rtx == sp_reg_rtx)
27975 /* Under V.4, don't reset the stack pointer until after we're done
27976 loading the saved registers. */
27977 if (DEFAULT_ABI == ABI_V4)
27978 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27980 insn = emit_move_insn (frame_reg_rtx,
27981 gen_rtx_MEM (Pmode, sp_reg_rtx));
27982 frame_off = 0;
27984 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27985 && DEFAULT_ABI == ABI_V4)
27988 /* frame_reg_rtx has been set up by the altivec restore. */
27989 ;
27988 else
27990 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
27991 frame_reg_rtx = sp_reg_rtx;
27994 /* If we have a frame pointer, we can restore the old stack pointer
27995 from it. */
27996 else if (frame_pointer_needed)
27998 frame_reg_rtx = sp_reg_rtx;
27999 if (DEFAULT_ABI == ABI_V4)
28000 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28001 /* Prevent reordering memory accesses against stack pointer restore. */
28002 else if (cfun->calls_alloca
28003 || offset_below_red_zone_p (-info->total_size))
28004 rs6000_emit_stack_tie (frame_reg_rtx, true);
28006 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28007 GEN_INT (info->total_size)));
28008 frame_off = 0;
28010 else if (info->push_p
28011 && DEFAULT_ABI != ABI_V4
28012 && !crtl->calls_eh_return)
28014 /* Prevent reordering memory accesses against stack pointer restore. */
28015 if (cfun->calls_alloca
28016 || offset_below_red_zone_p (-info->total_size))
28017 rs6000_emit_stack_tie (frame_reg_rtx, false);
28018 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28019 GEN_INT (info->total_size)));
28020 frame_off = 0;
28022 if (insn && frame_reg_rtx == sp_reg_rtx)
28024 if (cfa_restores)
28026 REG_NOTES (insn) = cfa_restores;
28027 cfa_restores = NULL_RTX;
28029 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28030 RTX_FRAME_RELATED_P (insn) = 1;
28033 /* Restore AltiVec registers if we have not done so already. */
28034 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28035 && info->altivec_size != 0
28036 && (DEFAULT_ABI == ABI_V4
28037 || !offset_below_red_zone_p (info->altivec_save_offset)))
28039 int i;
28041 if ((strategy & REST_INLINE_VRS) == 0)
28043 int end_save = info->altivec_save_offset + info->altivec_size;
28044 int ptr_off;
28045 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28046 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28047 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28049 if (end_save + frame_off != 0)
28051 rtx offset = GEN_INT (end_save + frame_off);
28053 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28055 else
28056 emit_move_insn (ptr_reg, frame_reg_rtx);
28058 ptr_off = -end_save;
28059 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28060 info->altivec_save_offset + ptr_off,
28061 0, V4SImode, SAVRES_VR);
28062 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28064 /* Frame reg was clobbered by out-of-line save. Restore it
28065 from ptr_reg, and if we are calling out-of-line gpr or
28066 fpr restore set up the correct pointer and offset. */
28067 unsigned newptr_regno = 1;
28068 if (!restoring_GPRs_inline)
28070 bool lr = info->gp_save_offset + info->gp_size == 0;
28071 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28072 newptr_regno = ptr_regno_for_savres (sel);
28073 end_save = info->gp_save_offset + info->gp_size;
28075 else if (!restoring_FPRs_inline)
28077 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28078 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28079 newptr_regno = ptr_regno_for_savres (sel);
28080 end_save = info->fp_save_offset + info->fp_size;
28083 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28084 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28086 if (end_save + ptr_off != 0)
28088 rtx offset = GEN_INT (end_save + ptr_off);
28090 frame_off = -end_save;
28091 if (TARGET_32BIT)
28092 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28093 ptr_reg, offset));
28094 else
28095 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28096 ptr_reg, offset));
28098 else
28100 frame_off = ptr_off;
28101 emit_move_insn (frame_reg_rtx, ptr_reg);
28105 else
28107 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28108 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28110 rtx addr, areg, mem, insn;
28111 rtx reg = gen_rtx_REG (V4SImode, i);
28112 HOST_WIDE_INT offset
28113 = (info->altivec_save_offset + frame_off
28114 + 16 * (i - info->first_altivec_reg_save));
28116 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
28118 mem = gen_frame_mem (V4SImode,
28119 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28120 GEN_INT (offset)));
28121 insn = gen_rtx_SET (reg, mem);
28123 else
28125 areg = gen_rtx_REG (Pmode, 0);
28126 emit_move_insn (areg, GEN_INT (offset));
28128 /* AltiVec addressing mode is [reg+reg]. */
28129 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28130 mem = gen_frame_mem (V4SImode, addr);
28132 /* Rather than emitting a generic move, force use of the
28133 lvx instruction, which we always want. In particular we
28134 don't want lxvd2x/xxpermdi for little endian. */
28135 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28138 (void) emit_insn (insn);
28142 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28143 if (((strategy & REST_INLINE_VRS) == 0
28144 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28145 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28147 rtx reg = gen_rtx_REG (V4SImode, i);
28148 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28152 /* Restore VRSAVE if we have not done so already. */
28153 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28154 && info->vrsave_size != 0
28155 && (DEFAULT_ABI == ABI_V4
28156 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28158 rtx reg;
28160 reg = gen_rtx_REG (SImode, 12);
28161 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28162 info->vrsave_save_offset + frame_off));
28164 emit_insn (generate_set_vrsave (reg, info, 1));
28167 /* If we exit by an out-of-line restore function on ABI_V4 then that
28168 function will deallocate the stack, so we don't need to worry
28169 about the unwinder restoring cr from an invalid stack frame
28170 location. */
28171 exit_func = (!restoring_FPRs_inline
28172 || (!restoring_GPRs_inline
28173 && info->first_fp_reg_save == 64));
28175 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28176 *separate* slots if the routine calls __builtin_eh_return, so
28177 that they can be independently restored by the unwinder. */
28178 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28180 int i, cr_off = info->ehcr_offset;
28182 for (i = 0; i < 8; i++)
28183 if (!call_used_regs[CR0_REGNO + i])
28185 rtx reg = gen_rtx_REG (SImode, 0);
28186 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28187 cr_off + frame_off));
28189 insn = emit_insn (gen_movsi_to_cr_one
28190 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28192 if (!exit_func && flag_shrink_wrap)
28194 add_reg_note (insn, REG_CFA_RESTORE,
28195 gen_rtx_REG (SImode, CR0_REGNO + i));
28197 RTX_FRAME_RELATED_P (insn) = 1;
28200 cr_off += reg_size;
28204 /* Get the old lr if we saved it. If we are restoring registers
28205 out-of-line, then the out-of-line routines can do this for us. */
28206 if (restore_lr && restoring_GPRs_inline)
28207 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28209 /* Get the old cr if we saved it. */
28210 if (info->cr_save_p)
28212 unsigned cr_save_regno = 12;
28214 if (!restoring_GPRs_inline)
28216 /* Ensure we don't use the register used by the out-of-line
28217 gpr register restore below. */
28218 bool lr = info->gp_save_offset + info->gp_size == 0;
28219 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28220 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28222 if (gpr_ptr_regno == 12)
28223 cr_save_regno = 11;
28224 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28226 else if (REGNO (frame_reg_rtx) == 12)
28227 cr_save_regno = 11;
28229 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28230 info->cr_save_offset + frame_off,
28231 exit_func);
28234 /* Set LR here to try to overlap restores below. */
28235 if (restore_lr && restoring_GPRs_inline)
28236 restore_saved_lr (0, exit_func);
28238 /* Load exception handler data registers, if needed. */
28239 if (crtl->calls_eh_return)
28241 unsigned int i, regno;
28243 if (TARGET_AIX)
28245 rtx reg = gen_rtx_REG (reg_mode, 2);
28246 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28247 frame_off + RS6000_TOC_SAVE_SLOT));
28250 for (i = 0; ; ++i)
28252 rtx mem;
28254 regno = EH_RETURN_DATA_REGNO (i);
28255 if (regno == INVALID_REGNUM)
28256 break;
28258 /* Note: possible use of r0 here to address SPE regs. */
28259 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28260 info->ehrd_offset + frame_off
28261 + reg_size * (int) i);
28263 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28267 /* Restore GPRs. This is done as a PARALLEL if we are using
28268 the load-multiple instructions. */
28269 if (TARGET_SPE_ABI
28270 && info->spe_64bit_regs_used
28271 && info->first_gp_reg_save != 32)
28273 /* Determine whether we can address all of the registers that need
28274 to be saved with an offset from frame_reg_rtx that fits in
28275 the small const field for SPE memory instructions. */
28276 int spe_regs_addressable
28277 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
28278 + reg_size * (32 - info->first_gp_reg_save - 1))
28279 && restoring_GPRs_inline);
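/* The constraint being checked: evldd's displacement is a 5-bit
   unsigned field scaled by 8, so only offsets 0,8,...,248 are
   directly encodable; anything further away needs the r11
   adjustment below.  */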
28281 if (!spe_regs_addressable)
28283 int ool_adjust = 0;
28284 rtx old_frame_reg_rtx = frame_reg_rtx;
28285 /* Make r11 point to the start of the SPE save area. We worried about
28286 not clobbering it when we were saving registers in the prologue.
28287 There's no need to worry here because the static chain is passed
28288 anew to every function. */
28290 if (!restoring_GPRs_inline)
28291 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
28292 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28293 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
28294 GEN_INT (info->spe_gp_save_offset
28295 + frame_off
28296 - ool_adjust)));
28297 /* Keep the invariant that frame_reg_rtx + frame_off points
28298 at the top of the stack frame. */
28299 frame_off = -info->spe_gp_save_offset + ool_adjust;
28302 if (restoring_GPRs_inline)
28304 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
28306 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28307 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
28309 rtx offset, addr, mem, reg;
28311 /* We're doing all this to ensure that the immediate offset
28312 fits into the immediate field of 'evldd'. */
28313 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
28315 offset = GEN_INT (spe_offset + reg_size * i);
28316 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
28317 mem = gen_rtx_MEM (V2SImode, addr);
28318 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28320 emit_move_insn (reg, mem);
28323 else
28324 rs6000_emit_savres_rtx (info, frame_reg_rtx,
28325 info->spe_gp_save_offset + frame_off,
28326 info->lr_save_offset + frame_off,
28327 reg_mode,
28328 SAVRES_GPR | SAVRES_LR);
28330 else if (!restoring_GPRs_inline)
28332 /* We are jumping to an out-of-line function. */
28333 rtx ptr_reg;
28334 int end_save = info->gp_save_offset + info->gp_size;
28335 bool can_use_exit = end_save == 0;
28336 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28337 int ptr_off;
28339 /* Emit stack reset code if we need it. */
28340 ptr_regno = ptr_regno_for_savres (sel);
28341 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28342 if (can_use_exit)
28343 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
28344 else if (end_save + frame_off != 0)
28345 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28346 GEN_INT (end_save + frame_off)));
28347 else if (REGNO (frame_reg_rtx) != ptr_regno)
28348 emit_move_insn (ptr_reg, frame_reg_rtx);
28349 if (REGNO (frame_reg_rtx) == ptr_regno)
28350 frame_off = -end_save;
28352 if (can_use_exit && info->cr_save_p)
28353 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28355 ptr_off = -end_save;
28356 rs6000_emit_savres_rtx (info, ptr_reg,
28357 info->gp_save_offset + ptr_off,
28358 info->lr_save_offset + ptr_off,
28359 reg_mode, sel);
28361 else if (using_load_multiple)
28363 rtvec p;
28364 p = rtvec_alloc (32 - info->first_gp_reg_save);
28365 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28366 RTVEC_ELT (p, i)
28367 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28368 frame_reg_rtx,
28369 info->gp_save_offset + frame_off + reg_size * i);
28370 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28372 else
28374 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28375 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
28376 emit_insn (gen_frame_load
28377 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28378 frame_reg_rtx,
28379 info->gp_save_offset + frame_off + reg_size * i));
28382 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28384 /* If the frame pointer was used then we can't delay emitting
28385 a REG_CFA_DEF_CFA note. This must happen on the insn that
28386 restores the frame pointer, r31. We may have already emitted
28387 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28388 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28389 be harmless if emitted. */
28390 if (frame_pointer_needed)
28392 insn = get_last_insn ();
28393 add_reg_note (insn, REG_CFA_DEF_CFA,
28394 plus_constant (Pmode, frame_reg_rtx, frame_off));
28395 RTX_FRAME_RELATED_P (insn) = 1;
28398 /* Set up cfa_restores. We always need these when
28399 shrink-wrapping. If not shrink-wrapping then we only need
28400 the cfa_restore when the stack location is no longer valid.
28401 The cfa_restores must be emitted on or before the insn that
28402 invalidates the stack, and of course must not be emitted
28403 before the insn that actually does the restore. The latter
28404 is why it is a bad idea to emit the cfa_restores as a group
28405 on the last instruction here that actually does a restore:
28406 That insn may be reordered with respect to others doing
28407 restores. */
28408 if (flag_shrink_wrap
28409 && !restoring_GPRs_inline
28410 && info->first_fp_reg_save == 64)
28411 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28413 for (i = info->first_gp_reg_save; i < 32; i++)
28414 if (!restoring_GPRs_inline
28415 || using_load_multiple
28416 || rs6000_reg_live_or_pic_offset_p (i))
28418 rtx reg = gen_rtx_REG (reg_mode, i);
28420 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28424 if (!restoring_GPRs_inline
28425 && info->first_fp_reg_save == 64)
28427 /* We are jumping to an out-of-line function. */
28428 if (cfa_restores)
28429 emit_cfa_restores (cfa_restores);
28430 return;
28433 if (restore_lr && !restoring_GPRs_inline)
28435 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28436 restore_saved_lr (0, exit_func);
28439 /* Restore fpr's if we need to do it without calling a function. */
28440 if (restoring_FPRs_inline)
28441 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28442 if (save_reg_p (info->first_fp_reg_save + i))
28444 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28445 ? DFmode : SFmode),
28446 info->first_fp_reg_save + i);
28447 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28448 info->fp_save_offset + frame_off + 8 * i));
28449 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28450 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28453 /* If we saved cr, restore it here. Just those that were used. */
28454 if (info->cr_save_p)
28455 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28457 /* If this is V.4, unwind the stack pointer after all of the loads
28458 have been done, or set up r11 if we are restoring fp out of line. */
28459 ptr_regno = 1;
28460 if (!restoring_FPRs_inline)
28462 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28463 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28464 ptr_regno = ptr_regno_for_savres (sel);
28467 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
28468 if (REGNO (frame_reg_rtx) == ptr_regno)
28469 frame_off = 0;
28471 if (insn && restoring_FPRs_inline)
28473 if (cfa_restores)
28475 REG_NOTES (insn) = cfa_restores;
28476 cfa_restores = NULL_RTX;
28478 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28479 RTX_FRAME_RELATED_P (insn) = 1;
28482 if (crtl->calls_eh_return)
28484 rtx sa = EH_RETURN_STACKADJ_RTX;
28485 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28488 if (!sibcall)
28490 rtvec p;
28491 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28492 if (! restoring_FPRs_inline)
28494 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
28495 RTVEC_ELT (p, 0) = ret_rtx;
28497 else
28499 if (cfa_restores)
28501 /* We can't hang the cfa_restores off a simple return,
28502 since the shrink-wrap code sometimes uses an existing
28503 return. This means there might be a path from
28504 pre-prologue code to this return, and dwarf2cfi code
28505 wants the eh_frame unwinder state to be the same on
28506 all paths to any point. So we need to emit the
28507 cfa_restores before the return. For -m64 we really
28508 don't need epilogue cfa_restores at all, except for
28509 this irritating dwarf2cfi-with-shrink-wrap
28510 requirement; the stack red-zone means eh_frame info
28511 from the prologue telling the unwinder to restore
28512 from the stack is perfectly good right to the end of
28513 the function. */
28514 emit_insn (gen_blockage ());
28515 emit_cfa_restores (cfa_restores);
28516 cfa_restores = NULL_RTX;
28518 p = rtvec_alloc (2);
28519 RTVEC_ELT (p, 0) = simple_return_rtx;
28522 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
28523 ? gen_rtx_USE (VOIDmode,
28524 gen_rtx_REG (Pmode, LR_REGNO))
28525 : gen_rtx_CLOBBER (VOIDmode,
28526 gen_rtx_REG (Pmode, LR_REGNO)));
28528 /* If we have to restore more than two FP registers, branch to the
28529 restore function. It will return to our caller. */
28530 if (! restoring_FPRs_inline)
28532 int i;
28533 int reg;
28534 rtx sym;
28536 if (flag_shrink_wrap)
28537 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28539 sym = rs6000_savres_routine_sym (info,
28540 SAVRES_FPR | (lr ? SAVRES_LR : 0));
28541 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
28542 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28543 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28545 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28547 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28549 RTVEC_ELT (p, i + 4)
28550 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28551 if (flag_shrink_wrap)
28552 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28553 cfa_restores);
28557 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28560 if (cfa_restores)
28562 if (sibcall)
28563 /* Ensure the cfa_restores are hung off an insn that won't
28564 be reordered above other restores. */
28565 emit_insn (gen_blockage ());
28567 emit_cfa_restores (cfa_restores);
28571 /* Write function epilogue. */
28573 static void
28574 rs6000_output_function_epilogue (FILE *file,
28575 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
28577 #if TARGET_MACHO
28578 macho_branch_islands ();
28579 /* Mach-O doesn't support labels at the end of objects, so if
28580 it looks like we might want one, insert a NOP. */
28582 rtx_insn *insn = get_last_insn ();
28583 rtx_insn *deleted_debug_label = NULL;
28584 while (insn
28585 && NOTE_P (insn)
28586 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28588 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
28589 a nop; instead set their CODE_LABEL_NUMBER to -1,
28590 otherwise there would be code generation differences
28591 between -g and -g0. */
28592 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28593 deleted_debug_label = insn;
28594 insn = PREV_INSN (insn);
28596 if (insn
28597 && (LABEL_P (insn)
28598 || (NOTE_P (insn)
28599 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
28600 fputs ("\tnop\n", file);
28601 else if (deleted_debug_label)
28602 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28603 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28604 CODE_LABEL_NUMBER (insn) = -1;
28606 #endif
28608 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28609 on its format.
28611 We don't output a traceback table if -finhibit-size-directive was
28612 used. The documentation for -finhibit-size-directive reads
28613 ``don't output a @code{.size} assembler directive, or anything
28614 else that would cause trouble if the function is split in the
28615 middle, and the two halves are placed at locations far apart in
28616 memory.'' The traceback table has this property, since it
28617 includes the offset from the start of the function to the
28618 traceback table itself.
28620 System V.4 PowerPC systems (and the embedded ABI derived from it) use a
28621 different traceback table. */
28622 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28623 && ! flag_inhibit_size_directive
28624 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28626 const char *fname = NULL;
28627 const char *language_string = lang_hooks.name;
28628 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28629 int i;
28630 int optional_tbtab;
28631 rs6000_stack_t *info = rs6000_stack_info ();
28633 if (rs6000_traceback == traceback_full)
28634 optional_tbtab = 1;
28635 else if (rs6000_traceback == traceback_part)
28636 optional_tbtab = 0;
28637 else
28638 optional_tbtab = !optimize_size && !TARGET_ELF;
28640 if (optional_tbtab)
28642 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28643 while (*fname == '.') /* V.4 encodes . in the name */
28644 fname++;
28646 /* Need label immediately before tbtab, so we can compute
28647 its offset from the function start. */
28648 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28649 ASM_OUTPUT_LABEL (file, fname);
28652 /* The .tbtab pseudo-op can only be used for the first eight
28653 expressions, since it can't handle the possibly variable
28654 length fields that follow. However, if you omit the optional
28655 fields, the assembler outputs zeros for all optional fields
28656 anyways, giving each variable length field is minimum length
28657 (as defined in sys/debug.h). Thus we can not use the .tbtab
28658 pseudo-op at all. */
28660 /* An all-zero word flags the start of the tbtab, for debuggers
28661 that have to find it by searching forward from the entry
28662 point or from the current pc. */
28663 fputs ("\t.long 0\n", file);
28665 /* Tbtab format type. Use format type 0. */
28666 fputs ("\t.byte 0,", file);
28668 /* Language type. Unfortunately, there does not seem to be any
28669 official way to discover the language being compiled, so we
28670 use language_string.
28671 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28672 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28673 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28674 either, so for now use 0. */
28675 if (lang_GNU_C ()
28676 || ! strcmp (language_string, "GNU GIMPLE")
28677 || ! strcmp (language_string, "GNU Go")
28678 || ! strcmp (language_string, "libgccjit"))
28679 i = 0;
28680 else if (! strcmp (language_string, "GNU F77")
28681 || lang_GNU_Fortran ())
28682 i = 1;
28683 else if (! strcmp (language_string, "GNU Pascal"))
28684 i = 2;
28685 else if (! strcmp (language_string, "GNU Ada"))
28686 i = 3;
28687 else if (lang_GNU_CXX ()
28688 || ! strcmp (language_string, "GNU Objective-C++"))
28689 i = 9;
28690 else if (! strcmp (language_string, "GNU Java"))
28691 i = 13;
28692 else if (! strcmp (language_string, "GNU Objective-C"))
28693 i = 14;
28694 else
28695 gcc_unreachable ();
28696 fprintf (file, "%d,", i);
28698 /* 8 single bit fields: global linkage (not set for C extern linkage,
28699 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28700 from start of procedure stored in tbtab, internal function, function
28701 has controlled storage, function has no toc, function uses fp,
28702 function logs/aborts fp operations. */
28703 /* Assume that fp operations are used if any fp reg must be saved. */
28704 fprintf (file, "%d,",
28705 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28707 /* 6 bitfields: function is interrupt handler, name present in
28708 proc table, function calls alloca, on condition directives
28709 (controls stack walks, 3 bits), saves condition reg, saves
28710 link reg. */
28711 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28712 set up as a frame pointer, even when there is no alloca call. */
28713 fprintf (file, "%d,",
28714 ((optional_tbtab << 6)
28715 | ((optional_tbtab & frame_pointer_needed) << 5)
28716 | (info->cr_save_p << 1)
28717 | (info->lr_save_p)));
28719 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28720 (6 bits). */
28721 fprintf (file, "%d,",
28722 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28724 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28725 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28727 if (optional_tbtab)
28729 /* Compute the parameter info from the function decl argument
28730 list. */
28731 tree decl;
28732 int next_parm_info_bit = 31;
28734 for (decl = DECL_ARGUMENTS (current_function_decl);
28735 decl; decl = DECL_CHAIN (decl))
28737 rtx parameter = DECL_INCOMING_RTL (decl);
28738 machine_mode mode = GET_MODE (parameter);
28740 if (GET_CODE (parameter) == REG)
28742 if (SCALAR_FLOAT_MODE_P (mode))
28744 int bits;
28746 float_parms++;
28748 switch (mode)
28750 case SFmode:
28751 case SDmode:
28752 bits = 0x2;
28753 break;
28755 case DFmode:
28756 case DDmode:
28757 case TFmode:
28758 case TDmode:
28759 case IFmode:
28760 case KFmode:
28761 bits = 0x3;
28762 break;
28764 default:
28765 gcc_unreachable ();
28768 /* If only one bit will fit, don't or in this entry. */
28769 if (next_parm_info_bit > 0)
28770 parm_info |= (bits << (next_parm_info_bit - 1));
28771 next_parm_info_bit -= 2;
28773 else
28775 fixed_parms += ((GET_MODE_SIZE (mode)
28776 + (UNITS_PER_WORD - 1))
28777 / UNITS_PER_WORD);
28778 next_parm_info_bit -= 1;
28784 /* Number of fixed point parameters. */
28785 /* This is actually the number of words of fixed point parameters; thus
28786 an 8-byte struct counts as 2, and the maximum value is 8. */
28787 fprintf (file, "%d,", fixed_parms);
28789 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28790 all on stack. */
28791 /* This is actually the number of fp registers that hold parameters;
28792 and thus the maximum value is 13. */
28793 /* Set the parameters-on-stack bit if parameters are not in their
28794 original registers, regardless of whether they are on the stack? Xlc
28795 seems to set the bit when not optimizing. */
28796 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28798 if (! optional_tbtab)
28799 return;
28801 /* Optional fields follow. Some are variable length. */
28803 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
28804 11 double float. */
28805 /* There is an entry for each parameter in a register, in the order that
28806 they occur in the parameter list. Any intervening arguments on the
28807 stack are ignored. If the list overflows a long (max possible length
28808 34 bits) then completely leave off all elements that don't fit. */
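/* Worked example of the encoding built earlier: for
   f (int, double, float) the loop yields bit 31 = 0 (fixed),
   bits 30-29 = 11 (double), bits 28-27 = 10 (single), i.e.
   parm_info == 0x70000000, with fixed_parms == 1 and
   float_parms == 2.  */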
28809 /* Only emit this long if there was at least one parameter. */
28810 if (fixed_parms || float_parms)
28811 fprintf (file, "\t.long %d\n", parm_info);
28813 /* Offset from start of code to tb table. */
28814 fputs ("\t.long ", file);
28815 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28816 RS6000_OUTPUT_BASENAME (file, fname);
28817 putc ('-', file);
28818 rs6000_output_function_entry (file, fname);
28819 putc ('\n', file);
28821 /* Interrupt handler mask. */
28822 /* Omit this long, since we never set the interrupt handler bit
28823 above. */
28825 /* Number of CTL (controlled storage) anchors. */
28826 /* Omit this long, since the has_ctl bit is never set above. */
28828 /* Displacement into stack of each CTL anchor. */
28829 /* Omit this list of longs, because there are no CTL anchors. */
28831 /* Length of function name. */
28832 if (*fname == '*')
28833 ++fname;
28834 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28836 /* Function name. */
28837 assemble_string (fname, strlen (fname));
28839 /* Register for alloca automatic storage; this is always reg 31.
28840 Only emit this if the alloca bit was set above. */
28841 if (frame_pointer_needed)
28842 fputs ("\t.byte 31\n", file);
28844 fputs ("\t.align 2\n", file);
28847 /* Arrange to define .LCTOC1 label, if not already done. */
28848 if (need_toc_init)
28850 need_toc_init = 0;
28851 if (!toc_initialized)
28853 switch_to_section (toc_section);
28854 switch_to_section (current_function_section ());
28859 /* -fsplit-stack support. */
28861 /* A SYMBOL_REF for __morestack. */
28862 static GTY(()) rtx morestack_ref;
28864 static rtx
28865 gen_add3_const (rtx rt, rtx ra, long c)
28867 if (TARGET_64BIT)
28868 return gen_adddi3 (rt, ra, GEN_INT (c));
28869 else
28870 return gen_addsi3 (rt, ra, GEN_INT (c));
28873 /* Emit -fsplit-stack prologue, which goes before the regular function
28874 prologue (at local entry point in the case of ELFv2). */
28876 void
28877 rs6000_expand_split_stack_prologue (void)
28879 rs6000_stack_t *info = rs6000_stack_info ();
28880 unsigned HOST_WIDE_INT allocate;
28881 long alloc_hi, alloc_lo;
28882 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28883 rtx_insn *insn;
28885 gcc_assert (flag_split_stack && reload_completed);
28887 if (!info->push_p)
28888 return;
28890 if (global_regs[29])
28892 error ("-fsplit-stack uses register r29");
28893 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
28894 "conflicts with %qD", global_regs_decl[29]);
28897 allocate = info->total_size;
28898 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
28900 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
28901 return;
28903 if (morestack_ref == NULL_RTX)
28905 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
28906 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
28907 | SYMBOL_FLAG_FUNCTION);
28910 r0 = gen_rtx_REG (Pmode, 0);
28911 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28912 r12 = gen_rtx_REG (Pmode, 12);
28913 emit_insn (gen_load_split_stack_limit (r0));
28914 /* Always emit two insns here to calculate the requested stack,
28915 so that the linker can edit them when adjusting size for calling
28916 non-split-stack code. */
28917 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
28918 alloc_lo = -allocate - alloc_hi;
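/* Worked example: allocate == 0x12345 gives alloc_hi == -0x10000
   and alloc_lo == -0x2345; alloc_hi + alloc_lo == -allocate, and
   the pair is emitted as something like
       addis 12,1,-1
       addi  12,12,-9029  */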
28919 if (alloc_hi != 0)
28921 emit_insn (gen_add3_const (r12, r1, alloc_hi));
28922 if (alloc_lo != 0)
28923 emit_insn (gen_add3_const (r12, r12, alloc_lo));
28924 else
28925 emit_insn (gen_nop ());
28927 else
28929 emit_insn (gen_add3_const (r12, r1, alloc_lo));
28930 emit_insn (gen_nop ());
28933 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
28934 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
28935 ok_label = gen_label_rtx ();
28936 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28937 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
28938 gen_rtx_LABEL_REF (VOIDmode, ok_label),
28939 pc_rtx);
28940 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28941 JUMP_LABEL (jump) = ok_label;
28942 /* Mark the jump as very likely to be taken. */
28943 add_int_reg_note (jump, REG_BR_PROB,
28944 REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100);
28946 lr = gen_rtx_REG (Pmode, LR_REGNO);
28947 insn = emit_move_insn (r0, lr);
28948 RTX_FRAME_RELATED_P (insn) = 1;
28949 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
28950 RTX_FRAME_RELATED_P (insn) = 1;
28952 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
28953 const0_rtx, const0_rtx));
28954 call_fusage = NULL_RTX;
28955 use_reg (&call_fusage, r12);
28956 /* Say the call uses r0, even though it doesn't, to stop regrename
28957 from twiddling with the insns saving lr, trashing args for cfun.
28958 The insns restoring lr are similarly protected by making
28959 split_stack_return use r0. */
28960 use_reg (&call_fusage, r0);
28961 add_function_usage_to (insn, call_fusage);
28962 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
28963 insn = emit_move_insn (lr, r0);
28964 add_reg_note (insn, REG_CFA_RESTORE, lr);
28965 RTX_FRAME_RELATED_P (insn) = 1;
28966 emit_insn (gen_split_stack_return ());
28968 emit_label (ok_label);
28969 LABEL_NUSES (ok_label) = 1;
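/* Editorial note, not part of GCC: the RTL emitted above corresponds
   roughly to the following sequence (a sketch; mnemonics approximate,
   64-bit shown):

	ld      r0,<stack limit>        # gen_load_split_stack_limit
	addis   r12,r1,<alloc_hi>       # linker-editable pair computing
	addi    r12,r12,<alloc_lo>      #   the requested stack pointer
	cmpld   cr7,r12,r0
	bge     cr7,.Lok                # likely: enough stack available
	mflr    r0
	std     r0,<lr_save>(r1)
	bl      __morestack
	ld      r0,<lr_save>(r1)
	mtlr    r0
	blr                             # split_stack_return
   .Lok:  */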
28972 /* Return the internal arg pointer used for function incoming
28973 arguments. When -fsplit-stack, the arg pointer is r12 so we need
28974 to copy it to a pseudo in order for it to be preserved over calls
28975 and suchlike. We'd really like to use a pseudo here for the
28976 internal arg pointer but data-flow analysis is not prepared to
28977 accept pseudos as live at the beginning of a function. */
28979 static rtx
28980 rs6000_internal_arg_pointer (void)
28982 if (flag_split_stack
28983 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
28984 == NULL))
28987 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
28989 rtx pat;
28991 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
28992 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
28994 /* Put the pseudo initialization right after the note at the
28995 beginning of the function. */
28996 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
28997 gen_rtx_REG (Pmode, 12));
28998 push_topmost_sequence ();
28999 emit_insn_after (pat, get_insns ());
29000 pop_topmost_sequence ();
29002 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29003 FIRST_PARM_OFFSET (current_function_decl));
29005 return virtual_incoming_args_rtx;
29008 /* We may have to tell the dataflow pass that the split stack prologue
29009 is initializing a register. */
29011 static void
29012 rs6000_live_on_entry (bitmap regs)
29014 if (flag_split_stack)
29015 bitmap_set_bit (regs, 12);
29018 /* Emit -fsplit-stack dynamic stack allocation space check. */
29020 void
29021 rs6000_split_stack_space_check (rtx size, rtx label)
29023 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29024 rtx limit = gen_reg_rtx (Pmode);
29025 rtx requested = gen_reg_rtx (Pmode);
29026 rtx cmp = gen_reg_rtx (CCUNSmode);
29027 rtx jump;
29029 emit_insn (gen_load_split_stack_limit (limit));
29030 if (CONST_INT_P (size))
29031 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29032 else
29034 size = force_reg (Pmode, size);
29035 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29037 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29038 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29039 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29040 gen_rtx_LABEL_REF (VOIDmode, label),
29041 pc_rtx);
29042 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29043 JUMP_LABEL (jump) = label;
29046 /* A C compound statement that outputs the assembler code for a thunk
29047 function, used to implement C++ virtual function calls with
29048 multiple inheritance. The thunk acts as a wrapper around a virtual
29049 function, adjusting the implicit object parameter before handing
29050 control off to the real function.
29052 First, emit code to add the integer DELTA to the location that
29053 contains the incoming first argument. Assume that this argument
29054 contains a pointer, and is the one used to pass the `this' pointer
29055 in C++. This is the incoming argument *before* the function
29056 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29057 values of all other incoming arguments.
29059 After the addition, emit code to jump to FUNCTION, which is a
29060 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29061 not touch the return address. Hence returning from FUNCTION will
29062 return to whoever called the current `thunk'.
29064 The effect must be as if FUNCTION had been called directly with the
29065 adjusted first argument. This macro is responsible for emitting
29066 all of the code for a thunk function; output_function_prologue()
29067 and output_function_epilogue() are not invoked.
29069 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29070 been extracted from it.) It might possibly be useful on some
29071 targets, but probably not.
29073 If you do not define this macro, the target-independent code in the
29074 C++ frontend will generate a less efficient heavyweight thunk that
29075 calls FUNCTION instead of jumping to it. The generic approach does
29076 not support varargs. */
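/* Editorial sketch, not part of GCC: the pointer adjustment performed by
   the thunk, written as plain C.  The function name is hypothetical;
   DELTA and VCALL_OFFSET mirror the parameters below.  */
#if 0
static void *
thunk_adjust_this (void *this_ptr, long delta, long vcall_offset)
{
  char *p = (char *) this_ptr + delta;	/* constant adjustment */
  if (vcall_offset != 0)
    {
      /* Load the vtable pointer from the adjusted object, then add the
	 offset stored in the vtable at VCALL_OFFSET.  */
      char *vtable = *(char **) p;
      p += *(long *) (vtable + vcall_offset);
    }
  return p;
}
#endif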
29078 static void
29079 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29080 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29081 tree function)
29083 rtx this_rtx, funexp;
29084 rtx_insn *insn;
29086 reload_completed = 1;
29087 epilogue_completed = 1;
29089 /* Mark the end of the (empty) prologue. */
29090 emit_note (NOTE_INSN_PROLOGUE_END);
29092   /* Find the "this" pointer.  If the function returns a structure,
29093      the structure return pointer is in r3 and "this" arrives in r4.  */
29094 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29095 this_rtx = gen_rtx_REG (Pmode, 4);
29096 else
29097 this_rtx = gen_rtx_REG (Pmode, 3);
29099 /* Apply the constant offset, if required. */
29100 if (delta)
29101 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29103 /* Apply the offset from the vtable, if required. */
29104 if (vcall_offset)
29106 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29107 rtx tmp = gen_rtx_REG (Pmode, 12);
29109 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29110 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29112 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29113 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29115 else
29117 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29119 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29121 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29124 /* Generate a tail call to the target function. */
29125 if (!TREE_USED (function))
29127 assemble_external (function);
29128 TREE_USED (function) = 1;
29130 funexp = XEXP (DECL_RTL (function), 0);
29131 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29133 #if TARGET_MACHO
29134 if (MACHOPIC_INDIRECT)
29135 funexp = machopic_indirect_call_target (funexp);
29136 #endif
29138 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29139 generate sibcall RTL explicitly. */
29140 insn = emit_call_insn (
29141 gen_rtx_PARALLEL (VOIDmode,
29142 gen_rtvec (4,
29143 gen_rtx_CALL (VOIDmode,
29144 funexp, const0_rtx),
29145 gen_rtx_USE (VOIDmode, const0_rtx),
29146 gen_rtx_USE (VOIDmode,
29147 gen_rtx_REG (SImode,
29148 LR_REGNO)),
29149 simple_return_rtx)));
29150 SIBLING_CALL_P (insn) = 1;
29151 emit_barrier ();
29153 /* Run just enough of rest_of_compilation to get the insns emitted.
29154 There's not really enough bulk here to make other passes such as
29155      instruction scheduling worthwhile.  Note that use_thunk calls
29156 assemble_start_function and assemble_end_function. */
29157 insn = get_insns ();
29158 shorten_branches (insn);
29159 final_start_function (insn, file, 1);
29160 final (insn, file, 1);
29161 final_end_function ();
29163 reload_completed = 0;
29164 epilogue_completed = 0;
29167 /* A quick summary of the various types of 'constant-pool tables'
29168 under PowerPC:
29170 Target Flags Name One table per
29171 AIX (none) AIX TOC object file
29172 AIX -mfull-toc AIX TOC object file
29173 AIX -mminimal-toc AIX minimal TOC translation unit
29174 SVR4/EABI (none) SVR4 SDATA object file
29175 SVR4/EABI -fpic SVR4 pic object file
29176 SVR4/EABI -fPIC SVR4 PIC translation unit
29177 SVR4/EABI -mrelocatable EABI TOC function
29178 SVR4/EABI -maix AIX TOC object file
29179 SVR4/EABI -maix -mminimal-toc
29180 AIX minimal TOC translation unit
29182 Name Reg. Set by entries contains:
29183 made by addrs? fp? sum?
29185 AIX TOC 2 crt0 as Y option option
29186 AIX minimal TOC 30 prolog gcc Y Y option
29187 SVR4 SDATA 13 crt0 gcc N Y N
29188 SVR4 pic 30 prolog ld Y not yet N
29189 SVR4 PIC 30 prolog gcc Y option option
29190 EABI TOC 30 prolog gcc Y option option
29194 /* Hash functions for the hash table. */
29196 static unsigned
29197 rs6000_hash_constant (rtx k)
29199 enum rtx_code code = GET_CODE (k);
29200 machine_mode mode = GET_MODE (k);
29201 unsigned result = (code << 3) ^ mode;
29202 const char *format;
29203 int flen, fidx;
29205 format = GET_RTX_FORMAT (code);
29206 flen = strlen (format);
29207 fidx = 0;
29209 switch (code)
29211 case LABEL_REF:
29212 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29214 case CONST_WIDE_INT:
29216 int i;
29217 flen = CONST_WIDE_INT_NUNITS (k);
29218 for (i = 0; i < flen; i++)
29219 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29220 return result;
29223 case CONST_DOUBLE:
29224 if (mode != VOIDmode)
29225 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29226 flen = 2;
29227 break;
29229 case CODE_LABEL:
29230 fidx = 3;
29231 break;
29233 default:
29234 break;
29237 for (; fidx < flen; fidx++)
29238 switch (format[fidx])
29240 case 's':
29242 unsigned i, len;
29243 const char *str = XSTR (k, fidx);
29244 len = strlen (str);
29245 result = result * 613 + len;
29246 for (i = 0; i < len; i++)
29247 result = result * 613 + (unsigned) str[i];
29248 break;
29250 case 'u':
29251 case 'e':
29252 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29253 break;
29254 case 'i':
29255 case 'n':
29256 result = result * 613 + (unsigned) XINT (k, fidx);
29257 break;
29258 case 'w':
29259 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29260 result = result * 613 + (unsigned) XWINT (k, fidx);
29261 else
29263 size_t i;
29264 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29265 result = result * 613 + (unsigned) (XWINT (k, fidx)
29266 >> CHAR_BIT * i);
29268 break;
29269 case '0':
29270 break;
29271 default:
29272 gcc_unreachable ();
29275 return result;
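/* Editorial sketch, not part of GCC: the 's' case above is an ordinary
   multiplicative string hash.  A self-contained analogue using the same
   multiplier (function name hypothetical):  */
#if 0
static unsigned
hash_string_613 (const char *str)
{
  unsigned result = (unsigned) strlen (str);
  for (const unsigned char *p = (const unsigned char *) str; *p; p++)
    result = result * 613 + *p;
  return result;
}
#endif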
29278 hashval_t
29279 toc_hasher::hash (toc_hash_struct *thc)
29281 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29284 /* Compare H1 and H2 for equivalence. */
29286 bool
29287 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29289 rtx r1 = h1->key;
29290 rtx r2 = h2->key;
29292 if (h1->key_mode != h2->key_mode)
29293 return 0;
29295 return rtx_equal_p (r1, r2);
29298 /* These are the names given by the C++ front-end to vtables, and
29299 vtable-like objects. Ideally, this logic should not be here;
29300 instead, there should be some programmatic way of inquiring as
29301 to whether or not an object is a vtable. */
29303 #define VTABLE_NAME_P(NAME)				\
29304  (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0	\
29305   || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0	\
29306   || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0	\
29307   || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0	\
29308   || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
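/* Editorial note: under the Itanium C++ ABI these prefixes mangle
   vtables (_ZTV), VTTs (_ZTT), typeinfo objects (_ZTI) and construction
   vtables (_ZTC); "_vt." is the old pre-Itanium g++ vtable mangling.  */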
29310 #ifdef NO_DOLLAR_IN_LABEL
29311 /* Return a GGC-allocated character string translating dollar signs in
29312 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29314 const char *
29315 rs6000_xcoff_strip_dollar (const char *name)
29317 char *strip, *p;
29318 const char *q;
29319 size_t len;
29321 q = (const char *) strchr (name, '$');
29323 if (q == 0 || q == name)
29324 return name;
29326 len = strlen (name);
29327 strip = XALLOCAVEC (char, len + 1);
29328 strcpy (strip, name);
29329 p = strip + (q - name);
29330 while (p)
29332 *p = '_';
29333 p = strchr (p + 1, '$');
29336 return ggc_alloc_string (strip, len);
29338 #endif
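/* Editorial note, not part of GCC: expected behavior of the function
   above, e.g.

     rs6000_xcoff_strip_dollar ("foo$bar$baz")  => "foo_bar_baz"
     rs6000_xcoff_strip_dollar ("$entry")       => "$entry"  (leading '$',
						   returned unchanged)
     rs6000_xcoff_strip_dollar ("plain")        => "plain"   (no '$',
						   returned unchanged)  */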
29340 void
29341 rs6000_output_symbol_ref (FILE *file, rtx x)
29343 /* Currently C++ toc references to vtables can be emitted before it
29344 is decided whether the vtable is public or private. If this is
29345 the case, then the linker will eventually complain that there is
29346 a reference to an unknown section. Thus, for vtables only,
29347 we emit the TOC reference to reference the symbol and not the
29348 section. */
29349 const char *name = XSTR (x, 0);
29351 tree decl = SYMBOL_REF_DECL (x);
29352 if (decl /* sync condition with assemble_external () */
29353 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
29354 && (TREE_CODE (decl) == VAR_DECL
29355 || TREE_CODE (decl) == FUNCTION_DECL)
29356 && name[strlen (name) - 1] != ']')
29358 name = concat (name,
29359 (TREE_CODE (decl) == FUNCTION_DECL
29360 ? "[DS]" : "[UA]"),
29361 NULL);
29362 XSTR (x, 0) = name;
29365 if (VTABLE_NAME_P (name))
29367 RS6000_OUTPUT_BASENAME (file, name);
29369 else
29370 assemble_name (file, name);
29373 /* Output a TOC entry. We derive the entry name from what is being
29374 written. */
29376 void
29377 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29379 char buf[256];
29380 const char *name = buf;
29381 rtx base = x;
29382 HOST_WIDE_INT offset = 0;
29384 gcc_assert (!TARGET_NO_TOC);
29386 /* When the linker won't eliminate them, don't output duplicate
29387 TOC entries (this happens on AIX if there is any kind of TOC,
29388 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29389 CODE_LABELs. */
29390 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29392 struct toc_hash_struct *h;
29394 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29395 time because GGC is not initialized at that point. */
29396 if (toc_hash_table == NULL)
29397 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29399 h = ggc_alloc<toc_hash_struct> ();
29400 h->key = x;
29401 h->key_mode = mode;
29402 h->labelno = labelno;
29404 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29405 if (*found == NULL)
29406 *found = h;
29407 else /* This is indeed a duplicate.
29408 Set this label equal to that label. */
29410 fputs ("\t.set ", file);
29411 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29412 fprintf (file, "%d,", labelno);
29413 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29414 fprintf (file, "%d\n", ((*found)->labelno));
29416 #ifdef HAVE_AS_TLS
29417 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29418 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29419 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29421 fputs ("\t.set ", file);
29422 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29423 fprintf (file, "%d,", labelno);
29424 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29425 fprintf (file, "%d\n", ((*found)->labelno));
29427 #endif
29428 return;
29432 /* If we're going to put a double constant in the TOC, make sure it's
29433 aligned properly when strict alignment is on. */
29434 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29435 && STRICT_ALIGNMENT
29436 && GET_MODE_BITSIZE (mode) >= 64
29437       && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29438     ASM_OUTPUT_ALIGN (file, 3);
29441 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29443 /* Handle FP constants specially. Note that if we have a minimal
29444 TOC, things we put here aren't actually in the TOC, so we can allow
29445 FP constants. */
29446   if (GET_CODE (x) == CONST_DOUBLE
29447       && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29448 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29450 long k[4];
29452 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29453 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29454 else
29455 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29457 if (TARGET_64BIT)
29459 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29460 fputs (DOUBLE_INT_ASM_OP, file);
29461 else
29462 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29463 k[0] & 0xffffffff, k[1] & 0xffffffff,
29464 k[2] & 0xffffffff, k[3] & 0xffffffff);
29465 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29466 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29467 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29468 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29469 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29470 return;
29472 else
29474 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29475 fputs ("\t.long ", file);
29476 else
29477 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29478 k[0] & 0xffffffff, k[1] & 0xffffffff,
29479 k[2] & 0xffffffff, k[3] & 0xffffffff);
29480 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29481 k[0] & 0xffffffff, k[1] & 0xffffffff,
29482 k[2] & 0xffffffff, k[3] & 0xffffffff);
29483 return;
29486   else if (GET_CODE (x) == CONST_DOUBLE
29487	   && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29489 long k[2];
29491 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29492 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29493 else
29494 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29496 if (TARGET_64BIT)
29498 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29499 fputs (DOUBLE_INT_ASM_OP, file);
29500 else
29501 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29502 k[0] & 0xffffffff, k[1] & 0xffffffff);
29503 fprintf (file, "0x%lx%08lx\n",
29504 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29505 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29506 return;
29508 else
29510 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29511 fputs ("\t.long ", file);
29512 else
29513 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29514 k[0] & 0xffffffff, k[1] & 0xffffffff);
29515 fprintf (file, "0x%lx,0x%lx\n",
29516 k[0] & 0xffffffff, k[1] & 0xffffffff);
29517 return;
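/* Editorial note, not part of GCC: for a DFmode 1.0 (image
   0x3ff0000000000000) the branch above emits, on 64-bit AIX with a
   full TOC,

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   while on 64-bit ELF only the value is emitted after the
   DOUBLE_INT_ASM_OP directive.  */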
29520   else if (GET_CODE (x) == CONST_DOUBLE
29521	   && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29523 long l;
29525 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29526 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29527 else
29528 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29530 if (TARGET_64BIT)
29532 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29533 fputs (DOUBLE_INT_ASM_OP, file);
29534 else
29535 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29536 if (WORDS_BIG_ENDIAN)
29537 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29538 else
29539 fprintf (file, "0x%lx\n", l & 0xffffffff);
29540 return;
29542 else
29544 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29545 fputs ("\t.long ", file);
29546 else
29547 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29548 fprintf (file, "0x%lx\n", l & 0xffffffff);
29549 return;
29552 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29554 unsigned HOST_WIDE_INT low;
29555 HOST_WIDE_INT high;
29557 low = INTVAL (x) & 0xffffffff;
29558 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29560 /* TOC entries are always Pmode-sized, so when big-endian
29561 smaller integer constants in the TOC need to be padded.
29562 (This is still a win over putting the constants in
29563 a separate constant pool, because then we'd have
29564 to have both a TOC entry _and_ the actual constant.)
29566 For a 32-bit target, CONST_INT values are loaded and shifted
29567 entirely within `low' and can be stored in one TOC entry. */
29569 /* It would be easy to make this work, but it doesn't now. */
29570 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29572 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29574 low |= high << 32;
29575 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29576 high = (HOST_WIDE_INT) low >> 32;
29577 low &= 0xffffffff;
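/* Editorial note, not part of GCC: e.g. an SImode constant 0x12345678
   in a 64-bit big-endian TOC is shifted into the high half by the
   block above: low becomes 0x1234567800000000, then high = 0x12345678
   and low = 0, so the entry prints as 0x1234567800000000.  */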
29580 if (TARGET_64BIT)
29582 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29583 fputs (DOUBLE_INT_ASM_OP, file);
29584 else
29585 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29586 (long) high & 0xffffffff, (long) low & 0xffffffff);
29587 fprintf (file, "0x%lx%08lx\n",
29588 (long) high & 0xffffffff, (long) low & 0xffffffff);
29589 return;
29591 else
29593 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29595 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29596 fputs ("\t.long ", file);
29597 else
29598 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29599 (long) high & 0xffffffff, (long) low & 0xffffffff);
29600 fprintf (file, "0x%lx,0x%lx\n",
29601 (long) high & 0xffffffff, (long) low & 0xffffffff);
29603 else
29605 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29606 fputs ("\t.long ", file);
29607 else
29608 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29609 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29611 return;
29615 if (GET_CODE (x) == CONST)
29617 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29618 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29620 base = XEXP (XEXP (x, 0), 0);
29621 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29624 switch (GET_CODE (base))
29626 case SYMBOL_REF:
29627 name = XSTR (base, 0);
29628 break;
29630 case LABEL_REF:
29631 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29632 CODE_LABEL_NUMBER (XEXP (base, 0)));
29633 break;
29635 case CODE_LABEL:
29636 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29637 break;
29639 default:
29640 gcc_unreachable ();
29643 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29644 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29645 else
29647 fputs ("\t.tc ", file);
29648 RS6000_OUTPUT_BASENAME (file, name);
29650 if (offset < 0)
29651 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29652 else if (offset)
29653 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29655 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29656 after other TOC symbols, reducing overflow of small TOC access
29657 to [TC] symbols. */
29658 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29659 ? "[TE]," : "[TC],", file);
29662 /* Currently C++ toc references to vtables can be emitted before it
29663 is decided whether the vtable is public or private. If this is
29664 the case, then the linker will eventually complain that there is
29665 a TOC reference to an unknown section. Thus, for vtables only,
29666 we emit the TOC reference to reference the symbol and not the
29667 section. */
29668 if (VTABLE_NAME_P (name))
29670 RS6000_OUTPUT_BASENAME (file, name);
29671 if (offset < 0)
29672 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29673 else if (offset > 0)
29674 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29676 else
29677 output_addr_const (file, x);
29679 #if HAVE_AS_TLS
29680 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29682 switch (SYMBOL_REF_TLS_MODEL (base))
29684 case 0:
29685 break;
29686 case TLS_MODEL_LOCAL_EXEC:
29687 fputs ("@le", file);
29688 break;
29689 case TLS_MODEL_INITIAL_EXEC:
29690 fputs ("@ie", file);
29691 break;
29692 /* Use global-dynamic for local-dynamic. */
29693 case TLS_MODEL_GLOBAL_DYNAMIC:
29694 case TLS_MODEL_LOCAL_DYNAMIC:
29695 putc ('\n', file);
29696 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29697 fputs ("\t.tc .", file);
29698 RS6000_OUTPUT_BASENAME (file, name);
29699 fputs ("[TC],", file);
29700 output_addr_const (file, x);
29701 fputs ("@m", file);
29702 break;
29703 default:
29704 gcc_unreachable ();
29707 #endif
29709 putc ('\n', file);
29712 /* Output an assembler pseudo-op to write an ASCII string of N characters
29713 starting at P to FILE.
29715 On the RS/6000, we have to do this using the .byte operation and
29716 write out special characters outside the quoted string.
29717 Also, the assembler is broken; very long strings are truncated,
29718 so we must artificially break them up early. */
29720 void
29721 output_ascii (FILE *file, const char *p, int n)
29723 char c;
29724 int i, count_string;
29725 const char *for_string = "\t.byte \"";
29726 const char *for_decimal = "\t.byte ";
29727 const char *to_close = NULL;
29729 count_string = 0;
29730 for (i = 0; i < n; i++)
29732 c = *p++;
29733 if (c >= ' ' && c < 0177)
29735 if (for_string)
29736 fputs (for_string, file);
29737 putc (c, file);
29739 /* Write two quotes to get one. */
29740 if (c == '"')
29742 putc (c, file);
29743 ++count_string;
29746 for_string = NULL;
29747 for_decimal = "\"\n\t.byte ";
29748 to_close = "\"\n";
29749 ++count_string;
29751 if (count_string >= 512)
29753 fputs (to_close, file);
29755 for_string = "\t.byte \"";
29756 for_decimal = "\t.byte ";
29757 to_close = NULL;
29758 count_string = 0;
29761 else
29763 if (for_decimal)
29764 fputs (for_decimal, file);
29765 fprintf (file, "%d", c);
29767 for_string = "\n\t.byte \"";
29768 for_decimal = ", ";
29769 to_close = "\n";
29770 count_string = 0;
29774 /* Now close the string if we have written one. Then end the line. */
29775 if (to_close)
29776 fputs (to_close, file);
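/* Editorial note, not part of GCC: e.g. output_ascii (file, "Hi\"\n", 4)
   emits

	.byte "Hi"""
	.byte 10

   with the doubled quote encoding a literal '"' inside the quoted
   string and the newline falling back to a decimal .byte.  */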
29779 /* Generate a unique section name for FILENAME for a section type
29780 represented by SECTION_DESC. Output goes into BUF.
29782 SECTION_DESC can be any string, as long as it is different for each
29783 possible section type.
29785 We name the section in the same manner as xlc. The name begins with an
29786 underscore followed by the filename (after stripping any leading directory
29787 names) with the last period replaced by the string SECTION_DESC. If
29788 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29789 the name. */
29791 void
29792 rs6000_gen_section_name (char **buf, const char *filename,
29793 const char *section_desc)
29795 const char *q, *after_last_slash, *last_period = 0;
29796 char *p;
29797 int len;
29799 after_last_slash = filename;
29800 for (q = filename; *q; q++)
29802 if (*q == '/')
29803 after_last_slash = q + 1;
29804 else if (*q == '.')
29805 last_period = q;
29808 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29809 *buf = (char *) xmalloc (len);
29811 p = *buf;
29812 *p++ = '_';
29814 for (q = after_last_slash; *q; q++)
29816 if (q == last_period)
29818 strcpy (p, section_desc);
29819 p += strlen (section_desc);
29820 break;
29823 else if (ISALNUM (*q))
29824 *p++ = *q;
29827 if (last_period == 0)
29828 strcpy (p, section_desc);
29829 else
29830 *p = '\0';
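/* Editorial note, not part of GCC: e.g. for FILENAME "dir/my-file.c"
   and SECTION_DESC "bss_" the code above produces "_myfilebss_": the
   directory part is stripped, '-' is dropped as non-alphanumeric, and
   the final period is replaced by the descriptor.  */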
29833 /* Emit profile function. */
29835 void
29836 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29838 /* Non-standard profiling for kernels, which just saves LR then calls
29839 _mcount without worrying about arg saves. The idea is to change
29840 the function prologue as little as possible as it isn't easy to
29841 account for arg save/restore code added just for _mcount. */
29842 if (TARGET_PROFILE_KERNEL)
29843 return;
29845 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29847 #ifndef NO_PROFILE_COUNTERS
29848 # define NO_PROFILE_COUNTERS 0
29849 #endif
29850 if (NO_PROFILE_COUNTERS)
29851 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29852 LCT_NORMAL, VOIDmode, 0);
29853 else
29855 char buf[30];
29856 const char *label_name;
29857 rtx fun;
29859 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29860 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29861 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29863 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29864 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
29867 else if (DEFAULT_ABI == ABI_DARWIN)
29869 const char *mcount_name = RS6000_MCOUNT;
29870 int caller_addr_regno = LR_REGNO;
29872 /* Be conservative and always set this, at least for now. */
29873 crtl->uses_pic_offset_table = 1;
29875 #if TARGET_MACHO
29876 /* For PIC code, set up a stub and collect the caller's address
29877 from r0, which is where the prologue puts it. */
29878 if (MACHOPIC_INDIRECT
29879 && crtl->uses_pic_offset_table)
29880 caller_addr_regno = 0;
29881 #endif
29882 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29883 LCT_NORMAL, VOIDmode, 1,
29884 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29888 /* Write function profiler code. */
29890 void
29891 output_function_profiler (FILE *file, int labelno)
29893 char buf[100];
29895 switch (DEFAULT_ABI)
29897 default:
29898 gcc_unreachable ();
29900 case ABI_V4:
29901 if (!TARGET_32BIT)
29903 warning (0, "no profiling of 64-bit code for this ABI");
29904 return;
29906 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29907 fprintf (file, "\tmflr %s\n", reg_names[0]);
29908 if (NO_PROFILE_COUNTERS)
29910 asm_fprintf (file, "\tstw %s,4(%s)\n",
29911 reg_names[0], reg_names[1]);
29913 else if (TARGET_SECURE_PLT && flag_pic)
29915 if (TARGET_LINK_STACK)
29917 char name[32];
29918 get_ppc476_thunk_name (name);
29919 asm_fprintf (file, "\tbl %s\n", name);
29921 else
29922 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
29923 asm_fprintf (file, "\tstw %s,4(%s)\n",
29924 reg_names[0], reg_names[1]);
29925 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29926 asm_fprintf (file, "\taddis %s,%s,",
29927 reg_names[12], reg_names[12]);
29928 assemble_name (file, buf);
29929 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
29930 assemble_name (file, buf);
29931 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
29933 else if (flag_pic == 1)
29935 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
29936 asm_fprintf (file, "\tstw %s,4(%s)\n",
29937 reg_names[0], reg_names[1]);
29938 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29939 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
29940 assemble_name (file, buf);
29941 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
29943 else if (flag_pic > 1)
29945 asm_fprintf (file, "\tstw %s,4(%s)\n",
29946 reg_names[0], reg_names[1]);
29947 /* Now, we need to get the address of the label. */
29948 if (TARGET_LINK_STACK)
29950 char name[32];
29951 get_ppc476_thunk_name (name);
29952 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
29953 assemble_name (file, buf);
29954 fputs ("-.\n1:", file);
29955 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29956 asm_fprintf (file, "\taddi %s,%s,4\n",
29957 reg_names[11], reg_names[11]);
29959 else
29961 fputs ("\tbcl 20,31,1f\n\t.long ", file);
29962 assemble_name (file, buf);
29963 fputs ("-.\n1:", file);
29964 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29966 asm_fprintf (file, "\tlwz %s,0(%s)\n",
29967 reg_names[0], reg_names[11]);
29968 asm_fprintf (file, "\tadd %s,%s,%s\n",
29969 reg_names[0], reg_names[0], reg_names[11]);
29971 else
29973 asm_fprintf (file, "\tlis %s,", reg_names[12]);
29974 assemble_name (file, buf);
29975 fputs ("@ha\n", file);
29976 asm_fprintf (file, "\tstw %s,4(%s)\n",
29977 reg_names[0], reg_names[1]);
29978 asm_fprintf (file, "\tla %s,", reg_names[0]);
29979 assemble_name (file, buf);
29980 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
29983 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
29984 fprintf (file, "\tbl %s%s\n",
29985 RS6000_MCOUNT, flag_pic ? "@plt" : "");
29986 break;
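/* Editorial note, not part of GCC: for ABI_V4, non-PIC, with profile
   counters enabled, the code above emits roughly

	mflr	r0
	lis	r12,.LP<n>@ha
	stw	r0,4(r1)
	la	r0,.LP<n>@l(r12)
	bl	_mcount

   with an @plt suffix on the call when compiling PIC.  */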
29988 case ABI_AIX:
29989 case ABI_ELFv2:
29990 case ABI_DARWIN:
29991 /* Don't do anything, done in output_profile_hook (). */
29992 break;
29998 /* The following variable holds the last insn issued.  */
30000 static rtx_insn *last_scheduled_insn;
30002 /* The following variable helps to balance the issuing of load and
30003    store instructions.  */
30005 static int load_store_pendulum;
30007 /* The following variable helps pair divide insns during scheduling. */
30008 static int divide_cnt;
30009 /* The following variable helps pair and alternate vector and vector load
30010 insns during scheduling. */
30011 static int vec_load_pendulum;
30014 /* Power4 load update and store update instructions are cracked into a
30015 load or store and an integer insn which are executed in the same cycle.
30016 Branches have their own dispatch slot which does not count against the
30017 GCC issue rate, but it changes the program flow so there are no other
30018 instructions to issue in this cycle. */
30020 static int
30021 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30023 last_scheduled_insn = insn;
30024 if (GET_CODE (PATTERN (insn)) == USE
30025 || GET_CODE (PATTERN (insn)) == CLOBBER)
30027 cached_can_issue_more = more;
30028 return cached_can_issue_more;
30031 if (insn_terminates_group_p (insn, current_group))
30033 cached_can_issue_more = 0;
30034 return cached_can_issue_more;
30037   /* If the insn is not recognized, it has no reservation; keep the
	 default issue count.  */
30038 if (recog_memoized (insn) < 0)
30039 return more;
30041 if (rs6000_sched_groups)
30043 if (is_microcoded_insn (insn))
30044 cached_can_issue_more = 0;
30045 else if (is_cracked_insn (insn))
30046 cached_can_issue_more = more > 2 ? more - 2 : 0;
30047 else
30048 cached_can_issue_more = more - 1;
30050 return cached_can_issue_more;
30053 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30054 return 0;
30056 cached_can_issue_more = more - 1;
30057 return cached_can_issue_more;
30060 static int
30061 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30063 int r = rs6000_variable_issue_1 (insn, more);
30064 if (verbose)
30065 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30066 return r;
30069 /* Adjust the cost of a scheduling dependency. Return the new cost of
30070 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30072 static int
30073 rs6000_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
30075 enum attr_type attr_type;
30077 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30078 return cost;
30080 switch (REG_NOTE_KIND (link))
30082 case REG_DEP_TRUE:
30084 /* Data dependency; DEP_INSN writes a register that INSN reads
30085 some cycles later. */
30087 /* Separate a load from a narrower, dependent store. */
30088 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30089 && GET_CODE (PATTERN (insn)) == SET
30090 && GET_CODE (PATTERN (dep_insn)) == SET
30091 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30092 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30093 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30094 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30095 return cost + 14;
30097 attr_type = get_attr_type (insn);
30099 switch (attr_type)
30101 case TYPE_JMPREG:
30102 /* Tell the first scheduling pass about the latency between
30103 a mtctr and bctr (and mtlr and br/blr). The first
30104 scheduling pass will not know about this latency since
30105 the mtctr instruction, which has the latency associated
30106 to it, will be generated by reload. */
30107 return 4;
30108 case TYPE_BRANCH:
30109 /* Leave some extra cycles between a compare and its
30110 dependent branch, to inhibit expensive mispredicts. */
30111 if ((rs6000_cpu_attr == CPU_PPC603
30112 || rs6000_cpu_attr == CPU_PPC604
30113 || rs6000_cpu_attr == CPU_PPC604E
30114 || rs6000_cpu_attr == CPU_PPC620
30115 || rs6000_cpu_attr == CPU_PPC630
30116 || rs6000_cpu_attr == CPU_PPC750
30117 || rs6000_cpu_attr == CPU_PPC7400
30118 || rs6000_cpu_attr == CPU_PPC7450
30119 || rs6000_cpu_attr == CPU_PPCE5500
30120 || rs6000_cpu_attr == CPU_PPCE6500
30121 || rs6000_cpu_attr == CPU_POWER4
30122 || rs6000_cpu_attr == CPU_POWER5
30123 || rs6000_cpu_attr == CPU_POWER7
30124 || rs6000_cpu_attr == CPU_POWER8
30125 || rs6000_cpu_attr == CPU_POWER9
30126 || rs6000_cpu_attr == CPU_CELL)
30127 && recog_memoized (dep_insn)
30128 && (INSN_CODE (dep_insn) >= 0))
30130 switch (get_attr_type (dep_insn))
30132 case TYPE_CMP:
30133 case TYPE_FPCOMPARE:
30134 case TYPE_CR_LOGICAL:
30135 case TYPE_DELAYED_CR:
30136 return cost + 2;
30137 case TYPE_EXTS:
30138 case TYPE_MUL:
30139 if (get_attr_dot (dep_insn) == DOT_YES)
30140 return cost + 2;
30141 else
30142 break;
30143 case TYPE_SHIFT:
30144 if (get_attr_dot (dep_insn) == DOT_YES
30145 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30146 return cost + 2;
30147 else
30148 break;
30149 default:
30150 break;
30152 break;
30154 case TYPE_STORE:
30155 case TYPE_FPSTORE:
30156 if ((rs6000_cpu == PROCESSOR_POWER6)
30157 && recog_memoized (dep_insn)
30158 && (INSN_CODE (dep_insn) >= 0))
30161 if (GET_CODE (PATTERN (insn)) != SET)
30162 /* If this happens, we have to extend this to schedule
30163 optimally. Return default for now. */
30164 return cost;
30166 /* Adjust the cost for the case where the value written
30167 by a fixed point operation is used as the address
30168 gen value on a store. */
30169 switch (get_attr_type (dep_insn))
30171 case TYPE_LOAD:
30172 case TYPE_CNTLZ:
30174 if (! store_data_bypass_p (dep_insn, insn))
30175 return get_attr_sign_extend (dep_insn)
30176 == SIGN_EXTEND_YES ? 6 : 4;
30177 break;
30179 case TYPE_SHIFT:
30181 if (! store_data_bypass_p (dep_insn, insn))
30182 	      return (get_attr_var_shift (dep_insn) == VAR_SHIFT_YES
30183 		      ? 6 : 3);
30184 break;
30186 case TYPE_INTEGER:
30187 case TYPE_ADD:
30188 case TYPE_LOGICAL:
30189 case TYPE_EXTS:
30190 case TYPE_INSERT:
30192 if (! store_data_bypass_p (dep_insn, insn))
30193 return 3;
30194 break;
30196 case TYPE_STORE:
30197 case TYPE_FPLOAD:
30198 case TYPE_FPSTORE:
30200 if (get_attr_update (dep_insn) == UPDATE_YES
30201 && ! store_data_bypass_p (dep_insn, insn))
30202 return 3;
30203 break;
30205 case TYPE_MUL:
30207 if (! store_data_bypass_p (dep_insn, insn))
30208 return 17;
30209 break;
30211 case TYPE_DIV:
30213 if (! store_data_bypass_p (dep_insn, insn))
30214 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30215 break;
30217 default:
30218 break;
30221 break;
30223 case TYPE_LOAD:
30224 if ((rs6000_cpu == PROCESSOR_POWER6)
30225 && recog_memoized (dep_insn)
30226 && (INSN_CODE (dep_insn) >= 0))
30229 /* Adjust the cost for the case where the value written
30230 by a fixed point instruction is used within the address
30231 gen portion of a subsequent load(u)(x) */
30232 switch (get_attr_type (dep_insn))
30234 case TYPE_LOAD:
30235 case TYPE_CNTLZ:
30237 if (set_to_load_agen (dep_insn, insn))
30238 return get_attr_sign_extend (dep_insn)
30239 == SIGN_EXTEND_YES ? 6 : 4;
30240 break;
30242 case TYPE_SHIFT:
30244 if (set_to_load_agen (dep_insn, insn))
30245 	      return (get_attr_var_shift (dep_insn) == VAR_SHIFT_YES
30246 		      ? 6 : 3);
30247 break;
30249 case TYPE_INTEGER:
30250 case TYPE_ADD:
30251 case TYPE_LOGICAL:
30252 case TYPE_EXTS:
30253 case TYPE_INSERT:
30255 if (set_to_load_agen (dep_insn, insn))
30256 return 3;
30257 break;
30259 case TYPE_STORE:
30260 case TYPE_FPLOAD:
30261 case TYPE_FPSTORE:
30263 if (get_attr_update (dep_insn) == UPDATE_YES
30264 && set_to_load_agen (dep_insn, insn))
30265 return 3;
30266 break;
30268 case TYPE_MUL:
30270 if (set_to_load_agen (dep_insn, insn))
30271 return 17;
30272 break;
30274 case TYPE_DIV:
30276 if (set_to_load_agen (dep_insn, insn))
30277 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30278 break;
30280 default:
30281 break;
30284 break;
30286 case TYPE_FPLOAD:
30287 if ((rs6000_cpu == PROCESSOR_POWER6)
30288 && get_attr_update (insn) == UPDATE_NO
30289 && recog_memoized (dep_insn)
30290 && (INSN_CODE (dep_insn) >= 0)
30291 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30292 return 2;
30294 default:
30295 break;
30298 /* Fall out to return default cost. */
30300 break;
30302 case REG_DEP_OUTPUT:
30303 /* Output dependency; DEP_INSN writes a register that INSN writes some
30304 cycles later. */
30305 if ((rs6000_cpu == PROCESSOR_POWER6)
30306 && recog_memoized (dep_insn)
30307 && (INSN_CODE (dep_insn) >= 0))
30309 attr_type = get_attr_type (insn);
30311 switch (attr_type)
30313 case TYPE_FP:
30314 case TYPE_FPSIMPLE:
30315 if (get_attr_type (dep_insn) == TYPE_FP
30316 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30317 return 1;
30318 break;
30319 case TYPE_FPLOAD:
30320 if (get_attr_update (insn) == UPDATE_NO
30321 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30322 return 2;
30323 break;
30324 default:
30325 break;
30328 /* Fall through, no cost for output dependency. */
30330 case REG_DEP_ANTI:
30331 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30332 cycles later. */
30333 return 0;
30335 default:
30336 gcc_unreachable ();
30339 return cost;
30342 /* Debug version of rs6000_adjust_cost. */
30344 static int
30345 rs6000_debug_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn,
30346 int cost)
30348 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
30350 if (ret != cost)
30352 const char *dep;
30354 switch (REG_NOTE_KIND (link))
30356 	default:	     dep = "unknown dependency";  break;
30357 	case REG_DEP_TRUE:   dep = "data dependency";	  break;
30358 	case REG_DEP_OUTPUT: dep = "output dependency";	  break;
30359 	case REG_DEP_ANTI:   dep = "anti dependency";	  break;
30362 fprintf (stderr,
30363 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30364 "%s, insn:\n", ret, cost, dep);
30366 debug_rtx (insn);
30369 return ret;
30372 /* Return true if INSN is microcoded by the processor;
30373    return false otherwise.  */
30375 static bool
30376 is_microcoded_insn (rtx_insn *insn)
30378 if (!insn || !NONDEBUG_INSN_P (insn)
30379 || GET_CODE (PATTERN (insn)) == USE
30380 || GET_CODE (PATTERN (insn)) == CLOBBER)
30381 return false;
30383 if (rs6000_cpu_attr == CPU_CELL)
30384 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30386 if (rs6000_sched_groups
30387 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30389 enum attr_type type = get_attr_type (insn);
30390 if ((type == TYPE_LOAD
30391 && get_attr_update (insn) == UPDATE_YES
30392 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30393 || ((type == TYPE_LOAD || type == TYPE_STORE)
30394 && get_attr_update (insn) == UPDATE_YES
30395 && get_attr_indexed (insn) == INDEXED_YES)
30396 || type == TYPE_MFCR)
30397 return true;
30400 return false;
30403 /* The function returns true if INSN is cracked into 2 instructions
30404 by the processor (and therefore occupies 2 issue slots). */
30406 static bool
30407 is_cracked_insn (rtx_insn *insn)
30409 if (!insn || !NONDEBUG_INSN_P (insn)
30410 || GET_CODE (PATTERN (insn)) == USE
30411 || GET_CODE (PATTERN (insn)) == CLOBBER)
30412 return false;
30414 if (rs6000_sched_groups
30415 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30417 enum attr_type type = get_attr_type (insn);
30418 if ((type == TYPE_LOAD
30419 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30420 && get_attr_update (insn) == UPDATE_NO)
30421 || (type == TYPE_LOAD
30422 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30423 && get_attr_update (insn) == UPDATE_YES
30424 && get_attr_indexed (insn) == INDEXED_NO)
30425 || (type == TYPE_STORE
30426 && get_attr_update (insn) == UPDATE_YES
30427 && get_attr_indexed (insn) == INDEXED_NO)
30428 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30429 && get_attr_update (insn) == UPDATE_YES)
30430 || type == TYPE_DELAYED_CR
30431 || (type == TYPE_EXTS
30432 && get_attr_dot (insn) == DOT_YES)
30433 || (type == TYPE_SHIFT
30434 && get_attr_dot (insn) == DOT_YES
30435 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30436 || (type == TYPE_MUL
30437 && get_attr_dot (insn) == DOT_YES)
30438 || type == TYPE_DIV
30439 || (type == TYPE_INSERT
30440 && get_attr_size (insn) == SIZE_32))
30441 return true;
30444 return false;
30447 /* The function returns true if INSN can be issued only from
30448 the branch slot. */
30450 static bool
30451 is_branch_slot_insn (rtx_insn *insn)
30453 if (!insn || !NONDEBUG_INSN_P (insn)
30454 || GET_CODE (PATTERN (insn)) == USE
30455 || GET_CODE (PATTERN (insn)) == CLOBBER)
30456 return false;
30458 if (rs6000_sched_groups)
30460 enum attr_type type = get_attr_type (insn);
30461 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30462 return true;
30463 return false;
30466 return false;
30469 /* Return true if OUT_INSN sets a value that is used in the address
30470    generation computation of IN_INSN.  */
30471 static bool
30472 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30474 rtx out_set, in_set;
30476 /* For performance reasons, only handle the simple case where
30477 both loads are a single_set. */
30478 out_set = single_set (out_insn);
30479 if (out_set)
30481 in_set = single_set (in_insn);
30482 if (in_set)
30483 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30486 return false;
30489 /* Try to determine base/offset/size parts of the given MEM.
30490    Return true if successful, false if the values cannot all be
30491    determined.
30493 This function only looks for REG or REG+CONST address forms.
30494 REG+REG address form will return false. */
30496 static bool
30497 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30498 HOST_WIDE_INT *size)
30500 rtx addr_rtx;
30501   if (MEM_SIZE_KNOWN_P (mem))
30502 *size = MEM_SIZE (mem);
30503 else
30504 return false;
30506 addr_rtx = (XEXP (mem, 0));
30507 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30508 addr_rtx = XEXP (addr_rtx, 1);
30510 *offset = 0;
30511 while (GET_CODE (addr_rtx) == PLUS
30512 && CONST_INT_P (XEXP (addr_rtx, 1)))
30514 *offset += INTVAL (XEXP (addr_rtx, 1));
30515 addr_rtx = XEXP (addr_rtx, 0);
30517 if (!REG_P (addr_rtx))
30518 return false;
30520 *base = addr_rtx;
30521 return true;
30524 /* Return true if the target storage location of MEM1 is adjacent
30525    to the target storage location of MEM2.  */
30528 static bool
30529 adjacent_mem_locations (rtx mem1, rtx mem2)
30531 rtx reg1, reg2;
30532 HOST_WIDE_INT off1, size1, off2, size2;
30534 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30535 && get_memref_parts (mem2, &reg2, &off2, &size2))
30536 return ((REGNO (reg1) == REGNO (reg2))
30537 && ((off1 + size1 == off2)
30538 || (off2 + size2 == off1)));
30540 return false;
30543 /* This function returns true if it can be determined that the two MEM
30544 locations overlap by at least 1 byte based on base reg/offset/size. */
30546 static bool
30547 mem_locations_overlap (rtx mem1, rtx mem2)
30549 rtx reg1, reg2;
30550 HOST_WIDE_INT off1, size1, off2, size2;
30552 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30553 && get_memref_parts (mem2, &reg2, &off2, &size2))
30554 return ((REGNO (reg1) == REGNO (reg2))
30555 && (((off1 <= off2) && (off1 + size1 > off2))
30556 || ((off2 <= off1) && (off2 + size2 > off1))));
30558 return false;
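/* Editorial note, not part of GCC: under the base/offset/size model
   above, stack slots 8(r1) and 16(r1), each 8 bytes, are adjacent
   (8 + 8 == 16) but do not overlap, while 8(r1) and 12(r1), each
   8 bytes, overlap (8 <= 12 && 8 + 8 > 12).  */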
30561 /* A C statement (sans semicolon) to update the integer scheduling
30562    priority INSN_PRIORITY (INSN).  Increase the priority to execute
30563    INSN earlier; reduce the priority to execute INSN later.  Do not
30564 define this macro if you do not need to adjust the scheduling
30565 priorities of insns. */
30567 static int
30568 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30570 rtx load_mem, str_mem;
30571 /* On machines (like the 750) which have asymmetric integer units,
30572 where one integer unit can do multiply and divides and the other
30573 can't, reduce the priority of multiply/divide so it is scheduled
30574 before other integer operations. */
30576 #if 0
30577 if (! INSN_P (insn))
30578 return priority;
30580 if (GET_CODE (PATTERN (insn)) == USE)
30581 return priority;
30583 switch (rs6000_cpu_attr) {
30584 case CPU_PPC750:
30585 switch (get_attr_type (insn))
30587 default:
30588 break;
30590 case TYPE_MUL:
30591 case TYPE_DIV:
30592 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30593 priority, priority);
30594 if (priority >= 0 && priority < 0x01000000)
30595 priority >>= 3;
30596 break;
30599 #endif
30601 if (insn_must_be_first_in_group (insn)
30602 && reload_completed
30603 && current_sched_info->sched_max_insns_priority
30604 && rs6000_sched_restricted_insns_priority)
30607 /* Prioritize insns that can be dispatched only in the first
30608 dispatch slot. */
30609 if (rs6000_sched_restricted_insns_priority == 1)
30610 /* Attach highest priority to insn. This means that in
30611 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30612 precede 'priority' (critical path) considerations. */
30613 return current_sched_info->sched_max_insns_priority;
30614 else if (rs6000_sched_restricted_insns_priority == 2)
30615 /* Increase priority of insn by a minimal amount. This means that in
30616 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30617 considerations precede dispatch-slot restriction considerations. */
30618 return (priority + 1);
30621 if (rs6000_cpu == PROCESSOR_POWER6
30622 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30623 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30624 /* Attach highest priority to insn if the scheduler has just issued two
30625 stores and this instruction is a load, or two loads and this instruction
30626        is a store.  Power6 wants loads and stores scheduled alternately
30627        when possible.  */
30628 return current_sched_info->sched_max_insns_priority;
30630 return priority;
30633 /* Return true if the instruction is nonpipelined on the Cell. */
30634 static bool
30635 is_nonpipeline_insn (rtx_insn *insn)
30637 enum attr_type type;
30638 if (!insn || !NONDEBUG_INSN_P (insn)
30639 || GET_CODE (PATTERN (insn)) == USE
30640 || GET_CODE (PATTERN (insn)) == CLOBBER)
30641 return false;
30643 type = get_attr_type (insn);
30644 if (type == TYPE_MUL
30645 || type == TYPE_DIV
30646 || type == TYPE_SDIV
30647 || type == TYPE_DDIV
30648 || type == TYPE_SSQRT
30649 || type == TYPE_DSQRT
30650 || type == TYPE_MFCR
30651 || type == TYPE_MFCRF
30652 || type == TYPE_MFJMPR)
30654 return true;
30656 return false;
30660 /* Return how many instructions the machine can issue per cycle. */
30662 static int
30663 rs6000_issue_rate (void)
30665 /* Unless scheduling for register pressure, use issue rate of 1 for
30666 first scheduling pass to decrease degradation. */
30667 if (!reload_completed && !flag_sched_pressure)
30668 return 1;
30670 switch (rs6000_cpu_attr) {
30671 case CPU_RS64A:
30672 case CPU_PPC601: /* ? */
30673 case CPU_PPC7450:
30674 return 3;
30675 case CPU_PPC440:
30676 case CPU_PPC603:
30677 case CPU_PPC750:
30678 case CPU_PPC7400:
30679 case CPU_PPC8540:
30680 case CPU_PPC8548:
30681 case CPU_CELL:
30682 case CPU_PPCE300C2:
30683 case CPU_PPCE300C3:
30684 case CPU_PPCE500MC:
30685 case CPU_PPCE500MC64:
30686 case CPU_PPCE5500:
30687 case CPU_PPCE6500:
30688 case CPU_TITAN:
30689 return 2;
30690 case CPU_PPC476:
30691 case CPU_PPC604:
30692 case CPU_PPC604E:
30693 case CPU_PPC620:
30694 case CPU_PPC630:
30695 return 4;
30696 case CPU_POWER4:
30697 case CPU_POWER5:
30698 case CPU_POWER6:
30699 case CPU_POWER7:
30700 return 5;
30701 case CPU_POWER8:
30702 return 7;
30703 case CPU_POWER9:
30704 return 6;
30705 default:
30706 return 1;
30710 /* Return how many instructions to look ahead for better insn
30711 scheduling. */
30713 static int
30714 rs6000_use_sched_lookahead (void)
30716 switch (rs6000_cpu_attr)
30718 case CPU_PPC8540:
30719 case CPU_PPC8548:
30720 return 4;
30722 case CPU_CELL:
30723 return (reload_completed ? 8 : 0);
30725 default:
30726 return 0;
30730 /* We are choosing insn from the ready queue. Return zero if INSN can be
30731 chosen. */
30732 static int
30733 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30735 if (ready_index == 0)
30736 return 0;
30738 if (rs6000_cpu_attr != CPU_CELL)
30739 return 0;
30741 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30743 if (!reload_completed
30744 || is_nonpipeline_insn (insn)
30745 || is_microcoded_insn (insn))
30746 return 1;
30748 return 0;
30751 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30752 and return true. */
30754 static bool
30755 find_mem_ref (rtx pat, rtx *mem_ref)
30757 const char * fmt;
30758 int i, j;
30760 /* stack_tie does not produce any real memory traffic. */
30761 if (tie_operand (pat, VOIDmode))
30762 return false;
30764 if (GET_CODE (pat) == MEM)
30766 *mem_ref = pat;
30767 return true;
30770 /* Recursively process the pattern. */
30771 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30773 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30775 if (fmt[i] == 'e')
30777 if (find_mem_ref (XEXP (pat, i), mem_ref))
30778 return true;
30780 else if (fmt[i] == 'E')
30781 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30783 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30784 return true;
30788 return false;
30791 /* Determine if PAT is a PATTERN of a load insn. */
30793 static bool
30794 is_load_insn1 (rtx pat, rtx *load_mem)
30796   if (pat == NULL_RTX)
30797 return false;
30799 if (GET_CODE (pat) == SET)
30800 return find_mem_ref (SET_SRC (pat), load_mem);
30802 if (GET_CODE (pat) == PARALLEL)
30804 int i;
30806 for (i = 0; i < XVECLEN (pat, 0); i++)
30807 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30808 return true;
30811 return false;
30814 /* Determine if INSN loads from memory. */
30816 static bool
30817 is_load_insn (rtx insn, rtx *load_mem)
30819 if (!insn || !INSN_P (insn))
30820 return false;
30822 if (CALL_P (insn))
30823 return false;
30825 return is_load_insn1 (PATTERN (insn), load_mem);
30828 /* Determine if PAT is a PATTERN of a store insn. */
30830 static bool
30831 is_store_insn1 (rtx pat, rtx *str_mem)
30833   if (pat == NULL_RTX)
30834 return false;
30836 if (GET_CODE (pat) == SET)
30837 return find_mem_ref (SET_DEST (pat), str_mem);
30839 if (GET_CODE (pat) == PARALLEL)
30841 int i;
30843 for (i = 0; i < XVECLEN (pat, 0); i++)
30844 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30845 return true;
30848 return false;
30851 /* Determine if INSN stores to memory. */
30853 static bool
30854 is_store_insn (rtx insn, rtx *str_mem)
30856 if (!insn || !INSN_P (insn))
30857 return false;
30859 return is_store_insn1 (PATTERN (insn), str_mem);
30862 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30864 static bool
30865 is_power9_pairable_vec_type (enum attr_type type)
30867 switch (type)
30869 case TYPE_VECSIMPLE:
30870 case TYPE_VECCOMPLEX:
30871 case TYPE_VECDIV:
30872 case TYPE_VECCMP:
30873 case TYPE_VECPERM:
30874 case TYPE_VECFLOAT:
30875 case TYPE_VECFDIV:
30876 case TYPE_VECDOUBLE:
30877 return true;
30878 default:
30879 break;
30881 return false;
30884 /* Returns whether the dependence between INSN and NEXT is considered
30885 costly by the given target. */
30887 static bool
30888 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30890 rtx insn;
30891 rtx next;
30892 rtx load_mem, str_mem;
30894 /* If the flag is not enabled - no dependence is considered costly;
30895 allow all dependent insns in the same group.
30896 This is the most aggressive option. */
30897 if (rs6000_sched_costly_dep == no_dep_costly)
30898 return false;
30900 /* If the flag is set to 1 - a dependence is always considered costly;
30901 do not allow dependent instructions in the same group.
30902 This is the most conservative option. */
30903 if (rs6000_sched_costly_dep == all_deps_costly)
30904 return true;
30906 insn = DEP_PRO (dep);
30907 next = DEP_CON (dep);
30909 if (rs6000_sched_costly_dep == store_to_load_dep_costly
30910 && is_load_insn (next, &load_mem)
30911 && is_store_insn (insn, &str_mem))
30912 /* Prevent load after store in the same group. */
30913 return true;
30915 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
30916 && is_load_insn (next, &load_mem)
30917 && is_store_insn (insn, &str_mem)
30918 && DEP_TYPE (dep) == REG_DEP_TRUE
30919       && mem_locations_overlap (str_mem, load_mem))
30920 /* Prevent load after store in the same group if it is a true
30921 dependence. */
30922 return true;
30924 /* The flag is set to X; dependences with latency >= X are considered costly,
30925 and will not be scheduled in the same group. */
30926 if (rs6000_sched_costly_dep <= max_dep_latency
30927 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
30928 return true;
30930 return false;
30933 /* Return the next insn after INSN that is found before TAIL is reached,
30934 skipping any "non-active" insns - insns that will not actually occupy
30935 an issue slot. Return NULL_RTX if such an insn is not found. */
30937 static rtx_insn *
30938 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
30940 if (insn == NULL_RTX || insn == tail)
30941 return NULL;
30943 while (1)
30945 insn = NEXT_INSN (insn);
30946 if (insn == NULL_RTX || insn == tail)
30947 return NULL;
30949 if (CALL_P (insn)
30950 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
30951 || (NONJUMP_INSN_P (insn)
30952 && GET_CODE (PATTERN (insn)) != USE
30953 && GET_CODE (PATTERN (insn)) != CLOBBER
30954 && INSN_CODE (insn) != CODE_FOR_stack_tie))
30955 break;
30957 return insn;
30960 /* Do Power9 specific sched_reorder2 reordering of ready list. */
30962 static int
30963 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
30965 int pos;
30966 int i;
30967 rtx_insn *tmp;
30968 enum attr_type type;
30970 type = get_attr_type (last_scheduled_insn);
30972 /* Try to issue fixed point divides back-to-back in pairs so they will be
30973 routed to separate execution units and execute in parallel. */
30974 if (type == TYPE_DIV && divide_cnt == 0)
30976 /* First divide has been scheduled. */
30977 divide_cnt = 1;
30979 /* Scan the ready list looking for another divide, if found move it
30980 to the end of the list so it is chosen next. */
30981 pos = lastpos;
30982 while (pos >= 0)
30984 if (recog_memoized (ready[pos]) >= 0
30985 && get_attr_type (ready[pos]) == TYPE_DIV)
30987 tmp = ready[pos];
30988 for (i = pos; i < lastpos; i++)
30989 ready[i] = ready[i + 1];
30990 ready[lastpos] = tmp;
30991 break;
30993 pos--;
30996 else
30998 /* Last insn was the 2nd divide or not a divide, reset the counter. */
30999 divide_cnt = 0;
31001 /* Power9 can execute 2 vector operations and 2 vector loads in a single
31002 cycle. So try to pair up and alternate groups of vector and vector
31003 load instructions.
31005 To aid this formation, a counter is maintained to keep track of
31006 vec/vecload insns issued. The variable vec_load_pendulum encodes
31007 the current state with the following values:
31009 0 : Initial state, no vec/vecload group has been started.
31011 -1 : 1 vector load has been issued and another has been found on
31012 the ready list and moved to the end.
31014 -2 : 2 vector loads have been issued and a vector operation has
31015 been found and moved to the end of the ready list.
31017 -3 : 2 vector loads and a vector insn have been issued and a
31018 vector operation has been found and moved to the end of the
31019 ready list.
31021 1 : 1 vector insn has been issued and another has been found and
31022 moved to the end of the ready list.
31024 2 : 2 vector insns have been issued and a vector load has been
31025 found and moved to the end of the ready list.
31027 3 : 2 vector insns and a vector load have been issued and another
31028 vector load has been found and moved to the end of the ready
31029 list. */
31030 if (type == TYPE_VECLOAD)
31032 /* Issued a vecload. */
31033 if (vec_load_pendulum == 0)
31035 /* We issued a single vecload, look for another and move it to
31036 the end of the ready list so it will be scheduled next.
31037 Set pendulum if found. */
31038 pos = lastpos;
31039 while (pos >= 0)
31041 if (recog_memoized (ready[pos]) >= 0
31042 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
31044 tmp = ready[pos];
31045 for (i = pos; i < lastpos; i++)
31046 ready[i] = ready[i + 1];
31047 ready[lastpos] = tmp;
31048 vec_load_pendulum = -1;
31049 return cached_can_issue_more;
31051 pos--;
31054 else if (vec_load_pendulum == -1)
31056 /* This is the second vecload we've issued, search the ready
31057 list for a vector operation so we can try to schedule a
31058 pair of those next. If found move to the end of the ready
31059 list so it is scheduled next and set the pendulum. */
31060 pos = lastpos;
31061 while (pos >= 0)
31063 if (recog_memoized (ready[pos]) >= 0
31064 && is_power9_pairable_vec_type (
31065 get_attr_type (ready[pos])))
31067 tmp = ready[pos];
31068 for (i = pos; i < lastpos; i++)
31069 ready[i] = ready[i + 1];
31070 ready[lastpos] = tmp;
31071 vec_load_pendulum = -2;
31072 return cached_can_issue_more;
31074 pos--;
31077 else if (vec_load_pendulum == 2)
31079 /* Two vector ops have been issued and we've just issued a
31080 vecload, look for another vecload and move to end of ready
31081 list if found. */
31082 pos = lastpos;
31083 while (pos >= 0)
31085 if (recog_memoized (ready[pos]) >= 0
31086 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
31088 tmp = ready[pos];
31089 for (i = pos; i < lastpos; i++)
31090 ready[i] = ready[i + 1];
31091 ready[lastpos] = tmp;
31092 /* Set pendulum so that next vecload will be seen as
31093 finishing a group, not start of one. */
31094 vec_load_pendulum = 3;
31095 return cached_can_issue_more;
31097 pos--;
31101 else if (is_power9_pairable_vec_type (type))
31103 /* Issued a vector operation. */
31104 if (vec_load_pendulum == 0)
31105 /* We issued a single vec op, look for another and move it
31106 to the end of the ready list so it will be scheduled next.
31107 Set pendulum if found. */
31109 pos = lastpos;
31110 while (pos >= 0)
31112 if (recog_memoized (ready[pos]) >= 0
31113 && is_power9_pairable_vec_type (
31114 get_attr_type (ready[pos])))
31116 tmp = ready[pos];
31117 for (i = pos; i < lastpos; i++)
31118 ready[i] = ready[i + 1];
31119 ready[lastpos] = tmp;
31120 vec_load_pendulum = 1;
31121 return cached_can_issue_more;
31123 pos--;
31126 else if (vec_load_pendulum == 1)
31128 /* This is the second vec op we've issued, search the ready
31129 list for a vecload operation so we can try to schedule a
31130 pair of those next. If found move to the end of the ready
31131 list so it is scheduled next and set the pendulum. */
31132 pos = lastpos;
31133 while (pos >= 0)
31135 if (recog_memoized (ready[pos]) >= 0
31136 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
31138 tmp = ready[pos];
31139 for (i = pos; i < lastpos; i++)
31140 ready[i] = ready[i + 1];
31141 ready[lastpos] = tmp;
31142 vec_load_pendulum = 2;
31143 return cached_can_issue_more;
31145 pos--;
31148 else if (vec_load_pendulum == -2)
31150 /* Two vecload ops have been issued and we've just issued a
31151 vec op, look for another vec op and move to end of ready
31152 list if found. */
31153 pos = lastpos;
31154 while (pos >= 0)
31156 if (recog_memoized (ready[pos]) >= 0
31157 && is_power9_pairable_vec_type (
31158 get_attr_type (ready[pos])))
31160 tmp = ready[pos];
31161 for (i = pos; i < lastpos; i++)
31162 ready[i] = ready[i + 1];
31163 ready[lastpos] = tmp;
31164 /* Set pendulum so that next vec op will be seen as
31165 finishing a group, not start of one. */
31166 vec_load_pendulum = -3;
31167 return cached_can_issue_more;
31169 pos--;
31174 /* We've either finished a vec/vecload group, couldn't find an insn to
31175 continue the current group, or the last insn had nothing to do with
31176 a group. In any case, reset the pendulum. */
31177 vec_load_pendulum = 0;
31180 return cached_can_issue_more;
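/* Illustrative sketch, not part of GCC: the rotation idiom repeated
   throughout power9_sched_reorder2 above, factored out.  The scheduler
   issues from the end of the ready list, so moving READY[POS] to
   READY[LASTPOS] makes it the next insn chosen.  */
static void
move_to_end_of_ready (rtx_insn **ready, int pos, int lastpos)
{
  rtx_insn *tmp = ready[pos];
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}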
31183 /* We are about to begin issuing insns for this clock cycle. */
31185 static int
31186 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31187 rtx_insn **ready ATTRIBUTE_UNUSED,
31188 int *pn_ready ATTRIBUTE_UNUSED,
31189 int clock_var ATTRIBUTE_UNUSED)
31191 int n_ready = *pn_ready;
31193 if (sched_verbose)
31194 fprintf (dump, "// rs6000_sched_reorder :\n");
31196 /* Reorder the ready list, if the next insn to be issued
31197 is a non-pipelined insn. */
31198 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31200 if (is_nonpipeline_insn (ready[n_ready - 1])
31201 && (recog_memoized (ready[n_ready - 2]) > 0))
31202 /* Simply swap first two insns. */
31203 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31206 if (rs6000_cpu == PROCESSOR_POWER6)
31207 load_store_pendulum = 0;
31209 return rs6000_issue_rate ();
31212 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31214 static int
31215 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31216 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31218 if (sched_verbose)
31219 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31221 /* For Power6, we need to handle some special cases to try and keep the
31222 store queue from overflowing and triggering expensive flushes.
31224 This code monitors how load and store instructions are being issued
31225 and skews the ready list one way or the other to increase the likelihood
31226 that a desired instruction is issued at the proper time.
31228 A couple of things are done. First, we maintain a "load_store_pendulum"
31229 to track the current state of load/store issue.
31231 - If the pendulum is at zero, then no loads or stores have been
31232 issued in the current cycle so we do nothing.
31234 - If the pendulum is 1, then a single load has been issued in this
31235 cycle and we attempt to locate another load in the ready list to
31236 issue with it.
31238 - If the pendulum is -2, then two stores have already been
31239 issued in this cycle, so we increase the priority of the first load
31240 in the ready list to increase its likelihood of being chosen first
31241 in the next cycle.
31243 - If the pendulum is -1, then a single store has been issued in this
31244 cycle and we attempt to locate another store in the ready list to
31245 issue with it, preferring a store to an adjacent memory location to
31246 facilitate store pairing in the store queue.
31248 - If the pendulum is 2, then two loads have already been
31249 issued in this cycle, so we increase the priority of the first store
31250 in the ready list to increase its likelihood of being chosen first
31251 in the next cycle.
31253 - If the pendulum < -2 or > 2, then do nothing.
31255 Note: This code covers the most common scenarios. There exist
31256 non-load/store instructions which make use of the LSU and which
31257 would need to be accounted for to strictly model the behavior
31258 of the machine. Those instructions are currently unaccounted
31259 for to help minimize compile time overhead of this code.
31261 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31263 int pos;
31264 int i;
31265 rtx_insn *tmp;
31266 rtx load_mem, str_mem;
31268 if (is_store_insn (last_scheduled_insn, &str_mem))
31269 /* Issuing a store, swing the load_store_pendulum to the left */
31270 load_store_pendulum--;
31271 else if (is_load_insn (last_scheduled_insn, &load_mem))
31272 /* Issuing a load, swing the load_store_pendulum to the right */
31273 load_store_pendulum++;
31274 else
31275 return cached_can_issue_more;
31277 /* If the pendulum is balanced, or there is only one instruction on
31278 the ready list, then all is well, so return. */
31279 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31280 return cached_can_issue_more;
31282 if (load_store_pendulum == 1)
31284 /* A load has been issued in this cycle. Scan the ready list
31285 for another load to issue with it */
31286 pos = *pn_ready-1;
31288 while (pos >= 0)
31290 if (is_load_insn (ready[pos], &load_mem))
31292 /* Found a load. Move it to the head of the ready list,
31293 and adjust its priority so that it is more likely to
31294 stay there */
31295 tmp = ready[pos];
31296 for (i=pos; i<*pn_ready-1; i++)
31297 ready[i] = ready[i + 1];
31298 ready[*pn_ready-1] = tmp;
31300 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31301 INSN_PRIORITY (tmp)++;
31302 break;
31304 pos--;
31307 else if (load_store_pendulum == -2)
31309 /* Two stores have been issued in this cycle. Increase the
31310 priority of the first load in the ready list to favor it for
31311 issuing in the next cycle. */
31312 pos = *pn_ready-1;
31314 while (pos >= 0)
31316 if (is_load_insn (ready[pos], &load_mem)
31317 && !sel_sched_p ()
31318 && INSN_PRIORITY_KNOWN (ready[pos]))
31320 INSN_PRIORITY (ready[pos])++;
31322 /* Adjust the pendulum to account for the fact that a load
31323 was found and increased in priority. This is to prevent
31324 increasing the priority of multiple loads */
31325 load_store_pendulum--;
31327 break;
31329 pos--;
31332 else if (load_store_pendulum == -1)
31334 /* A store has been issued in this cycle. Scan the ready list for
31335 another store to issue with it, preferring a store to an adjacent
31336 memory location */
31337 int first_store_pos = -1;
31339 pos = *pn_ready-1;
31341 while (pos >= 0)
31343 if (is_store_insn (ready[pos], &str_mem))
31345 rtx str_mem2;
31346 /* Maintain the index of the first store found on the
31347 list */
31348 if (first_store_pos == -1)
31349 first_store_pos = pos;
31351 if (is_store_insn (last_scheduled_insn, &str_mem2)
31352 && adjacent_mem_locations (str_mem, str_mem2))
31354 /* Found an adjacent store. Move it to the head of the
31355 ready list, and adjust its priority so that it is
31356 more likely to stay there */
31357 tmp = ready[pos];
31358 for (i=pos; i<*pn_ready-1; i++)
31359 ready[i] = ready[i + 1];
31360 ready[*pn_ready-1] = tmp;
31362 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31363 INSN_PRIORITY (tmp)++;
31365 first_store_pos = -1;
31367 break;
31370 pos--;
31373 if (first_store_pos >= 0)
31375 /* An adjacent store wasn't found, but a non-adjacent store was,
31376 so move the non-adjacent store to the front of the ready
31377 list, and adjust its priority so that it is more likely to
31378 stay there. */
31379 tmp = ready[first_store_pos];
31380 for (i=first_store_pos; i<*pn_ready-1; i++)
31381 ready[i] = ready[i + 1];
31382 ready[*pn_ready-1] = tmp;
31383 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31384 INSN_PRIORITY (tmp)++;
31387 else if (load_store_pendulum == 2)
31389 /* Two loads have been issued in this cycle. Increase the priority
31390 of the first store in the ready list to favor it for issuing in
31391 the next cycle. */
31392 pos = *pn_ready-1;
31394 while (pos >= 0)
31396 if (is_store_insn (ready[pos], &str_mem)
31397 && !sel_sched_p ()
31398 && INSN_PRIORITY_KNOWN (ready[pos]))
31400 INSN_PRIORITY (ready[pos])++;
31402 /* Adjust the pendulum to account for the fact that a store
31403 was found and increased in priority. This is to prevent
31404 increasing the priority of multiple stores */
31405 load_store_pendulum++;
31407 break;
31409 pos--;
31414 /* Do Power9 dependent reordering if necessary. */
31415 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31416 && recog_memoized (last_scheduled_insn) >= 0)
31417 return power9_sched_reorder2 (ready, *pn_ready - 1);
31419 return cached_can_issue_more;
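/* Illustrative sketch, not part of GCC: the Power6 load/store pendulum
   update performed at the top of rs6000_sched_reorder2 above.  Stores
   swing the pendulum left (negative), loads swing it right (positive);
   zero means the current cycle is balanced.  The helper name is
   invented.  */
static inline int
swing_load_store_pendulum (int pendulum, bool issued_store, bool issued_load)
{
  if (issued_store)
    return pendulum - 1;
  if (issued_load)
    return pendulum + 1;
  return pendulum;
}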
31422 /* Return whether the presence of INSN causes a dispatch group termination
31423 of group WHICH_GROUP.
31425 If WHICH_GROUP == current_group, this function will return true if INSN
31426 causes the termination of the current group (i.e., the dispatch group to
31427 which INSN belongs). This means that INSN will be the last insn in the
31428 group it belongs to.
31430 If WHICH_GROUP == previous_group, this function will return true if INSN
31431 causes the termination of the previous group (i.e., the dispatch group that
31432 precedes the group to which INSN belongs). This means that INSN will be
31433 the first insn in the group it belongs to. */
31435 static bool
31436 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31438 bool first, last;
31440 if (! insn)
31441 return false;
31443 first = insn_must_be_first_in_group (insn);
31444 last = insn_must_be_last_in_group (insn);
31446 if (first && last)
31447 return true;
31449 if (which_group == current_group)
31450 return last;
31451 else if (which_group == previous_group)
31452 return first;
31454 return false;
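/* Illustrative summary, not part of GCC: how the two predicates used
   above combine.  An insn that must be both first and last in a group
   forms a group by itself, terminating both the previous group and
   its own:

     must-be-first  must-be-last  ends previous group  ends current group
     no             no            no                   no
     yes            no            yes                  no
     no             yes           no                   yes
     yes            yes           yes                  yes                */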
31458 static bool
31459 insn_must_be_first_in_group (rtx_insn *insn)
31461 enum attr_type type;
31463 if (!insn
31464 || NOTE_P (insn)
31465 || DEBUG_INSN_P (insn)
31466 || GET_CODE (PATTERN (insn)) == USE
31467 || GET_CODE (PATTERN (insn)) == CLOBBER)
31468 return false;
31470 switch (rs6000_cpu)
31472 case PROCESSOR_POWER5:
31473 if (is_cracked_insn (insn))
31474 return true;
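/* Fall through: a POWER5 insn is also subject to the POWER4 checks
   below.  */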
31475 case PROCESSOR_POWER4:
31476 if (is_microcoded_insn (insn))
31477 return true;
31479 if (!rs6000_sched_groups)
31480 return false;
31482 type = get_attr_type (insn);
31484 switch (type)
31486 case TYPE_MFCR:
31487 case TYPE_MFCRF:
31488 case TYPE_MTCR:
31489 case TYPE_DELAYED_CR:
31490 case TYPE_CR_LOGICAL:
31491 case TYPE_MTJMPR:
31492 case TYPE_MFJMPR:
31493 case TYPE_DIV:
31494 case TYPE_LOAD_L:
31495 case TYPE_STORE_C:
31496 case TYPE_ISYNC:
31497 case TYPE_SYNC:
31498 return true;
31499 default:
31500 break;
31502 break;
31503 case PROCESSOR_POWER6:
31504 type = get_attr_type (insn);
31506 switch (type)
31508 case TYPE_EXTS:
31509 case TYPE_CNTLZ:
31510 case TYPE_TRAP:
31511 case TYPE_MUL:
31512 case TYPE_INSERT:
31513 case TYPE_FPCOMPARE:
31514 case TYPE_MFCR:
31515 case TYPE_MTCR:
31516 case TYPE_MFJMPR:
31517 case TYPE_MTJMPR:
31518 case TYPE_ISYNC:
31519 case TYPE_SYNC:
31520 case TYPE_LOAD_L:
31521 case TYPE_STORE_C:
31522 return true;
31523 case TYPE_SHIFT:
31524 if (get_attr_dot (insn) == DOT_NO
31525 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31526 return true;
31527 else
31528 break;
31529 case TYPE_DIV:
31530 if (get_attr_size (insn) == SIZE_32)
31531 return true;
31532 else
31533 break;
31534 case TYPE_LOAD:
31535 case TYPE_STORE:
31536 case TYPE_FPLOAD:
31537 case TYPE_FPSTORE:
31538 if (get_attr_update (insn) == UPDATE_YES)
31539 return true;
31540 else
31541 break;
31542 default:
31543 break;
31545 break;
31546 case PROCESSOR_POWER7:
31547 type = get_attr_type (insn);
31549 switch (type)
31551 case TYPE_CR_LOGICAL:
31552 case TYPE_MFCR:
31553 case TYPE_MFCRF:
31554 case TYPE_MTCR:
31555 case TYPE_DIV:
31556 case TYPE_ISYNC:
31557 case TYPE_LOAD_L:
31558 case TYPE_STORE_C:
31559 case TYPE_MFJMPR:
31560 case TYPE_MTJMPR:
31561 return true;
31562 case TYPE_MUL:
31563 case TYPE_SHIFT:
31564 case TYPE_EXTS:
31565 if (get_attr_dot (insn) == DOT_YES)
31566 return true;
31567 else
31568 break;
31569 case TYPE_LOAD:
31570 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31571 || get_attr_update (insn) == UPDATE_YES)
31572 return true;
31573 else
31574 break;
31575 case TYPE_STORE:
31576 case TYPE_FPLOAD:
31577 case TYPE_FPSTORE:
31578 if (get_attr_update (insn) == UPDATE_YES)
31579 return true;
31580 else
31581 break;
31582 default:
31583 break;
31585 break;
31586 case PROCESSOR_POWER8:
31587 type = get_attr_type (insn);
31589 switch (type)
31591 case TYPE_CR_LOGICAL:
31592 case TYPE_DELAYED_CR:
31593 case TYPE_MFCR:
31594 case TYPE_MFCRF:
31595 case TYPE_MTCR:
31596 case TYPE_SYNC:
31597 case TYPE_ISYNC:
31598 case TYPE_LOAD_L:
31599 case TYPE_STORE_C:
31600 case TYPE_VECSTORE:
31601 case TYPE_MFJMPR:
31602 case TYPE_MTJMPR:
31603 return true;
31604 case TYPE_SHIFT:
31605 case TYPE_EXTS:
31606 case TYPE_MUL:
31607 if (get_attr_dot (insn) == DOT_YES)
31608 return true;
31609 else
31610 break;
31611 case TYPE_LOAD:
31612 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31613 || get_attr_update (insn) == UPDATE_YES)
31614 return true;
31615 else
31616 break;
31617 case TYPE_STORE:
31618 if (get_attr_update (insn) == UPDATE_YES
31619 && get_attr_indexed (insn) == INDEXED_YES)
31620 return true;
31621 else
31622 break;
31623 default:
31624 break;
31626 break;
31627 default:
31628 break;
31631 return false;
31634 static bool
31635 insn_must_be_last_in_group (rtx_insn *insn)
31637 enum attr_type type;
31639 if (!insn
31640 || NOTE_P (insn)
31641 || DEBUG_INSN_P (insn)
31642 || GET_CODE (PATTERN (insn)) == USE
31643 || GET_CODE (PATTERN (insn)) == CLOBBER)
31644 return false;
31646 switch (rs6000_cpu) {
31647 case PROCESSOR_POWER4:
31648 case PROCESSOR_POWER5:
31649 if (is_microcoded_insn (insn))
31650 return true;
31652 if (is_branch_slot_insn (insn))
31653 return true;
31655 break;
31656 case PROCESSOR_POWER6:
31657 type = get_attr_type (insn);
31659 switch (type)
31661 case TYPE_EXTS:
31662 case TYPE_CNTLZ:
31663 case TYPE_TRAP:
31664 case TYPE_MUL:
31665 case TYPE_FPCOMPARE:
31666 case TYPE_MFCR:
31667 case TYPE_MTCR:
31668 case TYPE_MFJMPR:
31669 case TYPE_MTJMPR:
31670 case TYPE_ISYNC:
31671 case TYPE_SYNC:
31672 case TYPE_LOAD_L:
31673 case TYPE_STORE_C:
31674 return true;
31675 case TYPE_SHIFT:
31676 if (get_attr_dot (insn) == DOT_NO
31677 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31678 return true;
31679 else
31680 break;
31681 case TYPE_DIV:
31682 if (get_attr_size (insn) == SIZE_32)
31683 return true;
31684 else
31685 break;
31686 default:
31687 break;
31689 break;
31690 case PROCESSOR_POWER7:
31691 type = get_attr_type (insn);
31693 switch (type)
31695 case TYPE_ISYNC:
31696 case TYPE_SYNC:
31697 case TYPE_LOAD_L:
31698 case TYPE_STORE_C:
31699 return true;
31700 case TYPE_LOAD:
31701 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31702 && get_attr_update (insn) == UPDATE_YES)
31703 return true;
31704 else
31705 break;
31706 case TYPE_STORE:
31707 if (get_attr_update (insn) == UPDATE_YES
31708 && get_attr_indexed (insn) == INDEXED_YES)
31709 return true;
31710 else
31711 break;
31712 default:
31713 break;
31715 break;
31716 case PROCESSOR_POWER8:
31717 type = get_attr_type (insn);
31719 switch (type)
31721 case TYPE_MFCR:
31722 case TYPE_MTCR:
31723 case TYPE_ISYNC:
31724 case TYPE_SYNC:
31725 case TYPE_LOAD_L:
31726 case TYPE_STORE_C:
31727 return true;
31728 case TYPE_LOAD:
31729 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31730 && get_attr_update (insn) == UPDATE_YES)
31731 return true;
31732 else
31733 break;
31734 case TYPE_STORE:
31735 if (get_attr_update (insn) == UPDATE_YES
31736 && get_attr_indexed (insn) == INDEXED_YES)
31737 return true;
31738 else
31739 break;
31740 default:
31741 break;
31743 break;
31744 default:
31745 break;
31748 return false;
31751 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31752 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31754 static bool
31755 is_costly_group (rtx *group_insns, rtx next_insn)
31757 int i;
31758 int issue_rate = rs6000_issue_rate ();
31760 for (i = 0; i < issue_rate; i++)
31762 sd_iterator_def sd_it;
31763 dep_t dep;
31764 rtx insn = group_insns[i];
31766 if (!insn)
31767 continue;
31769 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31771 rtx next = DEP_CON (dep);
31773 if (next == next_insn
31774 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31775 return true;
31779 return false;
31782 /* Utility function used by redefine_groups.
31783 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31784 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31785 to keep it "far" (in a separate group) from GROUP_INSNS, following
31786 one of the following schemes, depending on the value of the flag
31787 -minsert-sched-nops = X:
31788 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31789 in order to force NEXT_INSN into a separate group.
31790 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31791 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31792 insertion (has a group just ended, how many vacant issue slots remain in the
31793 last group, and how many dispatch groups were encountered so far). */
31795 static int
31796 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31797 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31798 int *group_count)
31800 rtx nop;
31801 bool force;
31802 int issue_rate = rs6000_issue_rate ();
31803 bool end = *group_end;
31804 int i;
31806 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31807 return can_issue_more;
31809 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31810 return can_issue_more;
31812 force = is_costly_group (group_insns, next_insn);
31813 if (!force)
31814 return can_issue_more;
31816 if (sched_verbose > 6)
31817 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
31818 *group_count ,can_issue_more);
31820 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31822 if (*group_end)
31823 can_issue_more = 0;
31825 /* Since only a branch can be issued in the last issue slot, it is
31826 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31827 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31828 in this case the last nop will start a new group and the branch
31829 will be forced to the new group. */
31830 if (can_issue_more && !is_branch_slot_insn (next_insn))
31831 can_issue_more--;
31833 /* Do we have a special group ending nop? */
31834 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
31835 || rs6000_cpu_attr == CPU_POWER8)
31837 nop = gen_group_ending_nop ();
31838 emit_insn_before (nop, next_insn);
31839 can_issue_more = 0;
31841 else
31842 while (can_issue_more > 0)
31844 nop = gen_nop ();
31845 emit_insn_before (nop, next_insn);
31846 can_issue_more--;
31849 *group_end = true;
31850 return 0;
31853 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31855 int n_nops = rs6000_sched_insert_nops;
31857 /* Nops can't be issued from the branch slot, so the effective
31858 issue_rate for nops is 'issue_rate - 1'. */
31859 if (can_issue_more == 0)
31860 can_issue_more = issue_rate;
31861 can_issue_more--;
31862 if (can_issue_more == 0)
31864 can_issue_more = issue_rate - 1;
31865 (*group_count)++;
31866 end = true;
31867 for (i = 0; i < issue_rate; i++)
31869 group_insns[i] = 0;
31873 while (n_nops > 0)
31875 nop = gen_nop ();
31876 emit_insn_before (nop, next_insn);
31877 if (can_issue_more == issue_rate - 1) /* new group begins */
31878 end = false;
31879 can_issue_more--;
31880 if (can_issue_more == 0)
31882 can_issue_more = issue_rate - 1;
31883 (*group_count)++;
31884 end = true;
31885 for (i = 0; i < issue_rate; i++)
31887 group_insns[i] = 0;
31890 n_nops--;
31893 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31894 can_issue_more++;
31896 /* Is next_insn going to start a new group? */
31897 *group_end
31898 = (end
31899 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31900 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31901 || (can_issue_more < issue_rate &&
31902 insn_terminates_group_p (next_insn, previous_group)));
31903 if (*group_end && end)
31904 (*group_count)--;
31906 if (sched_verbose > 6)
31907 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31908 *group_count, can_issue_more);
31909 return can_issue_more;
31912 return can_issue_more;
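/* Illustrative sketch, not part of GCC: the nop count chosen by scheme
   (1) above (sched_finish_regroup_exact) when no special group-ending
   nop is available.  The helper name is invented.  */
static inline int
exact_regroup_nop_count (int can_issue_more, bool next_is_branch)
{
  /* Only a branch can occupy the last issue slot, so a non-branch
     needs one fewer nop to be forced into the next group.  */
  return next_is_branch ? can_issue_more : can_issue_more - 1;
}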
31915 /* This function tries to synch the dispatch groups that the compiler "sees"
31916 with the dispatch groups that the processor dispatcher is expected to
31917 form in practice. It tries to achieve this synchronization by forcing the
31918 estimated processor grouping on the compiler (as opposed to the function
31919 'pad_groups' which tries to force the scheduler's grouping on the processor).
31921 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31922 examines the (estimated) dispatch groups that will be formed by the processor
31923 dispatcher. It marks these group boundaries to reflect the estimated
31924 processor grouping, overriding the grouping that the scheduler had marked.
31925 Depending on the value of the flag '-minsert-sched-nops' this function can
31926 force certain insns into separate groups or force a certain distance between
31927 them by inserting nops, for example, if there exists a "costly dependence"
31928 between the insns.
31930 The function estimates the group boundaries that the processor will form as
31931 follows: It keeps track of how many vacant issue slots are available after
31932 each insn. A subsequent insn will start a new group if one of the following
31933 4 cases applies:
31934 - no more vacant issue slots remain in the current dispatch group.
31935 - only the last issue slot, which is the branch slot, is vacant, but the next
31936 insn is not a branch.
31937 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31938 which means that a cracked insn (which occupies two issue slots) can't be
31939 issued in this group.
31940 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31941 start a new group. */
31943 static int
31944 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31945 rtx_insn *tail)
31947 rtx_insn *insn, *next_insn;
31948 int issue_rate;
31949 int can_issue_more;
31950 int slot, i;
31951 bool group_end;
31952 int group_count = 0;
31953 rtx *group_insns;
31955 /* Initialize. */
31956 issue_rate = rs6000_issue_rate ();
31957 group_insns = XALLOCAVEC (rtx, issue_rate);
31958 for (i = 0; i < issue_rate; i++)
31960 group_insns[i] = 0;
31962 can_issue_more = issue_rate;
31963 slot = 0;
31964 insn = get_next_active_insn (prev_head_insn, tail);
31965 group_end = false;
31967 while (insn != NULL_RTX)
31969 slot = (issue_rate - can_issue_more);
31970 group_insns[slot] = insn;
31971 can_issue_more =
31972 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31973 if (insn_terminates_group_p (insn, current_group))
31974 can_issue_more = 0;
31976 next_insn = get_next_active_insn (insn, tail);
31977 if (next_insn == NULL_RTX)
31978 return group_count + 1;
31980 /* Is next_insn going to start a new group? */
31981 group_end
31982 = (can_issue_more == 0
31983 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31984 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31985 || (can_issue_more < issue_rate &&
31986 insn_terminates_group_p (next_insn, previous_group)));
31988 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
31989 next_insn, &group_end, can_issue_more,
31990 &group_count);
31992 if (group_end)
31994 group_count++;
31995 can_issue_more = 0;
31996 for (i = 0; i < issue_rate; i++)
31998 group_insns[i] = 0;
32002 if (GET_MODE (next_insn) == TImode && can_issue_more)
32003 PUT_MODE (next_insn, VOIDmode);
32004 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32005 PUT_MODE (next_insn, TImode);
32007 insn = next_insn;
32008 if (can_issue_more == 0)
32009 can_issue_more = issue_rate;
32010 } /* while */
32012 return group_count;
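/* Illustrative sketch, not part of GCC: the four group-boundary tests
   used above and in force_new_group, factored out with invented
   parameter names.  */
static inline bool
starts_new_group_p (int can_issue_more, int issue_rate,
                    bool next_is_branch, bool next_is_cracked,
                    bool next_must_be_first)
{
  return (can_issue_more == 0
          || (can_issue_more == 1 && !next_is_branch)
          || (can_issue_more <= 2 && next_is_cracked)
          || (can_issue_more < issue_rate && next_must_be_first));
}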
32015 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32016 dispatch group boundaries that the scheduler had marked. Pad with nops
32017 any dispatch groups which have vacant issue slots, in order to force the
32018 scheduler's grouping on the processor dispatcher. The function
32019 returns the number of dispatch groups found. */
32021 static int
32022 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32023 rtx_insn *tail)
32025 rtx_insn *insn, *next_insn;
32026 rtx nop;
32027 int issue_rate;
32028 int can_issue_more;
32029 int group_end;
32030 int group_count = 0;
32032 /* Initialize issue_rate. */
32033 issue_rate = rs6000_issue_rate ();
32034 can_issue_more = issue_rate;
32036 insn = get_next_active_insn (prev_head_insn, tail);
32037 next_insn = get_next_active_insn (insn, tail);
32039 while (insn != NULL_RTX)
32041 can_issue_more =
32042 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32044 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32046 if (next_insn == NULL_RTX)
32047 break;
32049 if (group_end)
32051 /* If the scheduler had marked group termination at this location
32052 (between insn and next_insn), and neither insn nor next_insn will
32053 force group termination, pad the group with nops to force group
32054 termination. */
32055 if (can_issue_more
32056 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32057 && !insn_terminates_group_p (insn, current_group)
32058 && !insn_terminates_group_p (next_insn, previous_group))
32060 if (!is_branch_slot_insn (next_insn))
32061 can_issue_more--;
32063 while (can_issue_more)
32065 nop = gen_nop ();
32066 emit_insn_before (nop, next_insn);
32067 can_issue_more--;
32071 can_issue_more = issue_rate;
32072 group_count++;
32075 insn = next_insn;
32076 next_insn = get_next_active_insn (insn, tail);
32079 return group_count;
32082 /* We're beginning a new block. Initialize data structures as necessary. */
32084 static void
32085 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32086 int sched_verbose ATTRIBUTE_UNUSED,
32087 int max_ready ATTRIBUTE_UNUSED)
32089 last_scheduled_insn = NULL;
32090 load_store_pendulum = 0;
32091 divide_cnt = 0;
32092 vec_load_pendulum = 0;
32095 /* The following function is called at the end of scheduling BB.
32096 After reload, it inserts nops to enforce insn group bundling. */
32098 static void
32099 rs6000_sched_finish (FILE *dump, int sched_verbose)
32101 int n_groups;
32103 if (sched_verbose)
32104 fprintf (dump, "=== Finishing schedule.\n");
32106 if (reload_completed && rs6000_sched_groups)
32108 /* Do not run the sched_finish hook when selective scheduling is enabled. */
32109 if (sel_sched_p ())
32110 return;
32112 if (rs6000_sched_insert_nops == sched_finish_none)
32113 return;
32115 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32116 n_groups = pad_groups (dump, sched_verbose,
32117 current_sched_info->prev_head,
32118 current_sched_info->next_tail);
32119 else
32120 n_groups = redefine_groups (dump, sched_verbose,
32121 current_sched_info->prev_head,
32122 current_sched_info->next_tail);
32124 if (sched_verbose >= 6)
32126 fprintf (dump, "ngroups = %d\n", n_groups);
32127 print_rtl (dump, current_sched_info->prev_head);
32128 fprintf (dump, "Done finish_sched\n");
32133 struct rs6000_sched_context
32135 short cached_can_issue_more;
32136 rtx_insn *last_scheduled_insn;
32137 int load_store_pendulum;
32138 int divide_cnt;
32139 int vec_load_pendulum;
32142 typedef struct rs6000_sched_context rs6000_sched_context_def;
32143 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32145 /* Allocate store for new scheduling context. */
32146 static void *
32147 rs6000_alloc_sched_context (void)
32149 return xmalloc (sizeof (rs6000_sched_context_def));
32152 /* If CLEAN_P is true, initialize _SC with clean data;
32153 otherwise initialize it from the global context. */
32154 static void
32155 rs6000_init_sched_context (void *_sc, bool clean_p)
32157 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32159 if (clean_p)
32161 sc->cached_can_issue_more = 0;
32162 sc->last_scheduled_insn = NULL;
32163 sc->load_store_pendulum = 0;
32164 sc->divide_cnt = 0;
32165 sc->vec_load_pendulum = 0;
32167 else
32169 sc->cached_can_issue_more = cached_can_issue_more;
32170 sc->last_scheduled_insn = last_scheduled_insn;
32171 sc->load_store_pendulum = load_store_pendulum;
32172 sc->divide_cnt = divide_cnt;
32173 sc->vec_load_pendulum = vec_load_pendulum;
32177 /* Sets the global scheduling context to the one pointed to by _SC. */
32178 static void
32179 rs6000_set_sched_context (void *_sc)
32181 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32183 gcc_assert (sc != NULL);
32185 cached_can_issue_more = sc->cached_can_issue_more;
32186 last_scheduled_insn = sc->last_scheduled_insn;
32187 load_store_pendulum = sc->load_store_pendulum;
32188 divide_cnt = sc->divide_cnt;
32189 vec_load_pendulum = sc->vec_load_pendulum;
32192 /* Free _SC. */
32193 static void
32194 rs6000_free_sched_context (void *_sc)
32196 gcc_assert (_sc != NULL);
32198 free (_sc);
32202 /* Length in bytes of the trampoline for entering a nested function. */
32205 rs6000_trampoline_size (void)
32207 int ret = 0;
32209 switch (DEFAULT_ABI)
32211 default:
32212 gcc_unreachable ();
32214 case ABI_AIX:
32215 ret = (TARGET_32BIT) ? 12 : 24;
32216 break;
32218 case ABI_ELFv2:
32219 gcc_assert (!TARGET_32BIT);
32220 ret = 32;
32221 break;
32223 case ABI_DARWIN:
32224 case ABI_V4:
32225 ret = (TARGET_32BIT) ? 40 : 48;
32226 break;
32229 return ret;
32232 /* Emit RTL insns to initialize the variable parts of a trampoline.
32233 FNADDR is an RTX for the address of the function's pure code.
32234 CXT is an RTX for the static chain value for the function. */
32236 static void
32237 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32239 int regsize = (TARGET_32BIT) ? 4 : 8;
32240 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32241 rtx ctx_reg = force_reg (Pmode, cxt);
32242 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32244 switch (DEFAULT_ABI)
32246 default:
32247 gcc_unreachable ();
32249 /* Under AIX, just build the 3-word function descriptor. */
32250 case ABI_AIX:
32252 rtx fnmem, fn_reg, toc_reg;
32254 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32255 error ("You cannot take the address of a nested function if you use "
32256 "the -mno-pointers-to-nested-functions option.");
32258 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32259 fn_reg = gen_reg_rtx (Pmode);
32260 toc_reg = gen_reg_rtx (Pmode);
32262 /* Macro to shorten the code expansions below. */
32263 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32265 m_tramp = replace_equiv_address (m_tramp, addr);
32267 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32268 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32269 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32270 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32271 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32273 # undef MEM_PLUS
32275 break;
32277 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32278 case ABI_ELFv2:
32279 case ABI_DARWIN:
32280 case ABI_V4:
32281 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32282 LCT_NORMAL, VOIDmode, 4,
32283 addr, Pmode,
32284 GEN_INT (rs6000_trampoline_size ()), SImode,
32285 fnaddr, Pmode,
32286 ctx_reg, Pmode);
32287 break;
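/* Illustrative sketch, not part of GCC: the 3-word layout the ABI_AIX
   case above writes into the trampoline, shown as a struct with
   invented names.  Each word is REGSIZE (4 or 8) bytes.  */
struct aix_trampoline_descriptor
{
  void *entry;        /* code address, copied from FNMEM + 0        */
  void *toc;          /* TOC pointer, copied from FNMEM + regsize   */
  void *static_chain; /* CXT, stored at m_tramp + 2 * regsize       */
};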
32292 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32293 identifier as an argument, so the front end shouldn't look it up. */
32295 static bool
32296 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32298 return is_attribute_p ("altivec", attr_id);
32301 /* Handle the "altivec" attribute. The attribute may have
32302 arguments as follows:
32304 __attribute__((altivec(vector__)))
32305 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32306 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32308 and may appear more than once (e.g., 'vector bool char') in a
32309 given declaration. */
32311 static tree
32312 rs6000_handle_altivec_attribute (tree *node,
32313 tree name ATTRIBUTE_UNUSED,
32314 tree args,
32315 int flags ATTRIBUTE_UNUSED,
32316 bool *no_add_attrs)
32318 tree type = *node, result = NULL_TREE;
32319 machine_mode mode;
32320 int unsigned_p;
32321 char altivec_type
32322 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32323 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32324 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32325 : '?');
32327 while (POINTER_TYPE_P (type)
32328 || TREE_CODE (type) == FUNCTION_TYPE
32329 || TREE_CODE (type) == METHOD_TYPE
32330 || TREE_CODE (type) == ARRAY_TYPE)
32331 type = TREE_TYPE (type);
32333 mode = TYPE_MODE (type);
32335 /* Check for invalid AltiVec type qualifiers. */
32336 if (type == long_double_type_node)
32337 error ("use of %<long double%> in AltiVec types is invalid");
32338 else if (type == boolean_type_node)
32339 error ("use of boolean types in AltiVec types is invalid");
32340 else if (TREE_CODE (type) == COMPLEX_TYPE)
32341 error ("use of %<complex%> in AltiVec types is invalid");
32342 else if (DECIMAL_FLOAT_MODE_P (mode))
32343 error ("use of decimal floating point types in AltiVec types is invalid");
32344 else if (!TARGET_VSX)
32346 if (type == long_unsigned_type_node || type == long_integer_type_node)
32348 if (TARGET_64BIT)
32349 error ("use of %<long%> in AltiVec types is invalid for "
32350 "64-bit code without -mvsx");
32351 else if (rs6000_warn_altivec_long)
32352 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32353 "use %<int%>");
32355 else if (type == long_long_unsigned_type_node
32356 || type == long_long_integer_type_node)
32357 error ("use of %<long long%> in AltiVec types is invalid without "
32358 "-mvsx");
32359 else if (type == double_type_node)
32360 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
32363 switch (altivec_type)
32365 case 'v':
32366 unsigned_p = TYPE_UNSIGNED (type);
32367 switch (mode)
32369 case TImode:
32370 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32371 break;
32372 case DImode:
32373 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32374 break;
32375 case SImode:
32376 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32377 break;
32378 case HImode:
32379 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32380 break;
32381 case QImode:
32382 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32383 break;
32384 case SFmode: result = V4SF_type_node; break;
32385 case DFmode: result = V2DF_type_node; break;
32386 /* If the user says 'vector int bool', we may be handed the 'bool'
32387 attribute _before_ the 'vector' attribute, and so select the
32388 proper type in the 'b' case below. */
32389 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
32390 case V2DImode: case V2DFmode:
32391 result = type;
32392 default: break;
32394 break;
32395 case 'b':
32396 switch (mode)
32398 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
32399 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
32400 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
32401 case QImode: case V16QImode: result = bool_V16QI_type_node;
32402 default: break;
32404 break;
32405 case 'p':
32406 switch (mode)
32408 case V8HImode: result = pixel_V8HI_type_node;
32409 default: break;
32411 default: break;
32414 /* Propagate qualifiers attached to the element type
32415 onto the vector type. */
32416 if (result && result != type && TYPE_QUALS (type))
32417 result = build_qualified_type (result, TYPE_QUALS (type));
32419 *no_add_attrs = true; /* No need to hang on to the attribute. */
32421 if (result)
32422 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32424 return NULL_TREE;
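/* Illustrative usage, not part of GCC: the attribute forms the handler
   above accepts, written out explicitly (the AltiVec "vector", "pixel"
   and "bool" keywords expand to these):

     __attribute__((altivec(vector__))) int vi;             vector int
     __attribute__((altivec(bool__))) unsigned int vb;      vector bool int
     __attribute__((altivec(pixel__))) unsigned short vp;   vector pixel  */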
32427 /* AltiVec defines four built-in scalar types that serve as vector
32428 elements; we must teach the compiler how to mangle them. */
32430 static const char *
32431 rs6000_mangle_type (const_tree type)
32433 type = TYPE_MAIN_VARIANT (type);
32435 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32436 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32437 return NULL;
32439 if (type == bool_char_type_node) return "U6__boolc";
32440 if (type == bool_short_type_node) return "U6__bools";
32441 if (type == pixel_type_node) return "u7__pixel";
32442 if (type == bool_int_type_node) return "U6__booli";
32443 if (type == bool_long_type_node) return "U6__booll";
32445 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32446 "g" for IBM extended double, no matter whether it is long double (using
32447 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32448 if (TARGET_FLOAT128)
32450 if (type == ieee128_float_type_node)
32451 return "U10__float128";
32453 if (type == ibm128_float_type_node)
32454 return "g";
32456 if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
32457 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32460 /* Mangle IBM extended float long double as `g' (__float128) on
32461 powerpc*-linux where long-double-64 previously was the default. */
32462 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32463 && TARGET_ELF
32464 && TARGET_LONG_DOUBLE_128
32465 && !TARGET_IEEEQUAD)
32466 return "g";
32468 /* For all other types, use normal C++ mangling. */
32469 return NULL;
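/* Illustrative example, not part of GCC: with the table above, a C++
   function "void f (__pixel)" mangles roughly as "_Z1fu7__pixel", and
   one taking "__bool int" uses "U6__booli" in its mangled name.  */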
32472 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32473 struct attribute_spec.handler. */
32475 static tree
32476 rs6000_handle_longcall_attribute (tree *node, tree name,
32477 tree args ATTRIBUTE_UNUSED,
32478 int flags ATTRIBUTE_UNUSED,
32479 bool *no_add_attrs)
32481 if (TREE_CODE (*node) != FUNCTION_TYPE
32482 && TREE_CODE (*node) != FIELD_DECL
32483 && TREE_CODE (*node) != TYPE_DECL)
32485 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32486 name);
32487 *no_add_attrs = true;
32490 return NULL_TREE;
32493 /* Set longcall attributes on all functions declared when
32494 rs6000_default_long_calls is true. */
32495 static void
32496 rs6000_set_default_type_attributes (tree type)
32498 if (rs6000_default_long_calls
32499 && (TREE_CODE (type) == FUNCTION_TYPE
32500 || TREE_CODE (type) == METHOD_TYPE))
32501 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32502 NULL_TREE,
32503 TYPE_ATTRIBUTES (type));
32505 #if TARGET_MACHO
32506 darwin_set_default_type_attributes (type);
32507 #endif
32510 /* Return a reference suitable for calling a function with the
32511 longcall attribute. */
32514 rs6000_longcall_ref (rtx call_ref)
32516 const char *call_name;
32517 tree node;
32519 if (GET_CODE (call_ref) != SYMBOL_REF)
32520 return call_ref;
32522 /* System V adds '.' to the internal name, so skip any leading dots. */
32523 call_name = XSTR (call_ref, 0);
32524 if (*call_name == '.')
32526 while (*call_name == '.')
32527 call_name++;
32529 node = get_identifier (call_name);
32530 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32533 return force_reg (Pmode, call_ref);
32536 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32537 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32538 #endif
32540 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32541 struct attribute_spec.handler. */
32542 static tree
32543 rs6000_handle_struct_attribute (tree *node, tree name,
32544 tree args ATTRIBUTE_UNUSED,
32545 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32547 tree *type = NULL;
32548 if (DECL_P (*node))
32550 if (TREE_CODE (*node) == TYPE_DECL)
32551 type = &TREE_TYPE (*node);
32553 else
32554 type = node;
32556 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32557 || TREE_CODE (*type) == UNION_TYPE)))
32559 warning (OPT_Wattributes, "%qE attribute ignored", name);
32560 *no_add_attrs = true;
32563 else if ((is_attribute_p ("ms_struct", name)
32564 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32565 || ((is_attribute_p ("gcc_struct", name)
32566 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32568 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32569 name);
32570 *no_add_attrs = true;
32573 return NULL_TREE;
32576 static bool
32577 rs6000_ms_bitfield_layout_p (const_tree record_type)
32579 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32580 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32581 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32584 #ifdef USING_ELFOS_H
32586 /* A get_unnamed_section callback, used for switching to toc_section. */
32588 static void
32589 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32591 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32592 && TARGET_MINIMAL_TOC)
32594 if (!toc_initialized)
32596 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32597 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32598 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32599 fprintf (asm_out_file, "\t.tc ");
32600 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32601 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32602 fprintf (asm_out_file, "\n");
32604 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32605 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32606 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32607 fprintf (asm_out_file, " = .+32768\n");
32608 toc_initialized = 1;
32610 else
32611 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32613 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32615 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32616 if (!toc_initialized)
32618 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32619 toc_initialized = 1;
32622 else
32624 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32625 if (!toc_initialized)
32627 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32628 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32629 fprintf (asm_out_file, " = .+32768\n");
32630 toc_initialized = 1;
32635 /* Implement TARGET_ASM_INIT_SECTIONS. */
32637 static void
32638 rs6000_elf_asm_init_sections (void)
32640 toc_section
32641 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32643 sdata2_section
32644 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32645 SDATA2_SECTION_ASM_OP);
32648 /* Implement TARGET_SELECT_RTX_SECTION. */
32650 static section *
32651 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32652 unsigned HOST_WIDE_INT align)
32654 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32655 return toc_section;
32656 else
32657 return default_elf_select_rtx_section (mode, x, align);
32660 /* For a SYMBOL_REF, set generic flags and then perform some
32661 target-specific processing.
32663 When the AIX ABI is requested on a non-AIX system, replace the
32664 function name with the real name (with a leading .) rather than the
32665 function descriptor name. This saves a lot of overriding code to
32666 read the prefixes. */
32668 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32669 static void
32670 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32672 default_encode_section_info (decl, rtl, first);
32674 if (first
32675 && TREE_CODE (decl) == FUNCTION_DECL
32676 && !TARGET_AIX
32677 && DEFAULT_ABI == ABI_AIX)
32679 rtx sym_ref = XEXP (rtl, 0);
32680 size_t len = strlen (XSTR (sym_ref, 0));
32681 char *str = XALLOCAVEC (char, len + 2);
32682 str[0] = '.';
32683 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32684 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32688 static inline bool
32689 compare_section_name (const char *section, const char *templ)
32691 int len;
32693 len = strlen (templ);
32694 return (strncmp (section, templ, len) == 0
32695 && (section[len] == 0 || section[len] == '.'));
32698 bool
32699 rs6000_elf_in_small_data_p (const_tree decl)
32701 if (rs6000_sdata == SDATA_NONE)
32702 return false;
32704 /* We want to merge strings, so we never consider them small data. */
32705 if (TREE_CODE (decl) == STRING_CST)
32706 return false;
32708 /* Functions are never in the small data area. */
32709 if (TREE_CODE (decl) == FUNCTION_DECL)
32710 return false;
32712 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32714 const char *section = DECL_SECTION_NAME (decl);
32715 if (compare_section_name (section, ".sdata")
32716 || compare_section_name (section, ".sdata2")
32717 || compare_section_name (section, ".gnu.linkonce.s")
32718 || compare_section_name (section, ".sbss")
32719 || compare_section_name (section, ".sbss2")
32720 || compare_section_name (section, ".gnu.linkonce.sb")
32721 || strcmp (section, ".PPC.EMB.sdata0") == 0
32722 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32723 return true;
32725 else
32727 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32729 if (size > 0
32730 && size <= g_switch_value
32731 /* If it's not public, and we're not going to reference it there,
32732 there's no need to put it in the small data section. */
32733 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32734 return true;
32737 return false;
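/* Illustrative example, not part of GCC: with -G 8 (g_switch_value of
   8), a 4-byte "int x;" passes the size test above and is placed in
   small data, while a 16-byte "int big[4];" is not.  */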
32740 #endif /* USING_ELFOS_H */
32742 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32744 static bool
32745 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32747 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32750 /* Do not place thread-local symbols refs in the object blocks. */
32752 static bool
32753 rs6000_use_blocks_for_decl_p (const_tree decl)
32755 return !DECL_THREAD_LOCAL_P (decl);
32758 /* Return a REG that occurs in ADDR with coefficient 1.
32759 ADDR can be effectively incremented by incrementing REG.
32761 r0 is special and we must not select it as an address
32762 register by this routine since our caller will try to
32763 increment the returned register via an "la" instruction. */
32766 find_addr_reg (rtx addr)
32768 while (GET_CODE (addr) == PLUS)
32770 if (GET_CODE (XEXP (addr, 0)) == REG
32771 && REGNO (XEXP (addr, 0)) != 0)
32772 addr = XEXP (addr, 0);
32773 else if (GET_CODE (XEXP (addr, 1)) == REG
32774 && REGNO (XEXP (addr, 1)) != 0)
32775 addr = XEXP (addr, 1);
32776 else if (CONSTANT_P (XEXP (addr, 0)))
32777 addr = XEXP (addr, 1);
32778 else if (CONSTANT_P (XEXP (addr, 1)))
32779 addr = XEXP (addr, 0);
32780 else
32781 gcc_unreachable ();
32783 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32784 return addr;
32787 void
32788 rs6000_fatal_bad_address (rtx op)
32790 fatal_insn ("bad address", op);
32793 #if TARGET_MACHO
32795 typedef struct branch_island_d {
32796 tree function_name;
32797 tree label_name;
32798 int line_number;
32799 } branch_island;
32802 static vec<branch_island, va_gc> *branch_islands;
32804 /* Remember to generate a branch island for far calls to the given
32805 function. */
32807 static void
32808 add_compiler_branch_island (tree label_name, tree function_name,
32809 int line_number)
32811 branch_island bi = {function_name, label_name, line_number};
32812 vec_safe_push (branch_islands, bi);
32815 /* Generate far-jump branch islands for everything recorded in
32816 branch_islands. Invoked immediately after the last instruction of
32817 the epilogue has been emitted; the branch islands must be appended
32818 to, and contiguous with, the function body. Mach-O stubs are
32819 generated in machopic_output_stub(). */
32821 static void
32822 macho_branch_islands (void)
32824 char tmp_buf[512];
32826 while (!vec_safe_is_empty (branch_islands))
32828 branch_island *bi = &branch_islands->last ();
32829 const char *label = IDENTIFIER_POINTER (bi->label_name);
32830 const char *name = IDENTIFIER_POINTER (bi->function_name);
32831 char name_buf[512];
32832 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32833 if (name[0] == '*' || name[0] == '&')
32834 strcpy (name_buf, name+1);
32835 else
32837 name_buf[0] = '_';
32838 strcpy (name_buf+1, name);
32840 strcpy (tmp_buf, "\n");
32841 strcat (tmp_buf, label);
32842 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32843 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32844 dbxout_stabd (N_SLINE, bi->line_number);
32845 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32846 if (flag_pic)
32848 if (TARGET_LINK_STACK)
32850 char name[32];
32851 get_ppc476_thunk_name (name);
32852 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32853 strcat (tmp_buf, name);
32854 strcat (tmp_buf, "\n");
32855 strcat (tmp_buf, label);
32856 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32858 else
32860 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32861 strcat (tmp_buf, label);
32862 strcat (tmp_buf, "_pic\n");
32863 strcat (tmp_buf, label);
32864 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32867 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32868 strcat (tmp_buf, name_buf);
32869 strcat (tmp_buf, " - ");
32870 strcat (tmp_buf, label);
32871 strcat (tmp_buf, "_pic)\n");
32873 strcat (tmp_buf, "\tmtlr r0\n");
32875 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32876 strcat (tmp_buf, name_buf);
32877 strcat (tmp_buf, " - ");
32878 strcat (tmp_buf, label);
32879 strcat (tmp_buf, "_pic)\n");
32881 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32883 else
32885 strcat (tmp_buf, ":\nlis r12,hi16(");
32886 strcat (tmp_buf, name_buf);
32887 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32888 strcat (tmp_buf, name_buf);
32889 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32891 output_asm_insn (tmp_buf, 0);
32892 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32893 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32894 dbxout_stabd (N_SLINE, bi->line_number);
32895 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32896 branch_islands->pop ();
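/* Illustrative output, not part of GCC: for a far call to "foo"
   through island "L42" without -fpic, the loop above emits roughly:

     L42:
         lis r12,hi16(_foo)
         ori r12,r12,lo16(_foo)
         mtctr r12
         bctr                                                          */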
32900 /* NO_PREVIOUS_DEF checks whether the function name is already in the
32901 branch-island list. */
32903 static int
32904 no_previous_def (tree function_name)
32906 branch_island *bi;
32907 unsigned ix;
32909 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32910 if (function_name == bi->function_name)
32911 return 0;
32912 return 1;
32915 /* GET_PREV_LABEL gets the label name from the previous definition of
32916 the function. */
32918 static tree
32919 get_prev_label (tree function_name)
32921 branch_island *bi;
32922 unsigned ix;
32924 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32925 if (function_name == bi->function_name)
32926 return bi->label_name;
32927 return NULL_TREE;
32930 /* INSN is either a function call or a millicode call. It may have an
32931 unconditional jump in its delay slot.
32933 CALL_DEST is the routine we are calling. */
32935 char *
32936 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
32937 int cookie_operand_number)
32939 static char buf[256];
32940 if (darwin_emit_branch_islands
32941 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
32942 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
32944 tree labelname;
32945 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
32947 if (no_previous_def (funname))
32949 rtx label_rtx = gen_label_rtx ();
32950 char *label_buf, temp_buf[256];
32951 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32952 CODE_LABEL_NUMBER (label_rtx));
32953 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32954 labelname = get_identifier (label_buf);
32955 add_compiler_branch_island (labelname, funname, insn_line (insn));
32957 else
32958 labelname = get_prev_label (funname);
32960 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32961 instruction will reach 'foo', otherwise link as 'bl L42'".
32962 "L42" should be a 'branch island', that will do a far jump to
32963 'foo'. Branch islands are generated in
32964 macho_branch_islands(). */
32965 sprintf (buf, "jbsr %%z%d,%.246s",
32966 dest_operand_number, IDENTIFIER_POINTER (labelname));
32968 else
32969 sprintf (buf, "bl %%z%d", dest_operand_number);
32970 return buf;
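/* For illustration (hypothetical names): a CALL_LONG call to "foo" that
   has no previous island definition emits something like

       jbsr _foo,L42

   and records (L42, foo) as a branch island, which
   macho_branch_islands () later expands into the far-jump sequence
   shown above.  A call without CALL_LONG simply emits "bl _foo".  */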
32973 /* Generate PIC and indirect symbol stubs. */
32975 void
32976 machopic_output_stub (FILE *file, const char *symb, const char *stub)
32978 unsigned int length;
32979 char *symbol_name, *lazy_ptr_name;
32980 char *local_label_0;
32981 static int label = 0;
32983 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
32984 symb = (*targetm.strip_name_encoding) (symb);
32987 length = strlen (symb);
32988 symbol_name = XALLOCAVEC (char, length + 32);
32989 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
32991 lazy_ptr_name = XALLOCAVEC (char, length + 32);
32992 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
32994 if (flag_pic == 2)
32995 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
32996 else
32997 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
32999 if (flag_pic == 2)
33001 fprintf (file, "\t.align 5\n");
33003 fprintf (file, "%s:\n", stub);
33004 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33006 label++;
33007 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33008 sprintf (local_label_0, "\"L%011d$spb\"", label);
33010 fprintf (file, "\tmflr r0\n");
33011 if (TARGET_LINK_STACK)
33013 char name[32];
33014 get_ppc476_thunk_name (name);
33015 fprintf (file, "\tbl %s\n", name);
33016 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33018 else
33020 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33021 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33023 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33024 lazy_ptr_name, local_label_0);
33025 fprintf (file, "\tmtlr r0\n");
33026 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33027 (TARGET_64BIT ? "ldu" : "lwzu"),
33028 lazy_ptr_name, local_label_0);
33029 fprintf (file, "\tmtctr r12\n");
33030 fprintf (file, "\tbctr\n");
33032 else
33034 fprintf (file, "\t.align 4\n");
33036 fprintf (file, "%s:\n", stub);
33037 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33039 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33040 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33041 (TARGET_64BIT ? "ldu" : "lwzu"),
33042 lazy_ptr_name);
33043 fprintf (file, "\tmtctr r12\n");
33044 fprintf (file, "\tbctr\n");
33047 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33048 fprintf (file, "%s:\n", lazy_ptr_name);
33049 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33050 fprintf (file, "%sdyld_stub_binding_helper\n",
33051 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
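/* Sketch of the output (hypothetical symbol "bar", 32-bit, flag_pic == 2,
   no TARGET_LINK_STACK; the exact label spellings come from
   GEN_SYMBOL_NAME_FOR_SYMBOL and GEN_LAZY_PTR_NAME_FOR_SYMBOL):

       L_bar$stub:
               .indirect_symbol _bar
               mflr r0
               bcl 20,31,"L00000000001$spb"
       "L00000000001$spb":
               mflr r11
               addis r11,r11,ha16(L_bar$lazy_ptr-"L00000000001$spb")
               mtlr r0
               lwzu r12,lo16(L_bar$lazy_ptr-"L00000000001$spb")(r11)
               mtctr r12
               bctr

   followed by a lazy pointer entry that initially resolves to
   dyld_stub_binding_helper.  */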
33054 /* Legitimize PIC addresses. If the address is already
33055 position-independent, we return ORIG. Newly generated
33056 position-independent addresses go into a reg. This is REG if
33057 nonzero; otherwise we allocate register(s) as necessary. */
33059 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
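/* SMALL_INT tests for a signed 16-bit constant: adding 0x8000 maps the
   valid range [-0x8000, 0x7fff] onto [0, 0xffff], so for example
   0x7fff and -0x8000 pass while 0x8000 does not.  */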
33061 rtx
33062 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33063 rtx reg)
33065 rtx base, offset;
33067 if (reg == NULL && ! reload_in_progress && ! reload_completed)
33068 reg = gen_reg_rtx (Pmode);
33070 if (GET_CODE (orig) == CONST)
33072 rtx reg_temp;
33074 if (GET_CODE (XEXP (orig, 0)) == PLUS
33075 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33076 return orig;
33078 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33080 /* Use a different reg for the intermediate value, as
33081 it will be marked UNCHANGING. */
33082 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33083 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33084 Pmode, reg_temp);
33085 offset =
33086 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33087 Pmode, reg);
33089 if (GET_CODE (offset) == CONST_INT)
33091 if (SMALL_INT (offset))
33092 return plus_constant (Pmode, base, INTVAL (offset));
33093 else if (! reload_in_progress && ! reload_completed)
33094 offset = force_reg (Pmode, offset);
33095 else
33097 rtx mem = force_const_mem (Pmode, orig);
33098 return machopic_legitimize_pic_address (mem, Pmode, reg);
33101 return gen_rtx_PLUS (Pmode, base, offset);
33104 /* Fall back on generic machopic code. */
33105 return machopic_legitimize_pic_address (orig, mode, reg);
33108 /* Output a .machine directive for the Darwin assembler, and call
33109 the generic start_file routine. */
33111 static void
33112 rs6000_darwin_file_start (void)
33114 static const struct
33116 const char *arg;
33117 const char *name;
33118 HOST_WIDE_INT if_set;
33119 } mapping[] = {
33120 { "ppc64", "ppc64", MASK_64BIT },
33121 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33122 { "power4", "ppc970", 0 },
33123 { "G5", "ppc970", 0 },
33124 { "7450", "ppc7450", 0 },
33125 { "7400", "ppc7400", MASK_ALTIVEC },
33126 { "G4", "ppc7400", 0 },
33127 { "750", "ppc750", 0 },
33128 { "740", "ppc750", 0 },
33129 { "G3", "ppc750", 0 },
33130 { "604e", "ppc604e", 0 },
33131 { "604", "ppc604", 0 },
33132 { "603e", "ppc603", 0 },
33133 { "603", "ppc603", 0 },
33134 { "601", "ppc601", 0 },
33135 { NULL, "ppc", 0 } };
33136 const char *cpu_id = "";
33137 size_t i;
33139 rs6000_file_start ();
33140 darwin_file_start ();
33142 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33144 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33145 cpu_id = rs6000_default_cpu;
33147 if (global_options_set.x_rs6000_cpu_index)
33148 cpu_id = processor_target_table[rs6000_cpu_index].name;
33150 /* Look through the mapping array. Pick the first name that either
33151 matches the argument, has a bit set in IF_SET that is also set
33152 in the target flags, or has a NULL name. */
33154 i = 0;
33155 while (mapping[i].arg != NULL
33156 && strcmp (mapping[i].arg, cpu_id) != 0
33157 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33158 i++;
33160 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
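/* For example, -mcpu=G5 yields ".machine ppc970", while a 64-bit
   compilation stops at the first entry via MASK_64BIT and emits
   ".machine ppc64" regardless of the cpu string; anything
   unrecognized falls through to the final ".machine ppc" entry.  */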
33163 #endif /* TARGET_MACHO */
33165 #if TARGET_ELF
33166 static int
33167 rs6000_elf_reloc_rw_mask (void)
33169 if (flag_pic)
33170 return 3;
33171 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33172 return 2;
33173 else
33174 return 0;
33177 /* Record an element in the table of global constructors. SYMBOL is
33178 a SYMBOL_REF of the function to be called; PRIORITY is a number
33179 between 0 and MAX_INIT_PRIORITY.
33181 This differs from default_named_section_asm_out_constructor in
33182 that we have special handling for -mrelocatable. */
33184 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33185 static void
33186 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33188 const char *section = ".ctors";
33189 char buf[16];
33191 if (priority != DEFAULT_INIT_PRIORITY)
33193 sprintf (buf, ".ctors.%.5u",
33194 /* Invert the numbering so the linker puts us in the proper
33195 order; constructors are run from right to left, and the
33196 linker sorts in increasing order. */
33197 MAX_INIT_PRIORITY - priority);
33198 section = buf;
33201 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33202 assemble_align (POINTER_SIZE);
33204 if (DEFAULT_ABI == ABI_V4
33205 && (TARGET_RELOCATABLE || flag_pic > 1))
33207 fputs ("\t.long (", asm_out_file);
33208 output_addr_const (asm_out_file, symbol);
33209 fputs (")@fixup\n", asm_out_file);
33211 else
33212 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
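/* Worked example: with MAX_INIT_PRIORITY of 65535, a constructor of
   priority 100 is placed in ".ctors.65435", so the linker's ascending
   sort over section names yields the intended right-to-left execution
   order.  */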
33215 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33216 static void
33217 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33219 const char *section = ".dtors";
33220 char buf[16];
33222 if (priority != DEFAULT_INIT_PRIORITY)
33224 sprintf (buf, ".dtors.%.5u",
33225 /* Invert the numbering so the linker puts us in the proper
33226 order; constructors are run from right to left, and the
33227 linker sorts in increasing order. */
33228 MAX_INIT_PRIORITY - priority);
33229 section = buf;
33232 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33233 assemble_align (POINTER_SIZE);
33235 if (DEFAULT_ABI == ABI_V4
33236 && (TARGET_RELOCATABLE || flag_pic > 1))
33238 fputs ("\t.long (", asm_out_file);
33239 output_addr_const (asm_out_file, symbol);
33240 fputs (")@fixup\n", asm_out_file);
33242 else
33243 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33246 void
33247 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33249 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33251 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33252 ASM_OUTPUT_LABEL (file, name);
33253 fputs (DOUBLE_INT_ASM_OP, file);
33254 rs6000_output_function_entry (file, name);
33255 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33256 if (DOT_SYMBOLS)
33258 fputs ("\t.size\t", file);
33259 assemble_name (file, name);
33260 fputs (",24\n\t.type\t.", file);
33261 assemble_name (file, name);
33262 fputs (",@function\n", file);
33263 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33265 fputs ("\t.globl\t.", file);
33266 assemble_name (file, name);
33267 putc ('\n', file);
33270 else
33271 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33272 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33273 rs6000_output_function_entry (file, name);
33274 fputs (":\n", file);
33275 return;
33278 if (DEFAULT_ABI == ABI_V4
33279 && (TARGET_RELOCATABLE || flag_pic > 1)
33280 && !TARGET_SECURE_PLT
33281 && (get_pool_size () != 0 || crtl->profile)
33282 && uses_TOC ())
33284 char buf[256];
33286 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33288 fprintf (file, "\t.long ");
33289 assemble_name (file, toc_label_name);
33290 need_toc_init = 1;
33291 putc ('-', file);
33292 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33293 assemble_name (file, buf);
33294 putc ('\n', file);
33297 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33298 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33300 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33302 char buf[256];
33304 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33306 fprintf (file, "\t.quad .TOC.-");
33307 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33308 assemble_name (file, buf);
33309 putc ('\n', file);
33312 if (DEFAULT_ABI == ABI_AIX)
33314 const char *desc_name, *orig_name;
33316 orig_name = (*targetm.strip_name_encoding) (name);
33317 desc_name = orig_name;
33318 while (*desc_name == '.')
33319 desc_name++;
33321 if (TREE_PUBLIC (decl))
33322 fprintf (file, "\t.globl %s\n", desc_name);
33324 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33325 fprintf (file, "%s:\n", desc_name);
33326 fprintf (file, "\t.long %s\n", orig_name);
33327 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33328 fputs ("\t.long 0\n", file);
33329 fprintf (file, "\t.previous\n");
33331 ASM_OUTPUT_LABEL (file, name);
33334 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33335 static void
33336 rs6000_elf_file_end (void)
33338 #ifdef HAVE_AS_GNU_ATTRIBUTE
33339 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33341 if (rs6000_passes_float)
33342 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
33343 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
33344 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
33345 : 2));
33346 if (rs6000_passes_vector)
33347 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33348 (TARGET_ALTIVEC_ABI ? 2
33349 : TARGET_SPE_ABI ? 3
33350 : 1));
33351 if (rs6000_returns_struct)
33352 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33353 aix_struct_return ? 2 : 1);
33355 #endif
33356 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33357 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33358 file_end_indicate_exec_stack ();
33359 #endif
33361 if (flag_split_stack)
33362 file_end_indicate_split_stack ();
33364 if (cpu_builtin_p)
33366 /* We have expanded a CPU builtin, so we need to emit a reference to
33367 the special symbol that LIBC uses to declare that it supports the
33368 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 features in the TCB. */
33369 switch_to_section (data_section);
33370 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33371 fprintf (asm_out_file, "\t%s %s\n",
33372 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33375 #endif
33377 #if TARGET_XCOFF
33379 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33380 #define HAVE_XCOFF_DWARF_EXTRAS 0
33381 #endif
33383 static enum unwind_info_type
33384 rs6000_xcoff_debug_unwind_info (void)
33386 return UI_NONE;
33389 static void
33390 rs6000_xcoff_asm_output_anchor (rtx symbol)
33392 char buffer[100];
33394 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33395 SYMBOL_REF_BLOCK_OFFSET (symbol));
33396 fprintf (asm_out_file, "%s", SET_ASM_OP);
33397 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33398 fprintf (asm_out_file, ",");
33399 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33400 fprintf (asm_out_file, "\n");
33403 static void
33404 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33406 fputs (GLOBAL_ASM_OP, stream);
33407 RS6000_OUTPUT_BASENAME (stream, name);
33408 putc ('\n', stream);
33411 /* A get_unnamed_section callback, used for read-only sections.
33412 DIRECTIVE points to the section name string variable. */
33414 static void
33415 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33417 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33418 *(const char *const *) directive,
33419 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33422 /* Likewise for read-write sections. */
33424 static void
33425 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33427 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33428 *(const char *const *) directive,
33429 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33432 static void
33433 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33435 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33436 *(const char *const *) directive,
33437 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33440 /* A get_unnamed_section callback, used for switching to toc_section. */
33442 static void
33443 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33445 if (TARGET_MINIMAL_TOC)
33447 /* toc_section is always selected at least once from
33448 rs6000_xcoff_file_start, so this is guaranteed to
33449 always be defined once and only once in each file. */
33450 if (!toc_initialized)
33452 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33453 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33454 toc_initialized = 1;
33456 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33457 (TARGET_32BIT ? "" : ",3"));
33459 else
33460 fputs ("\t.toc\n", asm_out_file);
33463 /* Implement TARGET_ASM_INIT_SECTIONS. */
33465 static void
33466 rs6000_xcoff_asm_init_sections (void)
33468 read_only_data_section
33469 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33470 &xcoff_read_only_section_name);
33472 private_data_section
33473 = get_unnamed_section (SECTION_WRITE,
33474 rs6000_xcoff_output_readwrite_section_asm_op,
33475 &xcoff_private_data_section_name);
33477 tls_data_section
33478 = get_unnamed_section (SECTION_TLS,
33479 rs6000_xcoff_output_tls_section_asm_op,
33480 &xcoff_tls_data_section_name);
33482 tls_private_data_section
33483 = get_unnamed_section (SECTION_TLS,
33484 rs6000_xcoff_output_tls_section_asm_op,
33485 &xcoff_private_data_section_name);
33487 read_only_private_data_section
33488 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33489 &xcoff_private_data_section_name);
33491 toc_section
33492 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33494 readonly_data_section = read_only_data_section;
33497 static int
33498 rs6000_xcoff_reloc_rw_mask (void)
33500 return 3;
33503 static void
33504 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33505 tree decl ATTRIBUTE_UNUSED)
33507 int smclass;
33508 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33510 if (flags & SECTION_EXCLUDE)
33511 smclass = 4;
33512 else if (flags & SECTION_DEBUG)
33514 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33515 return;
33517 else if (flags & SECTION_CODE)
33518 smclass = 0;
33519 else if (flags & SECTION_TLS)
33520 smclass = 3;
33521 else if (flags & SECTION_WRITE)
33522 smclass = 2;
33523 else
33524 smclass = 1;
33526 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33527 (flags & SECTION_CODE) ? "." : "",
33528 name, suffix[smclass], flags & SECTION_ENTSIZE);
33531 #define IN_NAMED_SECTION(DECL) \
33532 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33533 && DECL_SECTION_NAME (DECL) != NULL)
33535 static section *
33536 rs6000_xcoff_select_section (tree decl, int reloc,
33537 unsigned HOST_WIDE_INT align)
33539 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33540 named section. */
33541 if (align > BIGGEST_ALIGNMENT)
33543 resolve_unique_section (decl, reloc, true);
33544 if (IN_NAMED_SECTION (decl))
33545 return get_named_section (decl, NULL, reloc);
33548 if (decl_readonly_section (decl, reloc))
33550 if (TREE_PUBLIC (decl))
33551 return read_only_data_section;
33552 else
33553 return read_only_private_data_section;
33555 else
33557 #if HAVE_AS_TLS
33558 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33560 if (TREE_PUBLIC (decl))
33561 return tls_data_section;
33562 else if (bss_initializer_p (decl))
33564 /* Convert to COMMON to emit in BSS. */
33565 DECL_COMMON (decl) = 1;
33566 return tls_comm_section;
33568 else
33569 return tls_private_data_section;
33571 else
33572 #endif
33573 if (TREE_PUBLIC (decl))
33574 return data_section;
33575 else
33576 return private_data_section;
33580 static void
33581 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33583 const char *name;
33585 /* Use select_section for private data and uninitialized data with
33586 alignment <= BIGGEST_ALIGNMENT. */
33587 if (!TREE_PUBLIC (decl)
33588 || DECL_COMMON (decl)
33589 || (DECL_INITIAL (decl) == NULL_TREE
33590 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33591 || DECL_INITIAL (decl) == error_mark_node
33592 || (flag_zero_initialized_in_bss
33593 && initializer_zerop (DECL_INITIAL (decl))))
33594 return;
33596 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33597 name = (*targetm.strip_name_encoding) (name);
33598 set_decl_section_name (decl, name);
33601 /* Select section for constant in constant pool.
33603 On RS/6000, all constants are in the private read-only data area.
33604 However, if this is being placed in the TOC it must be output as a
33605 toc entry. */
33607 static section *
33608 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33609 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33611 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33612 return toc_section;
33613 else
33614 return read_only_private_data_section;
33617 /* Remove any trailing [DS] or the like from the symbol name. */
33619 static const char *
33620 rs6000_xcoff_strip_name_encoding (const char *name)
33622 size_t len;
33623 if (*name == '*')
33624 name++;
33625 len = strlen (name);
33626 if (name[len - 1] == ']')
33627 return ggc_alloc_string (name, len - 4);
33628 else
33629 return name;
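/* For example, "foo[DS]" and "*bar[RW]" strip to "foo" and "bar"; note
   the code assumes any trailing mapping-class suffix is exactly four
   characters ("[XX]").  */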
33632 /* Section attributes. AIX is always PIC. */
33634 static unsigned int
33635 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33637 unsigned int align;
33638 unsigned int flags = default_section_type_flags (decl, name, reloc);
33640 /* Align to at least MIN_UNITS_PER_WORD bytes. */
33641 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33642 align = MIN_UNITS_PER_WORD;
33643 else
33644 /* Increase alignment of large objects if not already stricter. */
33645 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33646 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33647 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33649 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
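/* Worked example (assuming the usual MIN_UNITS_PER_WORD of 4 and
   UNITS_PER_FP_WORD of 8): a 32-byte object declared with 16-byte
   alignment gets align = MAX (16, 8) = 16, and exact_log2 (16) = 4 is
   encoded in the SECTION_ENTSIZE bits of the returned flags.  */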
33652 /* Output at beginning of assembler file.
33654 Initialize the section names for the RS/6000 at this point.
33656 Specify filename, including full path, to assembler.
33658 We want to go into the TOC section so at least one .toc will be emitted.
33659 Also, in order to output proper .bs/.es pairs, we need at least one static
33660 [RW] section emitted.
33662 Finally, declare mcount when profiling to make the assembler happy. */
33664 static void
33665 rs6000_xcoff_file_start (void)
33667 rs6000_gen_section_name (&xcoff_bss_section_name,
33668 main_input_filename, ".bss_");
33669 rs6000_gen_section_name (&xcoff_private_data_section_name,
33670 main_input_filename, ".rw_");
33671 rs6000_gen_section_name (&xcoff_read_only_section_name,
33672 main_input_filename, ".ro_");
33673 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33674 main_input_filename, ".tls_");
33675 rs6000_gen_section_name (&xcoff_tbss_section_name,
33676 main_input_filename, ".tbss_[UL]");
33678 fputs ("\t.file\t", asm_out_file);
33679 output_quoted_string (asm_out_file, main_input_filename);
33680 fputc ('\n', asm_out_file);
33681 if (write_symbols != NO_DEBUG)
33682 switch_to_section (private_data_section);
33683 switch_to_section (toc_section);
33684 switch_to_section (text_section);
33685 if (profile_flag)
33686 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33687 rs6000_file_start ();
33690 /* Output at end of assembler file.
33691 On the RS/6000, referencing data should automatically pull in text. */
33693 static void
33694 rs6000_xcoff_file_end (void)
33696 switch_to_section (text_section);
33697 fputs ("_section_.text:\n", asm_out_file);
33698 switch_to_section (data_section);
33699 fputs (TARGET_32BIT
33700 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33701 asm_out_file);
33704 struct declare_alias_data
33706 FILE *file;
33707 bool function_descriptor;
33710 /* Declare alias N. A helper callback for call_for_symbol_and_aliases. */
33712 static bool
33713 rs6000_declare_alias (struct symtab_node *n, void *d)
33715 struct declare_alias_data *data = (struct declare_alias_data *)d;
33716 /* Main symbol is output specially, because varasm machinery does part of
33717 the job for us - we do not need to declare .globl/.lglobl and such. */
33718 if (!n->alias || n->weakref)
33719 return false;
33721 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33722 return false;
33724 /* Prevent assemble_alias from trying to use .set pseudo operation
33725 that does not behave as expected by the middle-end. */
33726 TREE_ASM_WRITTEN (n->decl) = true;
33728 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33729 char *buffer = (char *) alloca (strlen (name) + 2);
33730 char *p;
33731 int dollar_inside = 0;
33733 strcpy (buffer, name);
33734 p = strchr (buffer, '$');
33735 while (p) {
33736 *p = '_';
33737 dollar_inside++;
33738 p = strchr (p + 1, '$');
33740 if (TREE_PUBLIC (n->decl))
33742 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33744 if (dollar_inside) {
33745 if (data->function_descriptor)
33746 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33747 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33749 if (data->function_descriptor)
33751 fputs ("\t.globl .", data->file);
33752 RS6000_OUTPUT_BASENAME (data->file, buffer);
33753 putc ('\n', data->file);
33755 fputs ("\t.globl ", data->file);
33756 RS6000_OUTPUT_BASENAME (data->file, buffer);
33757 putc ('\n', data->file);
33759 #ifdef ASM_WEAKEN_DECL
33760 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33761 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33762 #endif
33764 else
33766 if (dollar_inside)
33768 if (data->function_descriptor)
33769 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33770 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33772 if (data->function_descriptor)
33774 fputs ("\t.lglobl .", data->file);
33775 RS6000_OUTPUT_BASENAME (data->file, buffer);
33776 putc ('\n', data->file);
33778 fputs ("\t.lglobl ", data->file);
33779 RS6000_OUTPUT_BASENAME (data->file, buffer);
33780 putc ('\n', data->file);
33782 if (data->function_descriptor)
33783 fputs (".", data->file);
33784 RS6000_OUTPUT_BASENAME (data->file, buffer);
33785 fputs (":\n", data->file);
33786 return false;
33789 /* This macro produces the initial definition of a function name.
33790 On the RS/6000, we need to place an extra '.' in the function name and
33791 output the function descriptor.
33792 Dollar signs are converted to underscores.
33794 The csect for the function will have already been created when
33795 text_section was selected. We do have to go back to that csect, however.
33797 The third and fourth parameters to the .function pseudo-op (16 and 044)
33798 are placeholders which no longer have any use.
33800 Because AIX assembler's .set command has unexpected semantics, we output
33801 all aliases as alternative labels in front of the definition. */
33803 void
33804 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33806 char *buffer = (char *) alloca (strlen (name) + 1);
33807 char *p;
33808 int dollar_inside = 0;
33809 struct declare_alias_data data = {file, false};
33811 strcpy (buffer, name);
33812 p = strchr (buffer, '$');
33813 while (p) {
33814 *p = '_';
33815 dollar_inside++;
33816 p = strchr (p + 1, '$');
33818 if (TREE_PUBLIC (decl))
33820 if (!RS6000_WEAK || !DECL_WEAK (decl))
33822 if (dollar_inside) {
33823 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33824 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33826 fputs ("\t.globl .", file);
33827 RS6000_OUTPUT_BASENAME (file, buffer);
33828 putc ('\n', file);
33831 else
33833 if (dollar_inside) {
33834 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33835 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33837 fputs ("\t.lglobl .", file);
33838 RS6000_OUTPUT_BASENAME (file, buffer);
33839 putc ('\n', file);
33841 fputs ("\t.csect ", file);
33842 RS6000_OUTPUT_BASENAME (file, buffer);
33843 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33844 RS6000_OUTPUT_BASENAME (file, buffer);
33845 fputs (":\n", file);
33846 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
33847 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33848 RS6000_OUTPUT_BASENAME (file, buffer);
33849 fputs (", TOC[tc0], 0\n", file);
33850 in_section = NULL;
33851 switch_to_section (function_section (decl));
33852 putc ('.', file);
33853 RS6000_OUTPUT_BASENAME (file, buffer);
33854 fputs (":\n", file);
33855 data.function_descriptor = true;
33856 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
33857 if (!DECL_IGNORED_P (decl))
33859 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33860 xcoffout_declare_function (file, decl, buffer);
33861 else if (write_symbols == DWARF2_DEBUG)
33863 name = (*targetm.strip_name_encoding) (name);
33864 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33867 return;
33870 /* This macro produces the initial definition of an object (variable) name.
33871 Because AIX assembler's .set command has unexpected semantics, we output
33872 all aliases as alternative labels in front of the definition. */
33874 void
33875 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
33877 struct declare_alias_data data = {file, false};
33878 RS6000_OUTPUT_BASENAME (file, name);
33879 fputs (":\n", file);
33880 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
33883 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
33885 void
33886 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
33888 fputs (integer_asm_op (size, FALSE), file);
33889 assemble_name (file, label);
33890 fputs ("-$", file);
33893 /* Output a symbol offset relative to the dbase for the current object.
33894 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
33895 signed offsets.
33897 __gcc_unwind_dbase is embedded in all executables/libraries through
33898 libgcc/config/rs6000/crtdbase.S. */
33900 void
33901 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
33903 fputs (integer_asm_op (size, FALSE), file);
33904 assemble_name (file, label);
33905 fputs("-__gcc_unwind_dbase", file);
33908 #ifdef HAVE_AS_TLS
33909 static void
33910 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
33912 rtx symbol;
33913 int flags;
33915 default_encode_section_info (decl, rtl, first);
33917 /* Careful not to prod global register variables. */
33918 if (!MEM_P (rtl))
33919 return;
33920 symbol = XEXP (rtl, 0);
33921 if (GET_CODE (symbol) != SYMBOL_REF)
33922 return;
33924 flags = SYMBOL_REF_FLAGS (symbol);
33926 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33927 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
33929 SYMBOL_REF_FLAGS (symbol) = flags;
33931 #endif /* HAVE_AS_TLS */
33932 #endif /* TARGET_XCOFF */
33934 /* Return true if INSN should not be copied. */
33936 static bool
33937 rs6000_cannot_copy_insn_p (rtx_insn *insn)
33939 return recog_memoized (insn) >= 0
33940 && get_attr_cannot_copy (insn);
33943 /* Compute a (partial) cost for rtx X. Return true if the complete
33944 cost has been computed, and false if subexpressions should be
33945 scanned. In either case, *TOTAL contains the cost result. */
33947 static bool
33948 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
33949 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
33951 int code = GET_CODE (x);
33953 switch (code)
33955 /* On the RS/6000, if it is valid in the insn, it is free. */
33956 case CONST_INT:
33957 if (((outer_code == SET
33958 || outer_code == PLUS
33959 || outer_code == MINUS)
33960 && (satisfies_constraint_I (x)
33961 || satisfies_constraint_L (x)))
33962 || (outer_code == AND
33963 && (satisfies_constraint_K (x)
33964 || (mode == SImode
33965 ? satisfies_constraint_L (x)
33966 : satisfies_constraint_J (x))))
33967 || ((outer_code == IOR || outer_code == XOR)
33968 && (satisfies_constraint_K (x)
33969 || (mode == SImode
33970 ? satisfies_constraint_L (x)
33971 : satisfies_constraint_J (x))))
33972 || outer_code == ASHIFT
33973 || outer_code == ASHIFTRT
33974 || outer_code == LSHIFTRT
33975 || outer_code == ROTATE
33976 || outer_code == ROTATERT
33977 || outer_code == ZERO_EXTRACT
33978 || (outer_code == MULT
33979 && satisfies_constraint_I (x))
33980 || ((outer_code == DIV || outer_code == UDIV
33981 || outer_code == MOD || outer_code == UMOD)
33982 && exact_log2 (INTVAL (x)) >= 0)
33983 || (outer_code == COMPARE
33984 && (satisfies_constraint_I (x)
33985 || satisfies_constraint_K (x)))
33986 || ((outer_code == EQ || outer_code == NE)
33987 && (satisfies_constraint_I (x)
33988 || satisfies_constraint_K (x)
33989 || (mode == SImode
33990 ? satisfies_constraint_L (x)
33991 : satisfies_constraint_J (x))))
33992 || (outer_code == GTU
33993 && satisfies_constraint_I (x))
33994 || (outer_code == LTU
33995 && satisfies_constraint_P (x)))
33997 *total = 0;
33998 return true;
34000 else if ((outer_code == PLUS
34001 && reg_or_add_cint_operand (x, VOIDmode))
34002 || (outer_code == MINUS
34003 && reg_or_sub_cint_operand (x, VOIDmode))
34004 || ((outer_code == SET
34005 || outer_code == IOR
34006 || outer_code == XOR)
34007 && (INTVAL (x)
34008 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34010 *total = COSTS_N_INSNS (1);
34011 return true;
34013 /* FALLTHRU */
34015 case CONST_DOUBLE:
34016 case CONST_WIDE_INT:
34017 case CONST:
34018 case HIGH:
34019 case SYMBOL_REF:
34020 case MEM:
34021 /* When optimizing for size, MEM should be slightly more expensive
34022 than generating the address, e.g., (plus (reg) (const)).
34023 L1 cache latency is about two instructions. */
34024 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34025 return true;
34027 case LABEL_REF:
34028 *total = 0;
34029 return true;
34031 case PLUS:
34032 case MINUS:
34033 if (FLOAT_MODE_P (mode))
34034 *total = rs6000_cost->fp;
34035 else
34036 *total = COSTS_N_INSNS (1);
34037 return false;
34039 case MULT:
34040 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34041 && satisfies_constraint_I (XEXP (x, 1)))
34043 if (INTVAL (XEXP (x, 1)) >= -256
34044 && INTVAL (XEXP (x, 1)) <= 255)
34045 *total = rs6000_cost->mulsi_const9;
34046 else
34047 *total = rs6000_cost->mulsi_const;
34049 else if (mode == SFmode)
34050 *total = rs6000_cost->fp;
34051 else if (FLOAT_MODE_P (mode))
34052 *total = rs6000_cost->dmul;
34053 else if (mode == DImode)
34054 *total = rs6000_cost->muldi;
34055 else
34056 *total = rs6000_cost->mulsi;
34057 return false;
34059 case FMA:
34060 if (mode == SFmode)
34061 *total = rs6000_cost->fp;
34062 else
34063 *total = rs6000_cost->dmul;
34064 break;
34066 case DIV:
34067 case MOD:
34068 if (FLOAT_MODE_P (mode))
34070 *total = mode == DFmode ? rs6000_cost->ddiv
34071 : rs6000_cost->sdiv;
34072 return false;
34074 /* FALLTHRU */
34076 case UDIV:
34077 case UMOD:
34078 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34079 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34081 if (code == DIV || code == MOD)
34082 /* Shift, addze */
34083 *total = COSTS_N_INSNS (2);
34084 else
34085 /* Shift */
34086 *total = COSTS_N_INSNS (1);
34088 else
34090 if (GET_MODE (XEXP (x, 1)) == DImode)
34091 *total = rs6000_cost->divdi;
34092 else
34093 *total = rs6000_cost->divsi;
34095 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34096 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34097 *total += COSTS_N_INSNS (2);
34098 return false;
34100 case CTZ:
34101 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34102 return false;
34104 case FFS:
34105 *total = COSTS_N_INSNS (4);
34106 return false;
34108 case POPCOUNT:
34109 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34110 return false;
34112 case PARITY:
34113 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34114 return false;
34116 case NOT:
34117 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34118 *total = 0;
34119 else
34120 *total = COSTS_N_INSNS (1);
34121 return false;
34123 case AND:
34124 if (CONST_INT_P (XEXP (x, 1)))
34126 rtx left = XEXP (x, 0);
34127 rtx_code left_code = GET_CODE (left);
34129 /* rotate-and-mask: 1 insn. */
34130 if ((left_code == ROTATE
34131 || left_code == ASHIFT
34132 || left_code == LSHIFTRT)
34133 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34135 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34136 if (!CONST_INT_P (XEXP (left, 1)))
34137 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34138 *total += COSTS_N_INSNS (1);
34139 return true;
34142 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34143 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34144 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34145 || (val & 0xffff) == val
34146 || (val & 0xffff0000) == val
34147 || ((val & 0xffff) == 0 && mode == SImode))
34149 *total = rtx_cost (left, mode, AND, 0, speed);
34150 *total += COSTS_N_INSNS (1);
34151 return true;
34154 /* 2 insns. */
34155 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34157 *total = rtx_cost (left, mode, AND, 0, speed);
34158 *total += COSTS_N_INSNS (2);
34159 return true;
34163 *total = COSTS_N_INSNS (1);
34164 return false;
34166 case IOR:
34167 /* FIXME */
34168 *total = COSTS_N_INSNS (1);
34169 return true;
34171 case CLZ:
34172 case XOR:
34173 case ZERO_EXTRACT:
34174 *total = COSTS_N_INSNS (1);
34175 return false;
34177 case ASHIFT:
34178 /* The EXTSWSLI instruction combines a sign extend with a shift; don't
34179 count the sign extend and the shift separately within the insn. */
34180 if (TARGET_EXTSWSLI && mode == DImode
34181 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34182 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34184 *total = 0;
34185 return false;
34187 /* fall through */
34189 case ASHIFTRT:
34190 case LSHIFTRT:
34191 case ROTATE:
34192 case ROTATERT:
34193 /* Handle mul_highpart. */
34194 if (outer_code == TRUNCATE
34195 && GET_CODE (XEXP (x, 0)) == MULT)
34197 if (mode == DImode)
34198 *total = rs6000_cost->muldi;
34199 else
34200 *total = rs6000_cost->mulsi;
34201 return true;
34203 else if (outer_code == AND)
34204 *total = 0;
34205 else
34206 *total = COSTS_N_INSNS (1);
34207 return false;
34209 case SIGN_EXTEND:
34210 case ZERO_EXTEND:
34211 if (GET_CODE (XEXP (x, 0)) == MEM)
34212 *total = 0;
34213 else
34214 *total = COSTS_N_INSNS (1);
34215 return false;
34217 case COMPARE:
34218 case NEG:
34219 case ABS:
34220 if (!FLOAT_MODE_P (mode))
34222 *total = COSTS_N_INSNS (1);
34223 return false;
34225 /* FALLTHRU */
34227 case FLOAT:
34228 case UNSIGNED_FLOAT:
34229 case FIX:
34230 case UNSIGNED_FIX:
34231 case FLOAT_TRUNCATE:
34232 *total = rs6000_cost->fp;
34233 return false;
34235 case FLOAT_EXTEND:
34236 if (mode == DFmode)
34237 *total = rs6000_cost->sfdf_convert;
34238 else
34239 *total = rs6000_cost->fp;
34240 return false;
34242 case UNSPEC:
34243 switch (XINT (x, 1))
34245 case UNSPEC_FRSP:
34246 *total = rs6000_cost->fp;
34247 return true;
34249 default:
34250 break;
34252 break;
34254 case CALL:
34255 case IF_THEN_ELSE:
34256 if (!speed)
34258 *total = COSTS_N_INSNS (1);
34259 return true;
34261 else if (FLOAT_MODE_P (mode)
34262 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
34264 *total = rs6000_cost->fp;
34265 return false;
34267 break;
34269 case NE:
34270 case EQ:
34271 case GTU:
34272 case LTU:
34273 /* Carry bit requires mode == Pmode.
34274 NEG or PLUS already counted, so only add one. */
34275 if (mode == Pmode
34276 && (outer_code == NEG || outer_code == PLUS))
34278 *total = COSTS_N_INSNS (1);
34279 return true;
34281 if (outer_code == SET)
34283 if (XEXP (x, 1) == const0_rtx)
34285 if (TARGET_ISEL && !TARGET_MFCRF)
34286 *total = COSTS_N_INSNS (8);
34287 else
34288 *total = COSTS_N_INSNS (2);
34289 return true;
34291 else
34293 *total = COSTS_N_INSNS (3);
34294 return false;
34297 /* FALLTHRU */
34299 case GT:
34300 case LT:
34301 case UNORDERED:
34302 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
34304 if (TARGET_ISEL && !TARGET_MFCRF)
34305 *total = COSTS_N_INSNS (8);
34306 else
34307 *total = COSTS_N_INSNS (2);
34308 return true;
34310 /* CC COMPARE. */
34311 if (outer_code == COMPARE)
34313 *total = 0;
34314 return true;
34316 break;
34318 default:
34319 break;
34322 return false;
34325 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34327 static bool
34328 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34329 int opno, int *total, bool speed)
34331 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34333 fprintf (stderr,
34334 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34335 "opno = %d, total = %d, speed = %s, x:\n",
34336 ret ? "complete" : "scan inner",
34337 GET_MODE_NAME (mode),
34338 GET_RTX_NAME (outer_code),
34339 opno,
34340 *total,
34341 speed ? "true" : "false");
34343 debug_rtx (x);
34345 return ret;
34348 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34350 static int
34351 rs6000_debug_address_cost (rtx x, machine_mode mode,
34352 addr_space_t as, bool speed)
34354 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34356 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34357 ret, speed ? "true" : "false");
34358 debug_rtx (x);
34360 return ret;
34364 /* A C expression returning the cost of moving data from a register of class
34365 CLASS1 to one of CLASS2. */
34367 static int
34368 rs6000_register_move_cost (machine_mode mode,
34369 reg_class_t from, reg_class_t to)
34371 int ret;
34373 if (TARGET_DEBUG_COST)
34374 dbg_cost_ctrl++;
34376 /* Moves from/to GENERAL_REGS. */
34377 if (reg_classes_intersect_p (to, GENERAL_REGS)
34378 || reg_classes_intersect_p (from, GENERAL_REGS))
34380 reg_class_t rclass = from;
34382 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34383 rclass = to;
34385 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34386 ret = (rs6000_memory_move_cost (mode, rclass, false)
34387 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34389 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34390 shift. */
34391 else if (rclass == CR_REGS)
34392 ret = 4;
34394 /* For those processors that have slow LR/CTR moves, make them more
34395 expensive than memory in order to bias spills to memory. */
34396 else if ((rs6000_cpu == PROCESSOR_POWER6
34397 || rs6000_cpu == PROCESSOR_POWER7
34398 || rs6000_cpu == PROCESSOR_POWER8
34399 || rs6000_cpu == PROCESSOR_POWER9)
34400 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34401 ret = 6 * hard_regno_nregs[0][mode];
34403 else
34404 /* A move will cost one instruction per GPR moved. */
34405 ret = 2 * hard_regno_nregs[0][mode];
34408 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34409 else if (VECTOR_MEM_VSX_P (mode)
34410 && reg_classes_intersect_p (to, VSX_REGS)
34411 && reg_classes_intersect_p (from, VSX_REGS))
34412 ret = 2 * hard_regno_nregs[32][mode];
34414 /* Moving between two similar registers is just one instruction. */
34415 else if (reg_classes_intersect_p (to, from))
34416 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34418 /* Everything else has to go through GENERAL_REGS. */
34419 else
34420 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34421 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34423 if (TARGET_DEBUG_COST)
34425 if (dbg_cost_ctrl == 1)
34426 fprintf (stderr,
34427 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34428 ret, GET_MODE_NAME (mode), reg_class_names[from],
34429 reg_class_names[to]);
34430 dbg_cost_ctrl--;
34433 return ret;
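/* For example, on POWER8 (one of the slow LR/CTR processors above) a
   DImode move between a GPR and CTR costs 6 on a 64-bit target, versus
   2 for a plain GPR-to-GPR move, which biases the register allocator
   toward spilling such values to memory instead.  */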
34436 /* A C expression returning the cost of moving data of MODE from a register to
34437 or from memory. */
34439 static int
34440 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34441 bool in ATTRIBUTE_UNUSED)
34443 int ret;
34445 if (TARGET_DEBUG_COST)
34446 dbg_cost_ctrl++;
34448 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34449 ret = 4 * hard_regno_nregs[0][mode];
34450 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34451 || reg_classes_intersect_p (rclass, VSX_REGS)))
34452 ret = 4 * hard_regno_nregs[32][mode];
34453 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34454 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
34455 else
34456 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34458 if (TARGET_DEBUG_COST)
34460 if (dbg_cost_ctrl == 1)
34461 fprintf (stderr,
34462 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34463 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34464 dbg_cost_ctrl--;
34467 return ret;
34470 /* Returns a code for a target-specific builtin that implements
34471 the reciprocal of the function, or NULL_TREE if not available. */
34473 static tree
34474 rs6000_builtin_reciprocal (tree fndecl)
34476 switch (DECL_FUNCTION_CODE (fndecl))
34478 case VSX_BUILTIN_XVSQRTDP:
34479 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34480 return NULL_TREE;
34482 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34484 case VSX_BUILTIN_XVSQRTSP:
34485 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34486 return NULL_TREE;
34488 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34490 default:
34491 return NULL_TREE;
34495 /* Load up a constant. If the mode is a vector mode, splat the value across
34496 all of the vector elements. */
34498 static rtx
34499 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34501 rtx reg;
34503 if (mode == SFmode || mode == DFmode)
34505 rtx d = const_double_from_real_value (dconst, mode);
34506 reg = force_reg (mode, d);
34508 else if (mode == V4SFmode)
34510 rtx d = const_double_from_real_value (dconst, SFmode);
34511 rtvec v = gen_rtvec (4, d, d, d, d);
34512 reg = gen_reg_rtx (mode);
34513 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34515 else if (mode == V2DFmode)
34517 rtx d = const_double_from_real_value (dconst, DFmode);
34518 rtvec v = gen_rtvec (2, d, d);
34519 reg = gen_reg_rtx (mode);
34520 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34522 else
34523 gcc_unreachable ();
34525 return reg;
34528 /* Generate an FMA instruction. */
34530 static void
34531 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34533 machine_mode mode = GET_MODE (target);
34534 rtx dst;
34536 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34537 gcc_assert (dst != NULL);
34539 if (dst != target)
34540 emit_move_insn (target, dst);
34543 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34545 static void
34546 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34548 machine_mode mode = GET_MODE (dst);
34549 rtx r;
34551 /* This is a tad more complicated, since the fnma_optab is for
34552 a different expression: fma(-m1, m2, a), which is the same
34553 thing except in the case of signed zeros.
34555 Fortunately we know that if FMA is supported that FNMSUB is
34556 also supported in the ISA. Just expand it directly. */
34558 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34560 r = gen_rtx_NEG (mode, a);
34561 r = gen_rtx_FMA (mode, m1, m2, r);
34562 r = gen_rtx_NEG (mode, r);
34563 emit_insn (gen_rtx_SET (dst, r));
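/* Spelled out: dst = -fma (m1, m2, -a) = -(m1 * m2 - a) = a - m1 * m2,
   i.e. the fnmsub operation; negating A first lets us reuse the FMA
   rtx while keeping the sign of zero results correct.  */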
34566 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34567 add a reg_note saying that this was a division. Support both scalar and
34568 vector divide. Assumes no trapping math and finite arguments. */
34570 void
34571 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34573 machine_mode mode = GET_MODE (dst);
34574 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34575 int i;
34577 /* Low precision estimates guarantee 5 bits of accuracy. High
34578 precision estimates guarantee 14 bits of accuracy. SFmode
34579 requires 23 bits of accuracy. DFmode requires 52 bits of
34580 accuracy. Each pass at least doubles the accuracy, leading
34581 to the following. */
34582 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34583 if (mode == DFmode || mode == V2DFmode)
34584 passes++;
34586 enum insn_code code = optab_handler (smul_optab, mode);
34587 insn_gen_fn gen_mul = GEN_FCN (code);
34589 gcc_assert (code != CODE_FOR_nothing);
34591 one = rs6000_load_constant_and_splat (mode, dconst1);
34593 /* x0 = 1./d estimate */
34594 x0 = gen_reg_rtx (mode);
34595 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34596 UNSPEC_FRES)));
34598 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34599 if (passes > 1) {
34601 /* e0 = 1. - d * x0 */
34602 e0 = gen_reg_rtx (mode);
34603 rs6000_emit_nmsub (e0, d, x0, one);
34605 /* x1 = x0 + e0 * x0 */
34606 x1 = gen_reg_rtx (mode);
34607 rs6000_emit_madd (x1, e0, x0, x0);
34609 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34610 ++i, xprev = xnext, eprev = enext) {
34612 /* enext = eprev * eprev */
34613 enext = gen_reg_rtx (mode);
34614 emit_insn (gen_mul (enext, eprev, eprev));
34616 /* xnext = xprev + enext * xprev */
34617 xnext = gen_reg_rtx (mode);
34618 rs6000_emit_madd (xnext, enext, xprev, xprev);
34621 } else
34622 xprev = x0;
34624 /* The last iteration computes the quotient estimate n * x_i * (2 - d * x_i). */
34626 /* u = n * xprev */
34627 u = gen_reg_rtx (mode);
34628 emit_insn (gen_mul (u, n, xprev));
34630 /* v = n - (d * u) */
34631 v = gen_reg_rtx (mode);
34632 rs6000_emit_nmsub (v, d, u, n);
34634 /* dst = (v * xprev) + u */
34635 rs6000_emit_madd (dst, v, xprev, u);
34637 if (note_p)
34638 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
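/* Worked example for SFmode with TARGET_RECIP_PRECISION (passes == 1):
   only the final step runs, producing

       x0  = fres (d)
       u   = n * x0
       v   = n - d * u          (fnmsub)
       dst = v * x0 + u         (fmadd)

   so dst = n * x0 * (2 - d * x0), one Newton-Raphson refinement of the
   hardware reciprocal estimate.  */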
34641 /* Goldschmidt's Algorithm for single/double-precision floating point
34642 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34644 void
34645 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34647 machine_mode mode = GET_MODE (src);
34648 rtx e = gen_reg_rtx (mode);
34649 rtx g = gen_reg_rtx (mode);
34650 rtx h = gen_reg_rtx (mode);
34652 /* Low precision estimates guarantee 5 bits of accuracy. High
34653 precision estimates guarantee 14 bits of accuracy. SFmode
34654 requires 23 bits of accuracy. DFmode requires 52 bits of
34655 accuracy. Each pass at least doubles the accuracy, leading
34656 to the following. */
34657 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34658 if (mode == DFmode || mode == V2DFmode)
34659 passes++;
34661 int i;
34662 rtx mhalf;
34663 enum insn_code code = optab_handler (smul_optab, mode);
34664 insn_gen_fn gen_mul = GEN_FCN (code);
34666 gcc_assert (code != CODE_FOR_nothing);
34668 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34670 /* e = rsqrt estimate */
34671 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34672 UNSPEC_RSQRT)));
34674 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34675 if (!recip)
34677 rtx zero = force_reg (mode, CONST0_RTX (mode));
34679 if (mode == SFmode)
34681 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34682 e, zero, mode, 0);
34683 if (target != e)
34684 emit_move_insn (e, target);
34686 else
34688 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34689 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34693 /* g = sqrt estimate. */
34694 emit_insn (gen_mul (g, e, src));
34695 /* h = 1/(2*sqrt) estimate. */
34696 emit_insn (gen_mul (h, e, mhalf));
34698 if (recip)
34700 if (passes == 1)
34702 rtx t = gen_reg_rtx (mode);
34703 rs6000_emit_nmsub (t, g, h, mhalf);
34704 /* Apply correction directly to 1/rsqrt estimate. */
34705 rs6000_emit_madd (dst, e, t, e);
34707 else
34709 for (i = 0; i < passes; i++)
34711 rtx t1 = gen_reg_rtx (mode);
34712 rtx g1 = gen_reg_rtx (mode);
34713 rtx h1 = gen_reg_rtx (mode);
34715 rs6000_emit_nmsub (t1, g, h, mhalf);
34716 rs6000_emit_madd (g1, g, t1, g);
34717 rs6000_emit_madd (h1, h, t1, h);
34719 g = g1;
34720 h = h1;
34722 /* Multiply by 2 for 1/rsqrt. */
34723 emit_insn (gen_add3_insn (dst, h, h));
34726 else
34728 rtx t = gen_reg_rtx (mode);
34729 rs6000_emit_nmsub (t, g, h, mhalf);
34730 rs6000_emit_madd (dst, g, t, g);
34733 return;
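/* Each pass above refines g ~= sqrt(src) and h ~= 1/(2*sqrt(src))
   together: t = 1/2 - g*h is the residual (zero for exact estimates),
   and the updates g += g*t, h += h*t roughly double the number of
   correct bits per pass, as in Goldschmidt's algorithm.  */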
34736 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34737 (Power7) targets. DST is the target, and SRC is the argument operand. */
34739 void
34740 rs6000_emit_popcount (rtx dst, rtx src)
34742 machine_mode mode = GET_MODE (dst);
34743 rtx tmp1, tmp2;
34745 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34746 if (TARGET_POPCNTD)
34748 if (mode == SImode)
34749 emit_insn (gen_popcntdsi2 (dst, src));
34750 else
34751 emit_insn (gen_popcntddi2 (dst, src));
34752 return;
34755 tmp1 = gen_reg_rtx (mode);
34757 if (mode == SImode)
34759 emit_insn (gen_popcntbsi2 (tmp1, src));
34760 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34761 NULL_RTX, 0);
34762 tmp2 = force_reg (SImode, tmp2);
34763 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34765 else
34767 emit_insn (gen_popcntbdi2 (tmp1, src));
34768 tmp2 = expand_mult (DImode, tmp1,
34769 GEN_INT ((HOST_WIDE_INT)
34770 0x01010101 << 32 | 0x01010101),
34771 NULL_RTX, 0);
34772 tmp2 = force_reg (DImode, tmp2);
34773 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
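/* Worked example for the popcntb path: for SImode src = 0x01020304,
   popcntb yields the per-byte counts 0x01010201; multiplying by
   0x01010101 accumulates their sum 1+1+2+1 = 5 into the top byte, and
   the shift right by 24 leaves dst = 5 = popcount (src).  */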
34778 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34779 target, and SRC is the argument operand. */
34781 void
34782 rs6000_emit_parity (rtx dst, rtx src)
34784 machine_mode mode = GET_MODE (dst);
34785 rtx tmp;
34787 tmp = gen_reg_rtx (mode);
34789 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34790 if (TARGET_CMPB)
34792 if (mode == SImode)
34794 emit_insn (gen_popcntbsi2 (tmp, src));
34795 emit_insn (gen_paritysi2_cmpb (dst, tmp));
34797 else
34799 emit_insn (gen_popcntbdi2 (tmp, src));
34800 emit_insn (gen_paritydi2_cmpb (dst, tmp));
34802 return;
34805 if (mode == SImode)
34807 /* Is mult+shift >= shift+xor+shift+xor? */
34808 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
34810 rtx tmp1, tmp2, tmp3, tmp4;
34812 tmp1 = gen_reg_rtx (SImode);
34813 emit_insn (gen_popcntbsi2 (tmp1, src));
34815 tmp2 = gen_reg_rtx (SImode);
34816 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
34817 tmp3 = gen_reg_rtx (SImode);
34818 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
34820 tmp4 = gen_reg_rtx (SImode);
34821 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
34822 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
34824 else
34825 rs6000_emit_popcount (tmp, src);
34826 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
34828 else
34830 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
34831 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
34833 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
34835 tmp1 = gen_reg_rtx (DImode);
34836 emit_insn (gen_popcntbdi2 (tmp1, src));
34838 tmp2 = gen_reg_rtx (DImode);
34839 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
34840 tmp3 = gen_reg_rtx (DImode);
34841 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
34843 tmp4 = gen_reg_rtx (DImode);
34844 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
34845 tmp5 = gen_reg_rtx (DImode);
34846 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
34848 tmp6 = gen_reg_rtx (DImode);
34849 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
34850 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
34852 else
34853 rs6000_emit_popcount (tmp, src);
34854 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
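/* The shift/xor fallback folds the per-byte counts from popcntb in
   half repeatedly; since the low bit of a ^ b equals the low bit of
   a + b, the low bit after the final fold is the parity of the total
   bit count, which the trailing AND with 1 extracts.  */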
34858 /* Expand an Altivec constant permutation for little endian mode.
34859 There are two issues: First, the two input operands must be
34860 swapped so that together they form a double-wide array in LE
34861 order. Second, the vperm instruction has surprising behavior
34862 in LE mode: it interprets the elements of the source vectors
34863 in BE mode ("left to right") and interprets the elements of
34864 the destination vector in LE mode ("right to left"). To
34865 correct for this, we must subtract each element of the permute
34866 control vector from 31.
34868 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
34869 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
34870 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
34871 serve as the permute control vector. Then, in BE mode,
34873 vperm 9,10,11,12
34875 places the desired result in vr9. However, in LE mode the
34876 vector contents will be
34878 vr10 = 00000003 00000002 00000001 00000000
34879 vr11 = 00000007 00000006 00000005 00000004
34881 The result of the vperm using the same permute control vector is
34883 vr9 = 05000000 07000000 01000000 03000000
34885 That is, the leftmost 4 bytes of vr10 are interpreted as the
34886 source for the rightmost 4 bytes of vr9, and so on.
34888 If we change the permute control vector to
34890 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
34892 and issue
34894 vperm 9,11,10,12
34896 we get the desired
34898 vr9 = 00000006 00000004 00000002 00000000. */
34900 void
34901 altivec_expand_vec_perm_const_le (rtx operands[4])
34903 unsigned int i;
34904 rtx perm[16];
34905 rtx constv, unspec;
34906 rtx target = operands[0];
34907 rtx op0 = operands[1];
34908 rtx op1 = operands[2];
34909 rtx sel = operands[3];
34911 /* Unpack and adjust the constant selector. */
34912 for (i = 0; i < 16; ++i)
34914 rtx e = XVECEXP (sel, 0, i);
34915 unsigned int elt = 31 - (INTVAL (e) & 31);
34916 perm[i] = GEN_INT (elt);
34919 /* Expand to a permute, swapping the inputs and using the
34920 adjusted selector. */
34921 if (!REG_P (op0))
34922 op0 = force_reg (V16QImode, op0);
34923 if (!REG_P (op1))
34924 op1 = force_reg (V16QImode, op1);
34926 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
34927 constv = force_reg (V16QImode, constv);
34928 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
34929 UNSPEC_VPERM);
34930 if (!REG_P (target))
34932 rtx tmp = gen_reg_rtx (V16QImode);
34933 emit_move_insn (tmp, unspec);
34934 unspec = tmp;
34937 emit_move_insn (target, unspec);
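/* Tying this to the example above: BE control element 0 becomes
   31 - 0 = 31, element 1 becomes 30, and so on, while swapping op0 and
   op1 in the UNSPEC_VPERM compensates for the reversed order of the
   double-wide input in LE mode.  */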
34940 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
34941 permute control vector. But here it's not a constant, so we must
34942 generate a vector NAND or NOR to do the adjustment. */
34944 void
34945 altivec_expand_vec_perm_le (rtx operands[4])
34947 rtx notx, iorx, unspec;
34948 rtx target = operands[0];
34949 rtx op0 = operands[1];
34950 rtx op1 = operands[2];
34951 rtx sel = operands[3];
34952 rtx tmp = target;
34953 rtx norreg = gen_reg_rtx (V16QImode);
34954 machine_mode mode = GET_MODE (target);
34956 /* Get everything in regs so the pattern matches. */
34957 if (!REG_P (op0))
34958 op0 = force_reg (mode, op0);
34959 if (!REG_P (op1))
34960 op1 = force_reg (mode, op1);
34961 if (!REG_P (sel))
34962 sel = force_reg (V16QImode, sel);
34963 if (!REG_P (target))
34964 tmp = gen_reg_rtx (mode);
34966 if (TARGET_P9_VECTOR)
34968 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
34969 UNSPEC_VPERMR);
34971 else
34973 /* Invert the selector with a VNAND if available, else a VNOR.
34974 The VNAND is preferred for future fusion opportunities. */
34975 notx = gen_rtx_NOT (V16QImode, sel);
34976 iorx = (TARGET_P8_VECTOR
34977 ? gen_rtx_IOR (V16QImode, notx, notx)
34978 : gen_rtx_AND (V16QImode, notx, notx));
34979 emit_insn (gen_rtx_SET (norreg, iorx));
34981 /* Permute with operands reversed and adjusted selector. */
34982 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
34983 UNSPEC_VPERM);
34986 /* Copy into target, possibly by way of a register. */
34987 if (!REG_P (target))
34989 emit_move_insn (tmp, unspec);
34990 unspec = tmp;
34993 emit_move_insn (target, unspec);
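/* Why a single VNAND (or VNOR) inverts the selector: nand (x, x) and
   nor (x, x) both equal ~x, and for an element e vperm reads only the
   low five selector bits, where ~e == 31 - e, exactly the
   constant-case adjustment above.  A hedged sketch in plain C
   (hypothetical helper):

     unsigned char
     nand_byte (unsigned char a, unsigned char b)
     {
       return (unsigned char) ~(a & b);
     }

     // nand_byte (e, e) == ~e; in the low five bits, ~e == 31 - e,
     // e.g. e == 3 gives 28 and e == 27 gives 4.  */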
34996 /* Expand an Altivec constant permutation. Return true if we match
34997 an efficient implementation; false to fall back to VPERM. */
34999 bool
35000 altivec_expand_vec_perm_const (rtx operands[4])
35002 struct altivec_perm_insn {
35003 HOST_WIDE_INT mask;
35004 enum insn_code impl;
35005 unsigned char perm[16];
35007 static const struct altivec_perm_insn patterns[] = {
35008 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35009 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35010 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35011 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35012 { OPTION_MASK_ALTIVEC,
35013 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35014 : CODE_FOR_altivec_vmrglb_direct),
35015 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35016 { OPTION_MASK_ALTIVEC,
35017 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35018 : CODE_FOR_altivec_vmrglh_direct),
35019 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35020 { OPTION_MASK_ALTIVEC,
35021 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35022 : CODE_FOR_altivec_vmrglw_direct),
35023 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35024 { OPTION_MASK_ALTIVEC,
35025 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35026 : CODE_FOR_altivec_vmrghb_direct),
35027 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35028 { OPTION_MASK_ALTIVEC,
35029 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35030 : CODE_FOR_altivec_vmrghh_direct),
35031 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35032 { OPTION_MASK_ALTIVEC,
35033 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35034 : CODE_FOR_altivec_vmrghw_direct),
35035 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35036 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
35037 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35038 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
35039 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35042 unsigned int i, j, elt, which;
35043 unsigned char perm[16];
35044 rtx target, op0, op1, sel, x;
35045 bool one_vec;
35047 target = operands[0];
35048 op0 = operands[1];
35049 op1 = operands[2];
35050 sel = operands[3];
35052 /* Unpack the constant selector. */
35053 for (i = which = 0; i < 16; ++i)
35055 rtx e = XVECEXP (sel, 0, i);
35056 elt = INTVAL (e) & 31;
35057 which |= (elt < 16 ? 1 : 2);
35058 perm[i] = elt;
35061 /* Simplify the constant selector based on operands. */
35062 switch (which)
35064 default:
35065 gcc_unreachable ();
35067 case 3:
35068 one_vec = false;
35069 if (!rtx_equal_p (op0, op1))
35070 break;
35071 /* FALLTHRU */
35073 case 2:
35074 for (i = 0; i < 16; ++i)
35075 perm[i] &= 15;
35076 op0 = op1;
35077 one_vec = true;
35078 break;
35080 case 1:
35081 op1 = op0;
35082 one_vec = true;
35083 break;
35086 /* Look for splat patterns. */
35087 if (one_vec)
35089 elt = perm[0];
35091 for (i = 0; i < 16; ++i)
35092 if (perm[i] != elt)
35093 break;
35094 if (i == 16)
35096 if (!BYTES_BIG_ENDIAN)
35097 elt = 15 - elt;
35098 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35099 return true;
35102 if (elt % 2 == 0)
35104 for (i = 0; i < 16; i += 2)
35105 if (perm[i] != elt || perm[i + 1] != elt + 1)
35106 break;
35107 if (i == 16)
35109 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35110 x = gen_reg_rtx (V8HImode);
35111 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35112 GEN_INT (field)));
35113 emit_move_insn (target, gen_lowpart (V16QImode, x));
35114 return true;
35118 if (elt % 4 == 0)
35120 for (i = 0; i < 16; i += 4)
35121 if (perm[i] != elt
35122 || perm[i + 1] != elt + 1
35123 || perm[i + 2] != elt + 2
35124 || perm[i + 3] != elt + 3)
35125 break;
35126 if (i == 16)
35128 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35129 x = gen_reg_rtx (V4SImode);
35130 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35131 GEN_INT (field)));
35132 emit_move_insn (target, gen_lowpart (V16QImode, x));
35133 return true;
35138 /* Look for merge and pack patterns. */
35139 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35141 bool swapped;
35143 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35144 continue;
35146 elt = patterns[j].perm[0];
35147 if (perm[0] == elt)
35148 swapped = false;
35149 else if (perm[0] == elt + 16)
35150 swapped = true;
35151 else
35152 continue;
35153 for (i = 1; i < 16; ++i)
35155 elt = patterns[j].perm[i];
35156 if (swapped)
35157 elt = (elt >= 16 ? elt - 16 : elt + 16);
35158 else if (one_vec && elt >= 16)
35159 elt -= 16;
35160 if (perm[i] != elt)
35161 break;
35163 if (i == 16)
35165 enum insn_code icode = patterns[j].impl;
35166 machine_mode omode = insn_data[icode].operand[0].mode;
35167 machine_mode imode = insn_data[icode].operand[1].mode;
35169 /* For little-endian, don't use vpkuwum and vpkuhum if the
35170 underlying vector type is not V4SI and V8HI, respectively.
35171 For example, using vpkuwum with a V8HI picks up the even
35172 halfwords (BE numbering) when the even halfwords (LE
35173 numbering) are what we need. */
35174 if (!BYTES_BIG_ENDIAN
35175 && icode == CODE_FOR_altivec_vpkuwum_direct
35176 && ((GET_CODE (op0) == REG
35177 && GET_MODE (op0) != V4SImode)
35178 || (GET_CODE (op0) == SUBREG
35179 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35180 continue;
35181 if (!BYTES_BIG_ENDIAN
35182 && icode == CODE_FOR_altivec_vpkuhum_direct
35183 && ((GET_CODE (op0) == REG
35184 && GET_MODE (op0) != V8HImode)
35185 || (GET_CODE (op0) == SUBREG
35186 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35187 continue;
35189 /* For little-endian, the two input operands must be swapped
35190 (or swapped back) to ensure proper right-to-left numbering
35191 from 0 to 2N-1. */
35192 if (swapped ^ !BYTES_BIG_ENDIAN)
35193 std::swap (op0, op1);
35194 if (imode != V16QImode)
35196 op0 = gen_lowpart (imode, op0);
35197 op1 = gen_lowpart (imode, op1);
35199 if (omode == V16QImode)
35200 x = target;
35201 else
35202 x = gen_reg_rtx (omode);
35203 emit_insn (GEN_FCN (icode) (x, op0, op1));
35204 if (omode != V16QImode)
35205 emit_move_insn (target, gen_lowpart (V16QImode, x));
35206 return true;
35210 if (!BYTES_BIG_ENDIAN)
35212 altivec_expand_vec_perm_const_le (operands);
35213 return true;
35216 return false;
35219 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
35220 Return true if we match an efficient implementation. */
35222 static bool
35223 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35224 unsigned char perm0, unsigned char perm1)
35226 rtx x;
35228 /* If both selectors come from the same operand, fold to single op. */
35229 if ((perm0 & 2) == (perm1 & 2))
35231 if (perm0 & 2)
35232 op0 = op1;
35233 else
35234 op1 = op0;
35236 /* If both operands are equal, fold to simpler permutation. */
35237 if (rtx_equal_p (op0, op1))
35239 perm0 = perm0 & 1;
35240 perm1 = (perm1 & 1) + 2;
35242 /* If the first selector comes from the second operand, swap. */
35243 else if (perm0 & 2)
35245 if (perm1 & 2)
35246 return false;
35247 perm0 -= 2;
35248 perm1 += 2;
35249 std::swap (op0, op1);
35251 /* If the second selector does not come from the second operand, fail. */
35252 else if ((perm1 & 2) == 0)
35253 return false;
35255 /* Success! */
35256 if (target != NULL)
35258 machine_mode vmode, dmode;
35259 rtvec v;
35261 vmode = GET_MODE (target);
35262 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35263 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
35264 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35265 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35266 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35267 emit_insn (gen_rtx_SET (target, x));
35269 return true;
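/* Sketch of the selector encoding handled above (illustrative): each
   of perm0/perm1 is two bits, where bit 1 picks the operand (0 = op0,
   1 = op1) and bit 0 picks the element within it.  For example:

     perm0 = 0, perm1 = 3  ->  { op0[0], op1[1] }  already canonical
     perm0 = 2, perm1 = 1  ->  { op1[0], op0[1] }  swap op0/op1 and
                               rewrite to perm0 = 0, perm1 = 3
     perm0 = 1, perm1 = 0  ->  { op0[1], op0[0] }  both from op0, so
                               fold to a single-operand permute  */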
35272 bool
35273 rs6000_expand_vec_perm_const (rtx operands[4])
35275 rtx target, op0, op1, sel;
35276 unsigned char perm0, perm1;
35278 target = operands[0];
35279 op0 = operands[1];
35280 op1 = operands[2];
35281 sel = operands[3];
35283 /* Unpack the constant selector. */
35284 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
35285 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
35287 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
35290 /* Test whether a constant permutation is supported. */
35292 static bool
35293 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
35294 const unsigned char *sel)
35296 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35297 if (TARGET_ALTIVEC)
35298 return true;
35300 /* Check for ps_merge* or evmerge* insns. */
35301 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
35302 || (TARGET_SPE && vmode == V2SImode))
35304 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35305 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35306 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
35309 return false;
35312 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35314 static void
35315 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35316 machine_mode vmode, unsigned nelt, rtx perm[])
35318 machine_mode imode;
35319 rtx x;
35321 imode = vmode;
35322 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
35324 imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
35325 imode = mode_for_vector (imode, nelt);
35328 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
35329 x = expand_vec_perm (vmode, op0, op1, x, target);
35330 if (x != target)
35331 emit_move_insn (target, x);
35334 /* Expand an extract even operation. */
35336 void
35337 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35339 machine_mode vmode = GET_MODE (target);
35340 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35341 rtx perm[16];
35343 for (i = 0; i < nelt; i++)
35344 perm[i] = GEN_INT (i * 2);
35346 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
35349 /* Expand a vector interleave operation. */
35351 void
35352 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35354 machine_mode vmode = GET_MODE (target);
35355 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35356 rtx perm[16];
35358 high = (highp ? 0 : nelt / 2);
35359 for (i = 0; i < nelt / 2; i++)
35361 perm[i * 2] = GEN_INT (i + high);
35362 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
35365 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
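/* Illustrative selectors built by the two expanders above, for a
   4-element vector (nelt == 4); elements 0..3 index op0 and 4..7
   index op1:

     rs6000_expand_extract_even:         perm = { 0, 2, 4, 6 }
     rs6000_expand_interleave, highp:    perm = { 0, 4, 1, 5 }
     rs6000_expand_interleave, !highp:   perm = { 2, 6, 3, 7 }  */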
35368 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
35369 void
35370 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35372 HOST_WIDE_INT hwi_scale (scale);
35373 REAL_VALUE_TYPE r_pow;
35374 rtvec v = rtvec_alloc (2);
35375 rtx elt;
35376 rtx scale_vec = gen_reg_rtx (V2DFmode);
35377 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35378 elt = const_double_from_real_value (r_pow, DFmode);
35379 RTVEC_ELT (v, 0) = elt;
35380 RTVEC_ELT (v, 1) = elt;
35381 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35382 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
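/* A hedged sketch of the effect of rs6000_scale_v2df: both lanes of
   SRC are multiplied by 2**SCALE via a single V2DF multiply against
   the splat constant built above.  E.g. scale == 3 behaves like

     tgt[0] = src[0] * 8.0;
     tgt[1] = src[1] * 8.0;  */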
35385 /* Return an RTX representing where to find the function value of a
35386 function returning MODE. */
35387 static rtx
35388 rs6000_complex_function_value (machine_mode mode)
35390 unsigned int regno;
35391 rtx r1, r2;
35392 machine_mode inner = GET_MODE_INNER (mode);
35393 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35395 if (TARGET_FLOAT128
35396 && (mode == KCmode
35397 || (mode == TCmode && TARGET_IEEEQUAD)))
35398 regno = ALTIVEC_ARG_RETURN;
35400 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35401 regno = FP_ARG_RETURN;
35403 else
35405 regno = GP_ARG_RETURN;
35407 /* 32-bit is OK since it'll go in r3/r4. */
35408 if (TARGET_32BIT && inner_bytes >= 4)
35409 return gen_rtx_REG (mode, regno);
35412 if (inner_bytes >= 8)
35413 return gen_rtx_REG (mode, regno);
35415 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35416 const0_rtx);
35417 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35418 GEN_INT (inner_bytes));
35419 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35422 /* Return an rtx describing a return value of MODE as a PARALLEL
35423 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35424 stride REG_STRIDE. */
35426 static rtx
35427 rs6000_parallel_return (machine_mode mode,
35428 int n_elts, machine_mode elt_mode,
35429 unsigned int regno, unsigned int reg_stride)
35431 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35433 int i;
35434 for (i = 0; i < n_elts; i++)
35436 rtx r = gen_rtx_REG (elt_mode, regno);
35437 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35438 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35439 regno += reg_stride;
35442 return par;
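/* For example (illustrative), a DImode value returned under the
   32-bit ABI with -mpowerpc64 uses n_elts == 2, elt_mode == SImode,
   regno == GP_ARG_RETURN and reg_stride == 1, yielding

     (parallel [ (expr_list (reg:SI 3) (const_int 0))
                 (expr_list (reg:SI 4) (const_int 4)) ])

   i.e. the two 4-byte halves live in r3 and r4 at byte offsets 0
   and 4.  */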
35445 /* Target hook for TARGET_FUNCTION_VALUE.
35447 On the SPE, both FPs and vectors are returned in r3.
35449 On RS/6000 an integer value is in r3 and a floating-point value is in
35450 fp1, unless -msoft-float. */
35452 static rtx
35453 rs6000_function_value (const_tree valtype,
35454 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35455 bool outgoing ATTRIBUTE_UNUSED)
35457 machine_mode mode;
35458 unsigned int regno;
35459 machine_mode elt_mode;
35460 int n_elts;
35462 /* Special handling for structs in darwin64. */
35463 if (TARGET_MACHO
35464 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35466 CUMULATIVE_ARGS valcum;
35467 rtx valret;
35469 valcum.words = 0;
35470 valcum.fregno = FP_ARG_MIN_REG;
35471 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35472 /* Do a trial code generation as if this were going to be passed as
35473 an argument; if any part goes in memory, we return NULL. */
35474 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35475 if (valret)
35476 return valret;
35477 /* Otherwise fall through to standard ABI rules. */
35480 mode = TYPE_MODE (valtype);
35482 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35483 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35485 int first_reg, n_regs;
35487 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35489 /* _Decimal128 must use even/odd register pairs. */
35490 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35491 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35493 else
35495 first_reg = ALTIVEC_ARG_RETURN;
35496 n_regs = 1;
35499 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35502 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
35503 if (TARGET_32BIT && TARGET_POWERPC64)
35504 switch (mode)
35506 default:
35507 break;
35508 case DImode:
35509 case SCmode:
35510 case DCmode:
35511 case TCmode:
35512 int count = GET_MODE_SIZE (mode) / 4;
35513 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35516 if ((INTEGRAL_TYPE_P (valtype)
35517 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35518 || POINTER_TYPE_P (valtype))
35519 mode = TARGET_32BIT ? SImode : DImode;
35521 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35522 /* _Decimal128 must use an even/odd register pair. */
35523 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35524 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
35525 && !FLOAT128_VECTOR_P (mode)
35526 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
35527 regno = FP_ARG_RETURN;
35528 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35529 && targetm.calls.split_complex_arg)
35530 return rs6000_complex_function_value (mode);
35531 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35532 return register is used in both cases, and we won't see V2DImode/V2DFmode
35533 for pure altivec, combine the two cases. */
35534 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35535 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35536 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35537 regno = ALTIVEC_ARG_RETURN;
35538 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
35539 && (mode == DFmode || mode == DCmode
35540 || FLOAT128_IBM_P (mode) || mode == TCmode))
35541 return spe_build_register_parallel (mode, GP_ARG_RETURN);
35542 else
35543 regno = GP_ARG_RETURN;
35545 return gen_rtx_REG (mode, regno);
35548 /* Define how to find the value returned by a library function
35549 assuming the value has mode MODE. */
35550 rtx
35551 rs6000_libcall_value (machine_mode mode)
35553 unsigned int regno;
35555 /* Long long return values need to be split in the -mpowerpc64, 32-bit ABI. */
35556 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35557 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35559 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35560 /* _Decimal128 must use an even/odd register pair. */
35561 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35562 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
35563 && TARGET_HARD_FLOAT && TARGET_FPRS
35564 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
35565 regno = FP_ARG_RETURN;
35566 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35567 return register is used in both cases, and we won't see V2DImode/V2DFmode
35568 for pure altivec, combine the two cases. */
35569 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35570 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35571 regno = ALTIVEC_ARG_RETURN;
35572 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35573 return rs6000_complex_function_value (mode);
35574 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
35575 && (mode == DFmode || mode == DCmode
35576 || FLOAT128_IBM_P (mode) || mode == TCmode))
35577 return spe_build_register_parallel (mode, GP_ARG_RETURN);
35578 else
35579 regno = GP_ARG_RETURN;
35581 return gen_rtx_REG (mode, regno);
35585 /* Return true if we use LRA instead of reload pass. */
35586 static bool
35587 rs6000_lra_p (void)
35589 return TARGET_LRA;
35592 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35593 Frame pointer elimination is automatically handled.
35595 For the RS/6000, if frame pointer elimination is being done, we would like
35596 to convert ap into fp, not sp.
35598 We need r30 if -mminimal-toc was specified, and there are constant pool
35599 references. */
35601 static bool
35602 rs6000_can_eliminate (const int from, const int to)
35604 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35605 ? ! frame_pointer_needed
35606 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35607 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
35608 : true);
35611 /* Define the offset between two registers, FROM to be eliminated and its
35612 replacement TO, at the start of a routine. */
35613 HOST_WIDE_INT
35614 rs6000_initial_elimination_offset (int from, int to)
35616 rs6000_stack_t *info = rs6000_stack_info ();
35617 HOST_WIDE_INT offset;
35619 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35620 offset = info->push_p ? 0 : -info->total_size;
35621 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35623 offset = info->push_p ? 0 : -info->total_size;
35624 if (FRAME_GROWS_DOWNWARD)
35625 offset += info->fixed_size + info->vars_size + info->parm_size;
35627 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35628 offset = FRAME_GROWS_DOWNWARD
35629 ? info->fixed_size + info->vars_size + info->parm_size
35630 : 0;
35631 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35632 offset = info->total_size;
35633 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35634 offset = info->push_p ? info->total_size : 0;
35635 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35636 offset = 0;
35637 else
35638 gcc_unreachable ();
35640 return offset;
35643 static rtx
35644 rs6000_dwarf_register_span (rtx reg)
35646 rtx parts[8];
35647 int i, words;
35648 unsigned regno = REGNO (reg);
35649 machine_mode mode = GET_MODE (reg);
35651 if (TARGET_SPE
35652 && regno < 32
35653 && (SPE_VECTOR_MODE (GET_MODE (reg))
35654 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
35655 && mode != SFmode && mode != SDmode && mode != SCmode)))
35657 else
35658 return NULL_RTX;
35660 regno = REGNO (reg);
35662 /* The duality of the SPE register size wreaks all kinds of havoc.
35663 This is a way of distinguishing r0 in 32-bits from r0 in
35664 64-bits. */
35665 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
35666 gcc_assert (words <= 4);
35667 for (i = 0; i < words; i++, regno++)
35669 if (BYTES_BIG_ENDIAN)
35671 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
35672 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
35674 else
35676 parts[2 * i] = gen_rtx_REG (SImode, regno);
35677 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
35681 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
35684 /* Fill in sizes for SPE register high parts in table used by unwinder. */
35686 static void
35687 rs6000_init_dwarf_reg_sizes_extra (tree address)
35689 if (TARGET_SPE)
35691 int i;
35692 machine_mode mode = TYPE_MODE (char_type_node);
35693 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35694 rtx mem = gen_rtx_MEM (BLKmode, addr);
35695 rtx value = gen_int_mode (4, mode);
35697 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
35699 int column = DWARF_REG_TO_UNWIND_COLUMN
35700 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35701 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35703 emit_move_insn (adjust_address (mem, mode, offset), value);
35707 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35709 int i;
35710 machine_mode mode = TYPE_MODE (char_type_node);
35711 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35712 rtx mem = gen_rtx_MEM (BLKmode, addr);
35713 rtx value = gen_int_mode (16, mode);
35715 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35716 The unwinder still needs to know the size of Altivec registers. */
35718 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35720 int column = DWARF_REG_TO_UNWIND_COLUMN
35721 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35722 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35724 emit_move_insn (adjust_address (mem, mode, offset), value);
35729 /* Map internal gcc register numbers to debug format register numbers.
35730 FORMAT specifies the type of debug register number to use:
35731 0 -- debug information, except for frame-related sections
35732 1 -- DWARF .debug_frame section
35733 2 -- DWARF .eh_frame section */
35735 unsigned int
35736 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35738 /* We never use the GCC internal number for SPE high registers.
35739 Those are mapped to the 1200..1231 range for all debug formats. */
35740 if (SPE_HIGH_REGNO_P (regno))
35741 return regno - FIRST_SPE_HIGH_REGNO + 1200;
35743 /* Except for the above, we use the internal number for non-DWARF
35744 debug information, and also for .eh_frame. */
35745 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35746 return regno;
35748 /* On some platforms, we use the standard DWARF register
35749 numbering for .debug_info and .debug_frame. */
35750 #ifdef RS6000_USE_DWARF_NUMBERING
35751 if (regno <= 63)
35752 return regno;
35753 if (regno == LR_REGNO)
35754 return 108;
35755 if (regno == CTR_REGNO)
35756 return 109;
35757 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35758 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35759 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35760 to the DWARF reg for CR. */
35761 if (format == 1 && regno == CR2_REGNO)
35762 return 64;
35763 if (CR_REGNO_P (regno))
35764 return regno - CR0_REGNO + 86;
35765 if (regno == CA_REGNO)
35766 return 101; /* XER */
35767 if (ALTIVEC_REGNO_P (regno))
35768 return regno - FIRST_ALTIVEC_REGNO + 1124;
35769 if (regno == VRSAVE_REGNO)
35770 return 356;
35771 if (regno == VSCR_REGNO)
35772 return 67;
35773 if (regno == SPE_ACC_REGNO)
35774 return 99;
35775 if (regno == SPEFSCR_REGNO)
35776 return 612;
35777 #endif
35778 return regno;
35781 /* target hook eh_return_filter_mode */
35782 static machine_mode
35783 rs6000_eh_return_filter_mode (void)
35785 return TARGET_32BIT ? SImode : word_mode;
35788 /* Target hook for scalar_mode_supported_p. */
35789 static bool
35790 rs6000_scalar_mode_supported_p (machine_mode mode)
35792 /* -m32 does not support TImode. This is the default, from
35793 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35794 same ABI as for -m32. But default_scalar_mode_supported_p allows
35795 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35796 for -mpowerpc64. */
35797 if (TARGET_32BIT && mode == TImode)
35798 return false;
35800 if (DECIMAL_FLOAT_MODE_P (mode))
35801 return default_decimal_float_supported_p ();
35802 else if (TARGET_FLOAT128 && (mode == KFmode || mode == IFmode))
35803 return true;
35804 else
35805 return default_scalar_mode_supported_p (mode);
35808 /* Target hook for vector_mode_supported_p. */
35809 static bool
35810 rs6000_vector_mode_supported_p (machine_mode mode)
35813 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
35814 return true;
35816 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
35817 return true;
35819 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35820 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35821 double-double. */
35822 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35823 return true;
35825 else
35826 return false;
35829 /* Target hook for c_mode_for_suffix. */
35830 static machine_mode
35831 rs6000_c_mode_for_suffix (char suffix)
35833 if (TARGET_FLOAT128)
35835 if (suffix == 'q' || suffix == 'Q')
35836 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35838 /* At the moment, we are not defining a suffix for IBM extended double.
35839 If/when the default for -mabi=ieeelongdouble is changed, and we want
35840 to support __ibm128 constants in legacy library code, we may need to
35841 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
35842 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
35843 __float80 constants. */
35846 return VOIDmode;
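/* Usage note (hypothetical user code, not part of this file): with
   -mfloat128, a constant suffixed with 'q' or 'Q' gets the IEEE
   128-bit mode selected above, e.g.

     __float128 x = 1.0q;   // KFmode unless long double is IEEE 128-bit

   in which latter case the suffix maps to TFmode instead.  */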
35849 /* Target hook for invalid_arg_for_unprototyped_fn. */
35850 static const char *
35851 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
35853 return (!rs6000_darwin64_abi
35854 && typelist == 0
35855 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
35856 && (funcdecl == NULL_TREE
35857 || (TREE_CODE (funcdecl) == FUNCTION_DECL
35858 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
35859 ? N_("AltiVec argument passed to unprototyped function")
35860 : NULL;
35863 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
35864 setup by using __stack_chk_fail_local hidden function instead of
35865 calling __stack_chk_fail directly. Otherwise it is better to call
35866 __stack_chk_fail directly. */
35868 static tree ATTRIBUTE_UNUSED
35869 rs6000_stack_protect_fail (void)
35871 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
35872 ? default_hidden_stack_protect_fail ()
35873 : default_external_stack_protect_fail ();
35876 void
35877 rs6000_final_prescan_insn (rtx_insn *insn, rtx *operand ATTRIBUTE_UNUSED,
35878 int num_operands ATTRIBUTE_UNUSED)
35880 if (rs6000_warn_cell_microcode)
35882 const char *temp;
35883 int insn_code_number = recog_memoized (insn);
35884 location_t location = INSN_LOCATION (insn);
35886 /* Punt on insns we cannot recognize. */
35887 if (insn_code_number < 0)
35888 return;
35890 temp = get_insn_template (insn_code_number, insn);
35892 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
35893 warning_at (location, OPT_mwarn_cell_microcode,
35894 "emitting microcode insn %s\t[%s] #%d",
35895 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
35896 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
35897 warning_at (location, OPT_mwarn_cell_microcode,
35898 "emitting conditional microcode insn %s\t[%s] #%d",
35899 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
35903 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
35905 #if TARGET_ELF
35906 static unsigned HOST_WIDE_INT
35907 rs6000_asan_shadow_offset (void)
35909 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
35911 #endif
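/* Illustrative arithmetic for the offset above, assuming the usual
   AddressSanitizer mapping with a 3-bit shadow scale:

     shadow_addr = (addr >> 3) + rs6000_asan_shadow_offset ();

   so e.g. on 64-bit targets the shadow region starts at 1 << 41.  */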
35913 /* Mask options that we want to support inside of attribute((target)) and
35914 #pragma GCC target operations. Note, we do not include things like
35915 64/32-bit, endianness, hard/soft floating point, etc. that would have
35916 different calling sequences. */
35918 struct rs6000_opt_mask {
35919 const char *name; /* option name */
35920 HOST_WIDE_INT mask; /* mask to set */
35921 bool invert; /* invert sense of mask */
35922 bool valid_target; /* option is a target option */
35925 static struct rs6000_opt_mask const rs6000_opt_masks[] =
35927 { "altivec", OPTION_MASK_ALTIVEC, false, true },
35928 { "cmpb", OPTION_MASK_CMPB, false, true },
35929 { "crypto", OPTION_MASK_CRYPTO, false, true },
35930 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
35931 { "dlmzb", OPTION_MASK_DLMZB, false, true },
35932 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
35933 false, true },
35934 { "float128", OPTION_MASK_FLOAT128, false, false },
35935 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, false },
35936 { "fprnd", OPTION_MASK_FPRND, false, true },
35937 { "hard-dfp", OPTION_MASK_DFP, false, true },
35938 { "htm", OPTION_MASK_HTM, false, true },
35939 { "isel", OPTION_MASK_ISEL, false, true },
35940 { "mfcrf", OPTION_MASK_MFCRF, false, true },
35941 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
35942 { "modulo", OPTION_MASK_MODULO, false, true },
35943 { "mulhw", OPTION_MASK_MULHW, false, true },
35944 { "multiple", OPTION_MASK_MULTIPLE, false, true },
35945 { "popcntb", OPTION_MASK_POPCNTB, false, true },
35946 { "popcntd", OPTION_MASK_POPCNTD, false, true },
35947 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
35948 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
35949 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
35950 { "power9-dform-scalar", OPTION_MASK_P9_DFORM_SCALAR, false, true },
35951 { "power9-dform-vector", OPTION_MASK_P9_DFORM_VECTOR, false, true },
35952 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
35953 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
35954 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
35955 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
35956 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
35957 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
35958 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
35959 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
35960 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
35961 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
35962 { "string", OPTION_MASK_STRING, false, true },
35963 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
35964 { "update", OPTION_MASK_NO_UPDATE, true, true },
35965 { "upper-regs-di", OPTION_MASK_UPPER_REGS_DI, false, true },
35966 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, true },
35967 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, true },
35968 { "vsx", OPTION_MASK_VSX, false, true },
35969 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
35970 #ifdef OPTION_MASK_64BIT
35971 #if TARGET_AIX_OS
35972 { "aix64", OPTION_MASK_64BIT, false, false },
35973 { "aix32", OPTION_MASK_64BIT, true, false },
35974 #else
35975 { "64", OPTION_MASK_64BIT, false, false },
35976 { "32", OPTION_MASK_64BIT, true, false },
35977 #endif
35978 #endif
35979 #ifdef OPTION_MASK_EABI
35980 { "eabi", OPTION_MASK_EABI, false, false },
35981 #endif
35982 #ifdef OPTION_MASK_LITTLE_ENDIAN
35983 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
35984 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
35985 #endif
35986 #ifdef OPTION_MASK_RELOCATABLE
35987 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
35988 #endif
35989 #ifdef OPTION_MASK_STRICT_ALIGN
35990 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
35991 #endif
35992 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
35993 { "string", OPTION_MASK_STRING, false, false },
35996 /* Builtin mask mapping for printing the flags. */
35997 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
35999 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36000 { "vsx", RS6000_BTM_VSX, false, false },
36001 { "spe", RS6000_BTM_SPE, false, false },
36002 { "paired", RS6000_BTM_PAIRED, false, false },
36003 { "fre", RS6000_BTM_FRE, false, false },
36004 { "fres", RS6000_BTM_FRES, false, false },
36005 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36006 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36007 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36008 { "cell", RS6000_BTM_CELL, false, false },
36009 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36010 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36011 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36012 { "crypto", RS6000_BTM_CRYPTO, false, false },
36013 { "htm", RS6000_BTM_HTM, false, false },
36014 { "hard-dfp", RS6000_BTM_DFP, false, false },
36015 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36016 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36017 { "float128", RS6000_BTM_FLOAT128, false, false },
36020 /* Option variables that we want to support inside attribute((target)) and
36021 #pragma GCC target operations. */
36023 struct rs6000_opt_var {
36024 const char *name; /* option name */
36025 size_t global_offset; /* offset of the option in global_options. */
36026 size_t target_offset; /* offset of the option in target options. */
36029 static struct rs6000_opt_var const rs6000_opt_vars[] =
36031 { "friz",
36032 offsetof (struct gcc_options, x_TARGET_FRIZ),
36033 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36034 { "avoid-indexed-addresses",
36035 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36036 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36037 { "paired",
36038 offsetof (struct gcc_options, x_rs6000_paired_float),
36039 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36040 { "longcall",
36041 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36042 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36043 { "optimize-swaps",
36044 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36045 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36046 { "allow-movmisalign",
36047 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36048 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36049 { "allow-df-permute",
36050 offsetof (struct gcc_options, x_TARGET_ALLOW_DF_PERMUTE),
36051 offsetof (struct cl_target_option, x_TARGET_ALLOW_DF_PERMUTE), },
36052 { "sched-groups",
36053 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36054 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36055 { "always-hint",
36056 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36057 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36058 { "align-branch-targets",
36059 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36060 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36061 { "vectorize-builtins",
36062 offsetof (struct gcc_options, x_TARGET_VECTORIZE_BUILTINS),
36063 offsetof (struct cl_target_option, x_TARGET_VECTORIZE_BUILTINS), },
36064 { "tls-markers",
36065 offsetof (struct gcc_options, x_tls_markers),
36066 offsetof (struct cl_target_option, x_tls_markers), },
36067 { "sched-prolog",
36068 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36069 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36070 { "sched-epilog",
36071 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36072 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36073 { "gen-cell-microcode",
36074 offsetof (struct gcc_options, x_rs6000_gen_cell_microcode),
36075 offsetof (struct cl_target_option, x_rs6000_gen_cell_microcode), },
36076 { "warn-cell-microcode",
36077 offsetof (struct gcc_options, x_rs6000_warn_cell_microcode),
36078 offsetof (struct cl_target_option, x_rs6000_warn_cell_microcode), },
36081 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36082 parsing. Return true if there were no errors. */
36084 static bool
36085 rs6000_inner_target_options (tree args, bool attr_p)
36087 bool ret = true;
36089 if (args == NULL_TREE)
36092 else if (TREE_CODE (args) == STRING_CST)
36094 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36095 char *q;
36097 while ((q = strtok (p, ",")) != NULL)
36099 bool error_p = false;
36100 bool not_valid_p = false;
36101 const char *cpu_opt = NULL;
36103 p = NULL;
36104 if (strncmp (q, "cpu=", 4) == 0)
36106 int cpu_index = rs6000_cpu_name_lookup (q+4);
36107 if (cpu_index >= 0)
36108 rs6000_cpu_index = cpu_index;
36109 else
36111 error_p = true;
36112 cpu_opt = q+4;
36115 else if (strncmp (q, "tune=", 5) == 0)
36117 int tune_index = rs6000_cpu_name_lookup (q+5);
36118 if (tune_index >= 0)
36119 rs6000_tune_index = tune_index;
36120 else
36122 error_p = true;
36123 cpu_opt = q+5;
36126 else
36128 size_t i;
36129 bool invert = false;
36130 char *r = q;
36132 error_p = true;
36133 if (strncmp (r, "no-", 3) == 0)
36135 invert = true;
36136 r += 3;
36139 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36140 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36142 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36144 if (!rs6000_opt_masks[i].valid_target)
36145 not_valid_p = true;
36146 else
36148 error_p = false;
36149 rs6000_isa_flags_explicit |= mask;
36151 /* VSX needs altivec, so -mvsx automagically sets
36152 altivec and disables -mavoid-indexed-addresses. */
36153 if (!invert)
36155 if (mask == OPTION_MASK_VSX)
36157 mask |= OPTION_MASK_ALTIVEC;
36158 TARGET_AVOID_XFORM = 0;
36162 if (rs6000_opt_masks[i].invert)
36163 invert = !invert;
36165 if (invert)
36166 rs6000_isa_flags &= ~mask;
36167 else
36168 rs6000_isa_flags |= mask;
36170 break;
36173 if (error_p && !not_valid_p)
36175 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36176 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36178 size_t j = rs6000_opt_vars[i].global_offset;
36179 *((int *) ((char *)&global_options + j)) = !invert;
36180 error_p = false;
36181 not_valid_p = false;
36182 break;
36187 if (error_p)
36189 const char *eprefix, *esuffix;
36191 ret = false;
36192 if (attr_p)
36194 eprefix = "__attribute__((__target__(";
36195 esuffix = ")))";
36197 else
36199 eprefix = "#pragma GCC target ";
36200 esuffix = "";
36203 if (cpu_opt)
36204 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
36205 q, esuffix);
36206 else if (not_valid_p)
36207 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
36208 else
36209 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
36214 else if (TREE_CODE (args) == TREE_LIST)
36218 tree value = TREE_VALUE (args);
36219 if (value)
36221 bool ret2 = rs6000_inner_target_options (value, attr_p);
36222 if (!ret2)
36223 ret = false;
36225 args = TREE_CHAIN (args);
36227 while (args != NULL_TREE);
36230 else
36231 gcc_unreachable ();
36233 return ret;
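/* Illustrative usage (hypothetical user code) of the strings parsed
   above:

     #pragma GCC target ("cpu=power8,vsx,no-isel")

     __attribute__((__target__("power9-vector")))
     extern void hot_loop (void);

   The strtok loop consumes one comma-separated option at a time; a
   "no-" prefix inverts the sense of a mask entry.  */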
36236 /* Print out the target options as a list for -mdebug=target. */
36238 static void
36239 rs6000_debug_target_options (tree args, const char *prefix)
36241 if (args == NULL_TREE)
36242 fprintf (stderr, "%s<NULL>", prefix);
36244 else if (TREE_CODE (args) == STRING_CST)
36246 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36247 char *q;
36249 while ((q = strtok (p, ",")) != NULL)
36251 p = NULL;
36252 fprintf (stderr, "%s\"%s\"", prefix, q);
36253 prefix = ", ";
36257 else if (TREE_CODE (args) == TREE_LIST)
36261 tree value = TREE_VALUE (args);
36262 if (value)
36264 rs6000_debug_target_options (value, prefix);
36265 prefix = ", ";
36267 args = TREE_CHAIN (args);
36269 while (args != NULL_TREE);
36272 else
36273 gcc_unreachable ();
36275 return;
36279 /* Hook to validate attribute((target("..."))). */
36281 static bool
36282 rs6000_valid_attribute_p (tree fndecl,
36283 tree ARG_UNUSED (name),
36284 tree args,
36285 int flags)
36287 struct cl_target_option cur_target;
36288 bool ret;
36289 tree old_optimize = build_optimization_node (&global_options);
36290 tree new_target, new_optimize;
36291 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36293 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36295 if (TARGET_DEBUG_TARGET)
36297 tree tname = DECL_NAME (fndecl);
36298 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36299 if (tname)
36300 fprintf (stderr, "function: %.*s\n",
36301 (int) IDENTIFIER_LENGTH (tname),
36302 IDENTIFIER_POINTER (tname));
36303 else
36304 fprintf (stderr, "function: unknown\n");
36306 fprintf (stderr, "args:");
36307 rs6000_debug_target_options (args, " ");
36308 fprintf (stderr, "\n");
36310 if (flags)
36311 fprintf (stderr, "flags: 0x%x\n", flags);
36313 fprintf (stderr, "--------------------\n");
36316 old_optimize = build_optimization_node (&global_options);
36317 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36319 /* If the function changed the optimization levels as well as setting target
36320 options, start with the optimizations specified. */
36321 if (func_optimize && func_optimize != old_optimize)
36322 cl_optimization_restore (&global_options,
36323 TREE_OPTIMIZATION (func_optimize));
36325 /* The target attributes may also change some optimization flags, so update
36326 the optimization options if necessary. */
36327 cl_target_option_save (&cur_target, &global_options);
36328 rs6000_cpu_index = rs6000_tune_index = -1;
36329 ret = rs6000_inner_target_options (args, true);
36331 /* Set up any additional state. */
36332 if (ret)
36334 ret = rs6000_option_override_internal (false);
36335 new_target = build_target_option_node (&global_options);
36337 else
36338 new_target = NULL;
36340 new_optimize = build_optimization_node (&global_options);
36342 if (!new_target)
36343 ret = false;
36345 else if (fndecl)
36347 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36349 if (old_optimize != new_optimize)
36350 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36353 cl_target_option_restore (&global_options, &cur_target);
36355 if (old_optimize != new_optimize)
36356 cl_optimization_restore (&global_options,
36357 TREE_OPTIMIZATION (old_optimize));
36359 return ret;
36363 /* Hook to validate the current #pragma GCC target and set the state, and
36364 update the macros based on what was changed. If ARGS is NULL, then
36365 POP_TARGET is used to reset the options. */
36367 bool
36368 rs6000_pragma_target_parse (tree args, tree pop_target)
36370 tree prev_tree = build_target_option_node (&global_options);
36371 tree cur_tree;
36372 struct cl_target_option *prev_opt, *cur_opt;
36373 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36374 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36376 if (TARGET_DEBUG_TARGET)
36378 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36379 fprintf (stderr, "args:");
36380 rs6000_debug_target_options (args, " ");
36381 fprintf (stderr, "\n");
36383 if (pop_target)
36385 fprintf (stderr, "pop_target:\n");
36386 debug_tree (pop_target);
36388 else
36389 fprintf (stderr, "pop_target: <NULL>\n");
36391 fprintf (stderr, "--------------------\n");
36394 if (! args)
36396 cur_tree = ((pop_target)
36397 ? pop_target
36398 : target_option_default_node);
36399 cl_target_option_restore (&global_options,
36400 TREE_TARGET_OPTION (cur_tree));
36402 else
36404 rs6000_cpu_index = rs6000_tune_index = -1;
36405 if (!rs6000_inner_target_options (args, false)
36406 || !rs6000_option_override_internal (false)
36407 || (cur_tree = build_target_option_node (&global_options))
36408 == NULL_TREE)
36410 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36411 fprintf (stderr, "invalid pragma\n");
36413 return false;
36417 target_option_current_node = cur_tree;
36419 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36420 change the macros that are defined. */
36421 if (rs6000_target_modify_macros_ptr)
36423 prev_opt = TREE_TARGET_OPTION (prev_tree);
36424 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36425 prev_flags = prev_opt->x_rs6000_isa_flags;
36427 cur_opt = TREE_TARGET_OPTION (cur_tree);
36428 cur_flags = cur_opt->x_rs6000_isa_flags;
36429 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36431 diff_bumask = (prev_bumask ^ cur_bumask);
36432 diff_flags = (prev_flags ^ cur_flags);
36434 if ((diff_flags != 0) || (diff_bumask != 0))
36436 /* Delete old macros. */
36437 rs6000_target_modify_macros_ptr (false,
36438 prev_flags & diff_flags,
36439 prev_bumask & diff_bumask);
36441 /* Define new macros. */
36442 rs6000_target_modify_macros_ptr (true,
36443 cur_flags & diff_flags,
36444 cur_bumask & diff_bumask);
36448 return true;
36452 /* Remember the last target of rs6000_set_current_function. */
36453 static GTY(()) tree rs6000_previous_fndecl;
36455 /* Establish appropriate back-end context for processing the function
36456 FNDECL. The argument might be NULL to indicate processing at top
36457 level, outside of any function scope. */
36458 static void
36459 rs6000_set_current_function (tree fndecl)
36461 tree old_tree = (rs6000_previous_fndecl
36462 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
36463 : NULL_TREE);
36465 tree new_tree = (fndecl
36466 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
36467 : NULL_TREE);
36469 if (TARGET_DEBUG_TARGET)
36471 bool print_final = false;
36472 fprintf (stderr, "\n==================== rs6000_set_current_function");
36474 if (fndecl)
36475 fprintf (stderr, ", fndecl %s (%p)",
36476 (DECL_NAME (fndecl)
36477 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36478 : "<unknown>"), (void *)fndecl);
36480 if (rs6000_previous_fndecl)
36481 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36483 fprintf (stderr, "\n");
36484 if (new_tree)
36486 fprintf (stderr, "\nnew fndecl target specific options:\n");
36487 debug_tree (new_tree);
36488 print_final = true;
36491 if (old_tree)
36493 fprintf (stderr, "\nold fndecl target specific options:\n");
36494 debug_tree (old_tree);
36495 print_final = true;
36498 if (print_final)
36499 fprintf (stderr, "--------------------\n");
36502 /* Only change the context if the function changes. This hook is called
36503 several times in the course of compiling a function, and we don't want to
36504 slow things down too much or call target_reinit when it isn't safe. */
36505 if (fndecl && fndecl != rs6000_previous_fndecl)
36507 rs6000_previous_fndecl = fndecl;
36508 if (old_tree == new_tree)
36511 else if (new_tree && new_tree != target_option_default_node)
36513 cl_target_option_restore (&global_options,
36514 TREE_TARGET_OPTION (new_tree));
36515 if (TREE_TARGET_GLOBALS (new_tree))
36516 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36517 else
36518 TREE_TARGET_GLOBALS (new_tree)
36519 = save_target_globals_default_opts ();
36522 else if (old_tree && old_tree != target_option_default_node)
36524 new_tree = target_option_current_node;
36525 cl_target_option_restore (&global_options,
36526 TREE_TARGET_OPTION (new_tree));
36527 if (TREE_TARGET_GLOBALS (new_tree))
36528 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36529 else if (new_tree == target_option_default_node)
36530 restore_target_globals (&default_target_globals);
36531 else
36532 TREE_TARGET_GLOBALS (new_tree)
36533 = save_target_globals_default_opts ();
36539 /* Save the current options */
36541 static void
36542 rs6000_function_specific_save (struct cl_target_option *ptr,
36543 struct gcc_options *opts)
36545 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36546 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36549 /* Restore the current options */
36551 static void
36552 rs6000_function_specific_restore (struct gcc_options *opts,
36553 struct cl_target_option *ptr)
36556 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36557 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36558 (void) rs6000_option_override_internal (false);
36561 /* Print the current options */
36563 static void
36564 rs6000_function_specific_print (FILE *file, int indent,
36565 struct cl_target_option *ptr)
36567 rs6000_print_isa_options (file, indent, "Isa options set",
36568 ptr->x_rs6000_isa_flags);
36570 rs6000_print_isa_options (file, indent, "Isa options explicit",
36571 ptr->x_rs6000_isa_flags_explicit);
36574 /* Helper function to print the current isa or misc options on a line. */
36576 static void
36577 rs6000_print_options_internal (FILE *file,
36578 int indent,
36579 const char *string,
36580 HOST_WIDE_INT flags,
36581 const char *prefix,
36582 const struct rs6000_opt_mask *opts,
36583 size_t num_elements)
36585 size_t i;
36586 size_t start_column = 0;
36587 size_t cur_column;
36588 size_t max_column = 120;
36589 size_t prefix_len = strlen (prefix);
36590 size_t comma_len = 0;
36591 const char *comma = "";
36593 if (indent)
36594 start_column += fprintf (file, "%*s", indent, "");
36596 if (!flags)
36598 fprintf (file, DEBUG_FMT_S, string, "<none>");
36599 return;
36602 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36604 /* Print the various mask options. */
36605 cur_column = start_column;
36606 for (i = 0; i < num_elements; i++)
36608 bool invert = opts[i].invert;
36609 const char *name = opts[i].name;
36610 const char *no_str = "";
36611 HOST_WIDE_INT mask = opts[i].mask;
36612 size_t len = comma_len + prefix_len + strlen (name);
36614 if (!invert)
36616 if ((flags & mask) == 0)
36618 no_str = "no-";
36619 len += sizeof ("no-") - 1;
36622 flags &= ~mask;
36625 else
36627 if ((flags & mask) != 0)
36629 no_str = "no-";
36630 len += sizeof ("no-") - 1;
36633 flags |= mask;
36636 cur_column += len;
36637 if (cur_column > max_column)
36639 fprintf (file, ", \\\n%*s", (int)start_column, "");
36640 cur_column = start_column + len;
36641 comma = "";
36644 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36645 comma = ", ";
36646 comma_len = sizeof (", ") - 1;
36649 fputs ("\n", file);
36652 /* Helper function to print the current isa options on a line. */
36654 static void
36655 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36656 HOST_WIDE_INT flags)
36658 rs6000_print_options_internal (file, indent, string, flags, "-m",
36659 &rs6000_opt_masks[0],
36660 ARRAY_SIZE (rs6000_opt_masks));
36663 static void
36664 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36665 HOST_WIDE_INT flags)
36667 rs6000_print_options_internal (file, indent, string, flags, "",
36668 &rs6000_builtin_mask_names[0],
36669 ARRAY_SIZE (rs6000_builtin_mask_names));
36673 /* Hook to determine if one function can safely inline another. */
36675 static bool
36676 rs6000_can_inline_p (tree caller, tree callee)
36678 bool ret = false;
36679 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
36680 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
36682 /* If callee has no option attributes, then it is ok to inline. */
36683 if (!callee_tree)
36684 ret = true;
36686 /* If caller has no option attributes, but callee does then it is not ok to
36687 inline. */
36688 else if (!caller_tree)
36689 ret = false;
36691 else
36693 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
36694 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
36696 /* Callee's options must be a subset of the caller's, i.e. a vsx function
36697 can inline an altivec function but a non-vsx function can't inline a
36698 vsx function. */
36699 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
36700 == callee_opts->x_rs6000_isa_flags)
36701 ret = true;
36704 if (TARGET_DEBUG_TARGET)
36705 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
36706 (DECL_NAME (caller)
36707 ? IDENTIFIER_POINTER (DECL_NAME (caller))
36708 : "<unknown>"),
36709 (DECL_NAME (callee)
36710 ? IDENTIFIER_POINTER (DECL_NAME (callee))
36711 : "<unknown>"),
36712 (ret ? "can" : "cannot"));
36714 return ret;
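/* The subset test above is a plain mask check, illustratively

     (caller_flags & callee_flags) == callee_flags

   so e.g. a caller built with -mvsx (which implies Altivec) can
   inline a callee marked __attribute__((target("altivec"))), while
   an Altivec-only caller cannot inline a VSX callee.  */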
36717 /* Allocate a stack temp and fix up the address so it meets the particular
36718 memory requirements (either offsettable or REG+REG addressing). */
36720 rtx
36721 rs6000_allocate_stack_temp (machine_mode mode,
36722 bool offsettable_p,
36723 bool reg_reg_p)
36725 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
36726 rtx addr = XEXP (stack, 0);
36727 int strict_p = (reload_in_progress || reload_completed);
36729 if (!legitimate_indirect_address_p (addr, strict_p))
36731 if (offsettable_p
36732 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
36733 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
36735 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
36736 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
36739 return stack;
36742 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
36743 to such a form to deal with memory reference instructions like STFIWX that
36744 only take reg+reg addressing. */
36746 rtx
36747 rs6000_address_for_fpconvert (rtx x)
36749 int strict_p = (reload_in_progress || reload_completed);
36750 rtx addr;
36752 gcc_assert (MEM_P (x));
36753 addr = XEXP (x, 0);
36754 if (! legitimate_indirect_address_p (addr, strict_p)
36755 && ! legitimate_indexed_address_p (addr, strict_p))
36757 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
36759 rtx reg = XEXP (addr, 0);
36760 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
36761 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
36762 gcc_assert (REG_P (reg));
36763 emit_insn (gen_add3_insn (reg, reg, size_rtx));
36764 addr = reg;
36766 else if (GET_CODE (addr) == PRE_MODIFY)
36768 rtx reg = XEXP (addr, 0);
36769 rtx expr = XEXP (addr, 1);
36770 gcc_assert (REG_P (reg));
36771 gcc_assert (GET_CODE (expr) == PLUS);
36772 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
36773 addr = reg;
36776 x = replace_equiv_address (x, copy_addr_to_reg (addr));
36779 return x;
36782 /* Given a memory reference, if it is not in the form for altivec memory
36783 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
36784 convert to the altivec format. */
36786 rtx
36787 rs6000_address_for_altivec (rtx x)
36789 gcc_assert (MEM_P (x));
36790 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
36792 rtx addr = XEXP (x, 0);
36793 int strict_p = (reload_in_progress || reload_completed);
36795 if (!legitimate_indexed_address_p (addr, strict_p)
36796 && !legitimate_indirect_address_p (addr, strict_p))
36797 addr = copy_to_mode_reg (Pmode, addr);
36799 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
36800 x = change_address (x, GET_MODE (x), addr);
36803 return x;
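/* Sketch of the masking above in plain C (illustrative): Altivec
   loads and stores ignore the low four address bits, and the
   explicit AND makes that truncation visible to the optimizers:

     uintptr_t aligned = addr & (uintptr_t) -16;   // clear low 4 bits

   e.g. 0x1003 & -16 == 0x1000.  */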
36806 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
36808 On the RS/6000, all integer constants are acceptable, most won't be valid
36809 for particular insns, though. Only easy FP constants are acceptable. */
36811 static bool
36812 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
36814 if (TARGET_ELF && tls_referenced_p (x))
36815 return false;
36817 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
36818 || GET_MODE (x) == VOIDmode
36819 || (TARGET_POWERPC64 && mode == DImode)
36820 || easy_fp_constant (x, mode)
36821 || easy_vector_constant (x, mode));
36825 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
36827 static bool
36828 chain_already_loaded (rtx_insn *last)
36830 for (; last != NULL; last = PREV_INSN (last))
36832 if (NONJUMP_INSN_P (last))
36834 rtx patt = PATTERN (last);
36836 if (GET_CODE (patt) == SET)
36838 rtx lhs = XEXP (patt, 0);
36840 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
36841 return true;
36845 return false;
36848 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
36850 void
36851 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
36853 const bool direct_call_p
36854 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
36855 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
36856 rtx toc_load = NULL_RTX;
36857 rtx toc_restore = NULL_RTX;
36858 rtx func_addr;
36859 rtx abi_reg = NULL_RTX;
36860 rtx call[4];
36861 int n_call;
36862 rtx insn;
36864 /* Handle longcall attributes. */
36865 if (INTVAL (cookie) & CALL_LONG)
36866 func_desc = rs6000_longcall_ref (func_desc);
36868 /* Handle indirect calls. */
36869 if (GET_CODE (func_desc) != SYMBOL_REF
36870 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
36872 /* Save the TOC into its reserved slot before the call,
36873 and prepare to restore it after the call. */
36874 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
36875 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
36876 rtx stack_toc_mem = gen_frame_mem (Pmode,
36877 gen_rtx_PLUS (Pmode, stack_ptr,
36878 stack_toc_offset));
36879 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
36880 gen_rtvec (1, stack_toc_offset),
36881 UNSPEC_TOCSLOT);
36882 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
36884 /* Can we optimize saving the TOC in the prologue or
36885 do we need to do it at every call? */
36886 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
36887 cfun->machine->save_toc_in_prologue = true;
36888 else
36890 MEM_VOLATILE_P (stack_toc_mem) = 1;
36891 emit_move_insn (stack_toc_mem, toc_reg);
36894 if (DEFAULT_ABI == ABI_ELFv2)
36896 /* A function pointer in the ELFv2 ABI is just a plain address, but
36897 the ABI requires it to be loaded into r12 before the call. */
36898 func_addr = gen_rtx_REG (Pmode, 12);
36899 emit_move_insn (func_addr, func_desc);
36900 abi_reg = func_addr;
36902 else
36904 /* A function pointer under AIX is a pointer to a data area whose
36905 first word contains the actual address of the function, whose
36906 second word contains a pointer to its TOC, and whose third word
36907 contains a value to place in the static chain register (r11).
36908 Note that if we load the static chain, our "trampoline" need
36909 not have any executable code. */
36911 /* Load up address of the actual function. */
36912 func_desc = force_reg (Pmode, func_desc);
36913 func_addr = gen_reg_rtx (Pmode);
36914 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
36916 /* Prepare to load the TOC of the called function. Note that the
36917 TOC load must happen immediately before the actual call so
36918 that unwinding the TOC registers works correctly. See the
36919 comment in frob_update_context. */
36920 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
36921 rtx func_toc_mem = gen_rtx_MEM (Pmode,
36922 gen_rtx_PLUS (Pmode, func_desc,
36923 func_toc_offset));
36924 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
36926 /* If we have a static chain, load it up. But, if the call was
36927 originally direct, the 3rd word has not been written since no
36928 trampoline has been built, so we ought not to load it, lest we
36929 override a static chain value. */
36930 if (!direct_call_p
36931 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
36932 && !chain_already_loaded (get_current_sequence ()->next->last))
36934 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
36935 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
36936 rtx func_sc_mem = gen_rtx_MEM (Pmode,
36937 gen_rtx_PLUS (Pmode, func_desc,
36938 func_sc_offset));
36939 emit_move_insn (sc_reg, func_sc_mem);
36940 abi_reg = sc_reg;
36944 else
36946 /* Direct calls use the TOC: for local calls, the callee will
36947 assume the TOC register is set; for non-local calls, the
36948 PLT stub needs the TOC register. */
36949 abi_reg = toc_reg;
36950 func_addr = func_desc;
36953 /* Create the call. */
36954 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
36955 if (value != NULL_RTX)
36956 call[0] = gen_rtx_SET (value, call[0]);
36957 n_call = 1;
36959 if (toc_load)
36960 call[n_call++] = toc_load;
36961 if (toc_restore)
36962 call[n_call++] = toc_restore;
36964 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
36966 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
36967 insn = emit_call_insn (insn);
36969 /* Mention all registers defined by the ABI to hold information
36970 as uses in CALL_INSN_FUNCTION_USAGE. */
36971 if (abi_reg)
36972 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
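/* As a rough illustration (64-bit AIX/ELFv1; register numbers and stack
   offsets are ABI-dependent and shown only as an example), an indirect
   call through a function descriptor in r11 expands to something like:

	std 2,40(1)	# save TOC in its reserved stack slot
	ld 12,0(11)	# entry point from the descriptor
	ld 2,8(11)	# callee's TOC
	ld 11,16(11)	# static chain
	mtctr 12
	bctrl
	ld 2,40(1)	# toc_restore after the call  */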
36975 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
36977 void
36978 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
36980 rtx call[2];
36981 rtx insn;
36983 gcc_assert (INTVAL (cookie) == 0);
36985 /* Create the call. */
36986 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
36987 if (value != NULL_RTX)
36988 call[0] = gen_rtx_SET (value, call[0]);
36990 call[1] = simple_return_rtx;
36992 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
36993 insn = emit_call_insn (insn);
36995 /* Note use of the TOC register. */
36996 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
36997 /* We need to also mark a use of the link register since the function we
36998 sibling-call to will use it to return to our caller. */
36999 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
37002 /* Return whether we need to always update the saved TOC pointer when we update
37003 the stack pointer. */
37005 static bool
37006 rs6000_save_toc_in_prologue_p (void)
37008 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37011 #ifdef HAVE_GAS_HIDDEN
37012 # define USE_HIDDEN_LINKONCE 1
37013 #else
37014 # define USE_HIDDEN_LINKONCE 0
37015 #endif
37017 /* Fills in the label name that should be used for a 476 link stack thunk. */
37019 void
37020 get_ppc476_thunk_name (char name[32])
37022 gcc_assert (TARGET_LINK_STACK);
37024 if (USE_HIDDEN_LINKONCE)
37025 sprintf (name, "__ppc476.get_thunk");
37026 else
37027 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37030 /* This function emits the simple thunk routine that is used to preserve
37031 the link stack on the 476 cpu. */
37033 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37034 static void
37035 rs6000_code_end (void)
37037 char name[32];
37038 tree decl;
37040 if (!TARGET_LINK_STACK)
37041 return;
37043 get_ppc476_thunk_name (name);
37045 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37046 build_function_type_list (void_type_node, NULL_TREE));
37047 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37048 NULL_TREE, void_type_node);
37049 TREE_PUBLIC (decl) = 1;
37050 TREE_STATIC (decl) = 1;
37052 #if RS6000_WEAK
37053 if (USE_HIDDEN_LINKONCE)
37055 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37056 targetm.asm_out.unique_section (decl, 0);
37057 switch_to_section (get_named_section (decl, NULL, 0));
37058 DECL_WEAK (decl) = 1;
37059 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37060 targetm.asm_out.globalize_label (asm_out_file, name);
37061 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37062 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37064 else
37065 #endif
37067 switch_to_section (text_section);
37068 ASM_OUTPUT_LABEL (asm_out_file, name);
37071 DECL_INITIAL (decl) = make_node (BLOCK);
37072 current_function_decl = decl;
37073 allocate_struct_function (decl, false);
37074 init_function_start (decl);
37075 first_function_block_is_cold = false;
37076 /* Make sure unwind info is emitted for the thunk if needed. */
37077 final_start_function (emit_barrier (), asm_out_file, 1);
37079 fputs ("\tblr\n", asm_out_file);
37081 final_end_function ();
37082 init_insn_lengths ();
37083 free_after_compilation (cfun);
37084 set_cfun (NULL);
37085 current_function_decl = NULL;
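/* The assembly emitted for the thunk is essentially just (weak,
   visibility and section directives omitted):

   __ppc476.get_thunk:
	blr  */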
37088 /* Add r30 to hard reg set if the prologue sets it up and it is not
37089 pic_offset_table_rtx. */
37091 static void
37092 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37094 if (!TARGET_SINGLE_PIC_BASE
37095 && TARGET_TOC
37096 && TARGET_MINIMAL_TOC
37097 && get_pool_size () != 0)
37098 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37099 if (cfun->machine->split_stack_argp_used)
37100 add_to_hard_reg_set (&set->set, Pmode, 12);
37104 /* Helper function for rs6000_split_logical to emit a logical instruction after
37105 splitting the operation into single GPR registers.
37107 DEST is the destination register.
37108 OP1 and OP2 are the input source registers.
37109 CODE is the base operation (AND, IOR, XOR, NOT).
37110 MODE is the machine mode.
37111 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37112 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37113 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37115 static void
37116 rs6000_split_logical_inner (rtx dest,
37117 rtx op1,
37118 rtx op2,
37119 enum rtx_code code,
37120 machine_mode mode,
37121 bool complement_final_p,
37122 bool complement_op1_p,
37123 bool complement_op2_p)
37125 rtx bool_rtx;
37127 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37128 if (op2 && GET_CODE (op2) == CONST_INT
37129 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37130 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37132 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37133 HOST_WIDE_INT value = INTVAL (op2) & mask;
37135 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37136 if (code == AND)
37138 if (value == 0)
37140 emit_insn (gen_rtx_SET (dest, const0_rtx));
37141 return;
37144 else if (value == mask)
37146 if (!rtx_equal_p (dest, op1))
37147 emit_insn (gen_rtx_SET (dest, op1));
37148 return;
37152 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37153 into separate ORI/ORIS or XORI/XORIS instructions. */
37154 else if (code == IOR || code == XOR)
37156 if (value == 0)
37158 if (!rtx_equal_p (dest, op1))
37159 emit_insn (gen_rtx_SET (dest, op1));
37160 return;
37165 if (code == AND && mode == SImode
37166 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37168 emit_insn (gen_andsi3 (dest, op1, op2));
37169 return;
37172 if (complement_op1_p)
37173 op1 = gen_rtx_NOT (mode, op1);
37175 if (complement_op2_p)
37176 op2 = gen_rtx_NOT (mode, op2);
37178 /* For canonical RTL, if only one arm is inverted it is the first. */
37179 if (!complement_op1_p && complement_op2_p)
37180 std::swap (op1, op2);
37182 bool_rtx = ((code == NOT)
37183 ? gen_rtx_NOT (mode, op1)
37184 : gen_rtx_fmt_ee (code, mode, op1, op2));
37186 if (complement_final_p)
37187 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37189 emit_insn (gen_rtx_SET (dest, bool_rtx));
37192 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37193 operations are split immediately during RTL generation to allow for more
37194 optimizations of the AND/IOR/XOR.
37196 OPERANDS is an array containing the destination and two input operands.
37197 CODE is the base operation (AND, IOR, XOR, NOT).
37199 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37200 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37201 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37205 static void
37206 rs6000_split_logical_di (rtx operands[3],
37207 enum rtx_code code,
37208 bool complement_final_p,
37209 bool complement_op1_p,
37210 bool complement_op2_p)
37212 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37213 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37214 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37215 enum hi_lo { hi = 0, lo = 1 };
37216 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37217 size_t i;
37219 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37220 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37221 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37222 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37224 if (code == NOT)
37225 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37226 else
37228 if (GET_CODE (operands[2]) != CONST_INT)
37230 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37231 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37233 else
37235 HOST_WIDE_INT value = INTVAL (operands[2]);
37236 HOST_WIDE_INT value_hi_lo[2];
37238 gcc_assert (!complement_final_p);
37239 gcc_assert (!complement_op1_p);
37240 gcc_assert (!complement_op2_p);
37242 value_hi_lo[hi] = value >> 32;
37243 value_hi_lo[lo] = value & lower_32bits;
37245 for (i = 0; i < 2; i++)
37247 HOST_WIDE_INT sub_value = value_hi_lo[i];
37249 if (sub_value & sign_bit)
37250 sub_value |= upper_32bits;
37252 op2_hi_lo[i] = GEN_INT (sub_value);
37254 /* If this is an AND instruction, check to see if we need to load
37255 the value in a register. */
37256 if (code == AND && sub_value != -1 && sub_value != 0
37257 && !and_operand (op2_hi_lo[i], SImode))
37258 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37263 for (i = 0; i < 2; i++)
37265 /* Split large IOR/XOR operations. */
37266 if ((code == IOR || code == XOR)
37267 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37268 && !complement_final_p
37269 && !complement_op1_p
37270 && !complement_op2_p
37271 && !logical_const_operand (op2_hi_lo[i], SImode))
37273 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37274 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37275 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37276 rtx tmp = gen_reg_rtx (SImode);
37278 /* Make sure the constant is sign extended. */
37279 if ((hi_16bits & sign_bit) != 0)
37280 hi_16bits |= upper_32bits;
37282 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37283 code, SImode, false, false, false);
37285 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37286 code, SImode, false, false, false);
37288 else
37289 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37290 code, SImode, complement_final_p,
37291 complement_op1_p, complement_op2_p);
37294 return;
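/* Illustrative example: on a 32-bit target, a DImode IOR with the
   constant 0x12345678 splits into

	dest_hi = op1_hi		# IOR with 0 is just a move
	tmp     = op1_lo | 0x12340000	# oris
	dest_lo = tmp    | 0x00005678	# ori

   because 0x12345678 is not itself a logical_const_operand.  */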
37297 /* Split the insns that make up boolean operations operating on multiple GPR
37298 registers. The boolean MD patterns ensure that the inputs either are
37299 exactly the same as the output registers, or there is no overlap.
37301 OPERANDS is an array containing the destination and two input operands.
37302 CODE is the base operation (AND, IOR, XOR, NOT).
37303 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37304 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37305 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37307 void
37308 rs6000_split_logical (rtx operands[3],
37309 enum rtx_code code,
37310 bool complement_final_p,
37311 bool complement_op1_p,
37312 bool complement_op2_p)
37314 machine_mode mode = GET_MODE (operands[0]);
37315 machine_mode sub_mode;
37316 rtx op0, op1, op2;
37317 int sub_size, regno0, regno1, nregs, i;
37319 /* If this is DImode, use the specialized version that can run before
37320 register allocation. */
37321 if (mode == DImode && !TARGET_POWERPC64)
37323 rs6000_split_logical_di (operands, code, complement_final_p,
37324 complement_op1_p, complement_op2_p);
37325 return;
37328 op0 = operands[0];
37329 op1 = operands[1];
37330 op2 = (code == NOT) ? NULL_RTX : operands[2];
37331 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
37332 sub_size = GET_MODE_SIZE (sub_mode);
37333 regno0 = REGNO (op0);
37334 regno1 = REGNO (op1);
37336 gcc_assert (reload_completed);
37337 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37338 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37340 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
37341 gcc_assert (nregs > 1);
37343 if (op2 && REG_P (op2))
37344 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
37346 for (i = 0; i < nregs; i++)
37348 int offset = i * sub_size;
37349 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
37350 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
37351 rtx sub_op2 = ((code == NOT)
37352 ? NULL_RTX
37353 : simplify_subreg (sub_mode, op2, mode, offset));
37355 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
37356 complement_final_p, complement_op1_p,
37357 complement_op2_p);
37360 return;
37364 /* Return true if the peephole2 can combine an addis instruction with a
37365 load that uses an offset, so that the pair can be fused together on
37366 a power8. */
37368 bool
37369 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
37370 rtx addis_value, /* addis value. */
37371 rtx target, /* target register that is loaded. */
37372 rtx mem) /* bottom part of the memory addr. */
37374 rtx addr;
37375 rtx base_reg;
37377 /* Validate arguments. */
37378 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37379 return false;
37381 if (!base_reg_operand (target, GET_MODE (target)))
37382 return false;
37384 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37385 return false;
37387 /* Allow sign/zero extension. */
37388 if (GET_CODE (mem) == ZERO_EXTEND
37389 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
37390 mem = XEXP (mem, 0);
37392 if (!MEM_P (mem))
37393 return false;
37395 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
37396 return false;
37398 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37399 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
37400 return false;
37402 /* Validate that the register used to load the high value is either the
37403 register being loaded, or we can safely replace its use.
37405 This function is only called from the peephole2 pass and we assume that
37406 there are 2 instructions in the peephole (addis and load), so we want to
37407 check that the target register is not used in the memory address and that
37408 the register holding the addis result is dead after the peephole. */
37409 if (REGNO (addis_reg) != REGNO (target))
37411 if (reg_mentioned_p (target, mem))
37412 return false;
37414 if (!peep2_reg_dead_p (2, addis_reg))
37415 return false;
37417 /* If the target register being loaded is the stack pointer, we must
37418 avoid loading any other value into it, even temporarily. */
37419 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
37420 return false;
37423 base_reg = XEXP (addr, 0);
37424 return REGNO (addis_reg) == REGNO (base_reg);
37427 /* During the peephole2 pass, adjust and expand the insns for a load fusion
37428 sequence. We adjust the addis register to use the target register. If the
37429 load sign extends, we adjust the code to do a zero-extending load followed
37430 by an explicit sign extension, since the fusion only covers zero-extending
37431 loads.
37433 The operands are:
37434 operands[0] register set with addis (to be replaced with target)
37435 operands[1] value set via addis
37436 operands[2] target register being loaded
37437 operands[3] D-form memory reference using operands[0]. */
37439 void
37440 expand_fusion_gpr_load (rtx *operands)
37442 rtx addis_value = operands[1];
37443 rtx target = operands[2];
37444 rtx orig_mem = operands[3];
37445 rtx new_addr, new_mem, orig_addr, offset;
37446 enum rtx_code plus_or_lo_sum;
37447 machine_mode target_mode = GET_MODE (target);
37448 machine_mode extend_mode = target_mode;
37449 machine_mode ptr_mode = Pmode;
37450 enum rtx_code extend = UNKNOWN;
37452 if (GET_CODE (orig_mem) == ZERO_EXTEND
37453 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
37455 extend = GET_CODE (orig_mem);
37456 orig_mem = XEXP (orig_mem, 0);
37457 target_mode = GET_MODE (orig_mem);
37460 gcc_assert (MEM_P (orig_mem));
37462 orig_addr = XEXP (orig_mem, 0);
37463 plus_or_lo_sum = GET_CODE (orig_addr);
37464 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37466 offset = XEXP (orig_addr, 1);
37467 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37468 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37470 if (extend != UNKNOWN)
37471 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
37473 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37474 UNSPEC_FUSION_GPR);
37475 emit_insn (gen_rtx_SET (target, new_mem));
37477 if (extend == SIGN_EXTEND)
37479 int sub_off = ((BYTES_BIG_ENDIAN)
37480 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
37481 : 0);
37482 rtx sign_reg
37483 = simplify_subreg (target_mode, target, extend_mode, sub_off);
37485 emit_insn (gen_rtx_SET (target,
37486 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
37489 return;
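/* For instance (illustrative), a sign-extending SImode to DImode fusion
   load is rewritten as a zero-extending fused load into TARGET followed
   by

     (set (reg:DI target)
	  (sign_extend:DI (subreg:SI (reg:DI target) <off>)))

   where <off> selects the low word, adjusted for endianness.  */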
37492 /* Emit the addis instruction that will be part of a fused instruction
37493 sequence. */
37495 void
37496 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
37497 const char *mode_name)
37499 rtx fuse_ops[10];
37500 char insn_template[80];
37501 const char *addis_str = NULL;
37502 const char *comment_str = ASM_COMMENT_START;
37504 if (*comment_str == ' ')
37505 comment_str++;
37507 /* Emit the addis instruction. */
37508 fuse_ops[0] = target;
37509 if (satisfies_constraint_L (addis_value))
37511 fuse_ops[1] = addis_value;
37512 addis_str = "lis %0,%v1";
37515 else if (GET_CODE (addis_value) == PLUS)
37517 rtx op0 = XEXP (addis_value, 0);
37518 rtx op1 = XEXP (addis_value, 1);
37520 if (REG_P (op0) && CONST_INT_P (op1)
37521 && satisfies_constraint_L (op1))
37523 fuse_ops[1] = op0;
37524 fuse_ops[2] = op1;
37525 addis_str = "addis %0,%1,%v2";
37529 else if (GET_CODE (addis_value) == HIGH)
37531 rtx value = XEXP (addis_value, 0);
37532 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
37534 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
37535 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
37536 if (TARGET_ELF)
37537 addis_str = "addis %0,%2,%1@toc@ha";
37539 else if (TARGET_XCOFF)
37540 addis_str = "addis %0,%1@u(%2)";
37542 else
37543 gcc_unreachable ();
37546 else if (GET_CODE (value) == PLUS)
37548 rtx op0 = XEXP (value, 0);
37549 rtx op1 = XEXP (value, 1);
37551 if (GET_CODE (op0) == UNSPEC
37552 && XINT (op0, 1) == UNSPEC_TOCREL
37553 && CONST_INT_P (op1))
37555 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
37556 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
37557 fuse_ops[3] = op1;
37558 if (TARGET_ELF)
37559 addis_str = "addis %0,%2,%1+%3@toc@ha";
37561 else if (TARGET_XCOFF)
37562 addis_str = "addis %0,%1+%3@u(%2)";
37564 else
37565 gcc_unreachable ();
37569 else if (satisfies_constraint_L (value))
37571 fuse_ops[1] = value;
37572 addis_str = "lis %0,%v1";
37575 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
37577 fuse_ops[1] = value;
37578 addis_str = "lis %0,%1@ha";
37582 if (!addis_str)
37583 fatal_insn ("Could not generate addis value for fusion", addis_value);
37585 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
37586 comment, mode_name);
37587 output_asm_insn (insn_template, fuse_ops);
37590 /* Emit a D-form load or store instruction that is the second instruction
37591 of a fusion sequence. */
37593 void
37594 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
37595 const char *insn_str)
37597 rtx fuse_ops[10];
37598 char insn_template[80];
37600 fuse_ops[0] = load_store_reg;
37601 fuse_ops[1] = addis_reg;
37603 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
37605 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
37606 fuse_ops[2] = offset;
37607 output_asm_insn (insn_template, fuse_ops);
37610 else if (GET_CODE (offset) == UNSPEC
37611 && XINT (offset, 1) == UNSPEC_TOCREL)
37613 if (TARGET_ELF)
37614 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
37616 else if (TARGET_XCOFF)
37617 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37619 else
37620 gcc_unreachable ();
37622 fuse_ops[2] = XVECEXP (offset, 0, 0);
37623 output_asm_insn (insn_template, fuse_ops);
37626 else if (GET_CODE (offset) == PLUS
37627 && GET_CODE (XEXP (offset, 0)) == UNSPEC
37628 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
37629 && CONST_INT_P (XEXP (offset, 1)))
37631 rtx tocrel_unspec = XEXP (offset, 0);
37632 if (TARGET_ELF)
37633 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
37635 else if (TARGET_XCOFF)
37636 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
37638 else
37639 gcc_unreachable ();
37641 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
37642 fuse_ops[3] = XEXP (offset, 1);
37643 output_asm_insn (insn_template, fuse_ops);
37646 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
37648 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37650 fuse_ops[2] = offset;
37651 output_asm_insn (insn_template, fuse_ops);
37654 else
37655 fatal_insn ("Unable to generate load/store offset for fusion", offset);
37657 return;
37660 /* Wrap a TOC address that can be fused to indicate that special fusion
37661 processing is needed. */
37663 rtx
37664 fusion_wrap_memory_address (rtx old_mem)
37666 rtx old_addr = XEXP (old_mem, 0);
37667 rtvec v = gen_rtvec (1, old_addr);
37668 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
37669 return replace_equiv_address_nv (old_mem, new_addr, false);
37672 /* Given an address, convert it into the addis and load offset parts. Addresses
37673 created during the peephole2 process look like:
37674 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
37675 (unspec [(...)] UNSPEC_TOCREL))
37677 Addresses created via toc fusion look like:
37678 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
37680 static void
37681 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
37683 rtx hi, lo;
37685 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
37687 lo = XVECEXP (addr, 0, 0);
37688 hi = gen_rtx_HIGH (Pmode, lo);
37690 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
37692 hi = XEXP (addr, 0);
37693 lo = XEXP (addr, 1);
37695 else
37696 gcc_unreachable ();
37698 *p_hi = hi;
37699 *p_lo = lo;
37702 /* Return a string to fuse an addis instruction with a gpr load into the same
37703 register that the addis instruction set. The address that is used
37704 is the logical address that was formed during peephole2:
37705 (lo_sum (high) (low-part))
37707 Or the address is the TOC address that is wrapped before register allocation:
37708 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
37710 The code is complicated, so we call output_asm_insn directly, and just
37711 return "". */
37713 const char *
37714 emit_fusion_gpr_load (rtx target, rtx mem)
37716 rtx addis_value;
37717 rtx addr;
37718 rtx load_offset;
37719 const char *load_str = NULL;
37720 const char *mode_name = NULL;
37721 machine_mode mode;
37723 if (GET_CODE (mem) == ZERO_EXTEND)
37724 mem = XEXP (mem, 0);
37726 gcc_assert (REG_P (target) && MEM_P (mem));
37728 addr = XEXP (mem, 0);
37729 fusion_split_address (addr, &addis_value, &load_offset);
37731 /* Now emit the load instruction to the same register. */
37732 mode = GET_MODE (mem);
37733 switch (mode)
37735 case QImode:
37736 mode_name = "char";
37737 load_str = "lbz";
37738 break;
37740 case HImode:
37741 mode_name = "short";
37742 load_str = "lhz";
37743 break;
37745 case SImode:
37746 case SFmode:
37747 mode_name = (mode == SFmode) ? "float" : "int";
37748 load_str = "lwz";
37749 break;
37751 case DImode:
37752 case DFmode:
37753 gcc_assert (TARGET_POWERPC64);
37754 mode_name = (mode == DFmode) ? "double" : "long";
37755 load_str = "ld";
37756 break;
37758 default:
37759 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
37762 /* Emit the addis instruction. */
37763 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
37765 /* Emit the D-form load instruction. */
37766 emit_fusion_load_store (target, target, load_offset, load_str);
37768 return "";
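/* On ELF targets the emitted pair looks roughly like

	addis 9,2,.LC0@toc@ha	# gpr load fusion, type long
	ld 9,.LC0@toc@l(9)

   with the addis result and the loaded value sharing one register
   (register numbers are illustrative).  */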
37772 /* Return true if the peephole2 can combine a load/store involving a
37773 combination of an addis instruction and the memory operation. This form
37774 of fusion was added in ISA 3.0 (power9) hardware.
37776 bool
37777 fusion_p9_p (rtx addis_reg, /* register set via addis. */
37778 rtx addis_value, /* addis value. */
37779 rtx dest, /* destination (memory or register). */
37780 rtx src) /* source (register or memory). */
37782 rtx addr, mem, offset;
37783 machine_mode mode = GET_MODE (src);
37785 /* Validate arguments. */
37786 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37787 return false;
37789 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37790 return false;
37792 /* Ignore extend operations that are part of the load. */
37793 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
37794 src = XEXP (src, 0);
37796 /* Test for memory<-register or register<-memory. */
37797 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
37799 if (!MEM_P (dest))
37800 return false;
37802 mem = dest;
37805 else if (MEM_P (src))
37807 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
37808 return false;
37810 mem = src;
37813 else
37814 return false;
37816 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37817 if (GET_CODE (addr) == PLUS)
37819 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
37820 return false;
37822 return satisfies_constraint_I (XEXP (addr, 1));
37825 else if (GET_CODE (addr) == LO_SUM)
37827 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
37828 return false;
37830 offset = XEXP (addr, 1);
37831 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
37832 return small_toc_ref (offset, GET_MODE (offset));
37834 else if (TARGET_ELF && !TARGET_POWERPC64)
37835 return CONSTANT_P (offset);
37838 return false;
37841 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
37842 load sequence.
37844 The operands are:
37845 operands[0] register set with addis
37846 operands[1] value set via addis
37847 operands[2] target register being loaded
37848 operands[3] D-form memory reference using operands[0].
37850 This is similar to the fusion introduced with power8, except it scales to
37851 both loads/stores and does not require the result register to be the same as
37852 the base register. At the moment, we only do this if the register set with
37853 addis is dead. */
37855 void
37856 expand_fusion_p9_load (rtx *operands)
37858 rtx tmp_reg = operands[0];
37859 rtx addis_value = operands[1];
37860 rtx target = operands[2];
37861 rtx orig_mem = operands[3];
37862 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
37863 enum rtx_code plus_or_lo_sum;
37864 machine_mode target_mode = GET_MODE (target);
37865 machine_mode extend_mode = target_mode;
37866 machine_mode ptr_mode = Pmode;
37867 enum rtx_code extend = UNKNOWN;
37869 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
37871 extend = GET_CODE (orig_mem);
37872 orig_mem = XEXP (orig_mem, 0);
37873 target_mode = GET_MODE (orig_mem);
37876 gcc_assert (MEM_P (orig_mem));
37878 orig_addr = XEXP (orig_mem, 0);
37879 plus_or_lo_sum = GET_CODE (orig_addr);
37880 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37882 offset = XEXP (orig_addr, 1);
37883 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37884 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37886 if (extend != UNKNOWN)
37887 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
37889 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37890 UNSPEC_FUSION_P9);
37892 set = gen_rtx_SET (target, new_mem);
37893 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
37894 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
37895 emit_insn (insn);
37897 return;
37900 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
37901 store sequence.
37903 The operands are:
37904 operands[0] register set with addis
37905 operands[1] value set via addis
37906 operands[2] target D-form memory being stored to
37907 operands[3] register being stored
37909 This is similar to the fusion introduced with power8, except it scales to
37910 both loads/stores and does not require the result register to be the same as
37911 the base register. At the moment, we only do this if the register set with
37912 addis is dead. */
37914 void
37915 expand_fusion_p9_store (rtx *operands)
37917 rtx tmp_reg = operands[0];
37918 rtx addis_value = operands[1];
37919 rtx orig_mem = operands[2];
37920 rtx src = operands[3];
37921 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
37922 enum rtx_code plus_or_lo_sum;
37923 machine_mode target_mode = GET_MODE (orig_mem);
37924 machine_mode ptr_mode = Pmode;
37926 gcc_assert (MEM_P (orig_mem));
37928 orig_addr = XEXP (orig_mem, 0);
37929 plus_or_lo_sum = GET_CODE (orig_addr);
37930 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37932 offset = XEXP (orig_addr, 1);
37933 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37934 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37936 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
37937 UNSPEC_FUSION_P9);
37939 set = gen_rtx_SET (new_mem, new_src);
37940 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
37941 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
37942 emit_insn (insn);
37944 return;
37947 /* Return a string to fuse an addis instruction with a load using extended
37948 fusion. The address that is used is the logical address that was formed
37949 during peephole2: (lo_sum (high) (low-part))
37951 The code is complicated, so we call output_asm_insn directly, and just
37952 return "". */
37954 const char *
37955 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
37957 machine_mode mode = GET_MODE (reg);
37958 rtx hi;
37959 rtx lo;
37960 rtx addr;
37961 const char *load_string;
37962 int r;
37964 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
37966 mem = XEXP (mem, 0);
37967 mode = GET_MODE (mem);
37970 if (GET_CODE (reg) == SUBREG)
37972 gcc_assert (SUBREG_BYTE (reg) == 0);
37973 reg = SUBREG_REG (reg);
37976 if (!REG_P (reg))
37977 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
37979 r = REGNO (reg);
37980 if (FP_REGNO_P (r))
37982 if (mode == SFmode)
37983 load_string = "lfs";
37984 else if (mode == DFmode || mode == DImode)
37985 load_string = "lfd";
37986 else
37987 gcc_unreachable ();
37989 else if (INT_REGNO_P (r))
37991 switch (mode)
37993 case QImode:
37994 load_string = "lbz";
37995 break;
37996 case HImode:
37997 load_string = "lhz";
37998 break;
37999 case SImode:
38000 case SFmode:
38001 load_string = "lwz";
38002 break;
38003 case DImode:
38004 case DFmode:
38005 if (!TARGET_POWERPC64)
38006 gcc_unreachable ();
38007 load_string = "ld";
38008 break;
38009 default:
38010 gcc_unreachable ();
38013 else
38014 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38016 if (!MEM_P (mem))
38017 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38019 addr = XEXP (mem, 0);
38020 fusion_split_address (addr, &hi, &lo);
38022 /* Emit the addis instruction. */
38023 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
38025 /* Emit the D-form load instruction. */
38026 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38028 return "";
38031 /* Return a string to fuse an addis instruction with a store using extended
38032 fusion. The address that is used is the logical address that was formed
38033 during peephole2: (lo_sum (high) (low-part))
38035 The code is complicated, so we call output_asm_insn directly, and just
38036 return "". */
38038 const char *
38039 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38041 machine_mode mode = GET_MODE (reg);
38042 rtx hi;
38043 rtx lo;
38044 rtx addr;
38045 const char *store_string;
38046 int r;
38048 if (GET_CODE (reg) == SUBREG)
38050 gcc_assert (SUBREG_BYTE (reg) == 0);
38051 reg = SUBREG_REG (reg);
38054 if (!REG_P (reg))
38055 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38057 r = REGNO (reg);
38058 if (FP_REGNO_P (r))
38060 if (mode == SFmode)
38061 store_string = "stfs";
38062 else if (mode == DFmode)
38063 store_string = "stfd";
38064 else
38065 gcc_unreachable ();
38067 else if (INT_REGNO_P (r))
38069 switch (mode)
38071 case QImode:
38072 store_string = "stb";
38073 break;
38074 case HImode:
38075 store_string = "sth";
38076 break;
38077 case SImode:
38078 case SFmode:
38079 store_string = "stw";
38080 break;
38081 case DImode:
38082 case DFmode:
38083 if (!TARGET_POWERPC64)
38084 gcc_unreachable ();
38085 store_string = "std";
38086 break;
38087 default:
38088 gcc_unreachable ();
38091 else
38092 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38094 if (!MEM_P (mem))
38095 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38097 addr = XEXP (mem, 0);
38098 fusion_split_address (addr, &hi, &lo);
38100 /* Emit the addis instruction. */
38101 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
38103 /* Emit the D-form store instruction. */
38104 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38106 return "";
38110 /* Analyze vector computations and remove unnecessary doubleword
38111 swaps (xxswapdi instructions). This pass is performed only
38112 for little-endian VSX code generation.
38114 For this specific case, loads and stores of 4x32 and 2x64 vectors
38115 are inefficient. These are implemented using the lxvd2x and
38116 stxvd2x instructions, which invert the order of doublewords in
38117 a vector register. Thus the code generation inserts an xxswapdi
38118 after each such load, and prior to each such store. (For spill
38119 code after register assignment, an additional xxswapdi is inserted
38120 following each store in order to return a hard register to its
38121 unpermuted value.)
38123 The extra xxswapdi instructions reduce performance. This can be
38124 particularly bad for vectorized code. The purpose of this pass
38125 is to reduce the number of xxswapdi instructions required for
38126 correctness.
38128 The primary insight is that much code that operates on vectors
38129 does not care about the relative order of elements in a register,
38130 so long as the correct memory order is preserved. If we have
38131 a computation where all input values are provided by lxvd2x/xxswapdi
38132 sequences, all outputs are stored using xxswapdi/stxvd2x sequences,
38133 and all intermediate computations are pure SIMD (independent of
38134 element order), then all the xxswapdi's associated with the loads
38135 and stores may be removed.
38137 This pass uses some of the infrastructure and logical ideas from
38138 the "web" pass in web.c. We create maximal webs of computations
38139 fitting the description above using union-find. Each such web is
38140 then optimized by removing its unnecessary xxswapdi instructions.
38142 The pass is placed prior to global optimization so that we can
38143 perform the optimization in the safest and simplest way possible;
38144 that is, by replacing each xxswapdi insn with a register copy insn.
38145 Subsequent forward propagation will remove copies where possible.
38147 There are some operations sensitive to element order for which we
38148 can still allow the operation, provided we modify those operations.
38149 These include CONST_VECTORs, for which we must swap the first and
38150 second halves of the constant vector; and SUBREGs, for which we
38151 must adjust the byte offset to account for the swapped doublewords.
38152 A remaining opportunity would be non-immediate-form splats, for
38153 which we should adjust the selected lane of the input. We should
38154 also make code generation adjustments for sum-across operations,
38155 since this is a common vectorizer reduction.
38157 Because we run prior to the first split, we can see loads and stores
38158 here that match *vsx_le_perm_{load,store}_<mode>. These are vanilla
38159 vector loads and stores that have not yet been split into a permuting
38160 load/store and a swap. (One way this can happen is with a builtin
38161 call to vec_vsx_{ld,st}.) We can handle these as well, but rather
38162 than deleting a swap, we convert the load/store into a permuting
38163 load/store (which effectively removes the swap). */
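/* As a concrete illustration (little endian), a vector copy compiled
   naively as

	lxvd2x 0,0,9	# load; doublewords arrive reversed
	xxswapd 0,0	# swap into element order
	xxswapd 0,0	# swap back for the store
	stxvd2x 0,0,10	# store

   is reduced by this pass to just the lxvd2x/stxvd2x pair.  */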
38165 /* Notes on Permutes
38167 We do not currently handle computations that contain permutes. There
38168 is a general transformation that can be performed correctly, but it
38169 may introduce more expensive code than it replaces. To handle these
38170 would require a cost model to determine when to perform the optimization.
38171 This commentary records how this could be done if desired.
38173 The most general permute is something like this (example for V16QI):
38175 (vec_select:V16QI (vec_concat:V32QI (op1:V16QI) (op2:V16QI))
38176 (parallel [(const_int a0) (const_int a1)
38177 ...
38178 (const_int a14) (const_int a15)]))
38180 where a0,...,a15 are in [0,31] and select elements from op1 and op2
38181 to be placed in the result.
38183 Regardless of mode, we can convert the PARALLEL to a mask of 16
38184 byte-element selectors. Let's call this M, with M[i] representing
38185 the ith byte-element selector value. Then if we swap doublewords
38186 throughout the computation, we can get correct behavior by replacing
38187 M with M' as follows:
38189 M'[i] = { (M[i]+8)%16 : M[i] in [0,15]
38190 { ((M[i]+8)%16)+16 : M[i] in [16,31]
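For example, a byte selector M[i] = 0 becomes M'[i] = 8, while a
selector M[i] = 20 becomes ((20+8)%16)+16 = 28.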
38192 This seems promising at first, since we are just replacing one mask
38193 with another. But certain masks are preferable to others. If M
38194 is a mask that matches a vmrghh pattern, for example, M' certainly
38195 will not. Instead of a single vmrghh, we would generate a load of
38196 M' and a vperm. So we would need to know how many xxswapd's we can
38197 remove as a result of this transformation to determine if it's
38198 profitable; and preferably the logic would need to be aware of all
38199 the special preferable masks.
38201 Another form of permute is an UNSPEC_VPERM, in which the mask is
38202 already in a register. In some cases, this mask may be a constant
38203 that we can discover with ud-chains, in which case the above
38204 transformation is ok. However, the common usage here is for the
38205 mask to be produced by an UNSPEC_LVSL, in which case the mask
38206 cannot be known at compile time. In such a case we would have to
38207 generate several instructions to compute M' as above at run time,
38208 and a cost model is needed again.
38210 However, when the mask M for an UNSPEC_VPERM is loaded from the
38211 constant pool, we can replace M with M' as above at no cost
38212 beyond adding a constant pool entry. */
38214 /* This is based on the union-find logic in web.c. web_entry_base is
38215 defined in df.h. */
38216 class swap_web_entry : public web_entry_base
38218 public:
38219 /* Pointer to the insn. */
38220 rtx_insn *insn;
38221 /* Set if insn contains a mention of a vector register. All other
38222 fields are undefined if this field is unset. */
38223 unsigned int is_relevant : 1;
38224 /* Set if insn is a load. */
38225 unsigned int is_load : 1;
38226 /* Set if insn is a store. */
38227 unsigned int is_store : 1;
38228 /* Set if insn is a doubleword swap. This can either be a register swap
38229 or a permuting load or store (test is_load and is_store for this). */
38230 unsigned int is_swap : 1;
38231 /* Set if the insn has a live-in use of a parameter register. */
38232 unsigned int is_live_in : 1;
38233 /* Set if the insn has a live-out def of a return register. */
38234 unsigned int is_live_out : 1;
38235 /* Set if the insn contains a subreg reference of a vector register. */
38236 unsigned int contains_subreg : 1;
38237 /* Set if the insn contains a 128-bit integer operand. */
38238 unsigned int is_128_int : 1;
38239 /* Set if this is a call-insn. */
38240 unsigned int is_call : 1;
38241 /* Set if this insn does not perform a vector operation for which
38242 element order matters, or if we know how to fix it up if it does.
38243 Undefined if is_swap is set. */
38244 unsigned int is_swappable : 1;
38245 /* A nonzero value indicates what kind of special handling for this
38246 insn is required if doublewords are swapped. Undefined if
38247 is_swappable is not set. */
38248 unsigned int special_handling : 4;
38249 /* Set if the web represented by this entry cannot be optimized. */
38250 unsigned int web_not_optimizable : 1;
38251 /* Set if this insn should be deleted. */
38252 unsigned int will_delete : 1;
38255 enum special_handling_values {
38256 SH_NONE = 0,
38257 SH_CONST_VECTOR,
38258 SH_SUBREG,
38259 SH_NOSWAP_LD,
38260 SH_NOSWAP_ST,
38261 SH_EXTRACT,
38262 SH_SPLAT,
38263 SH_XXPERMDI,
38264 SH_CONCAT,
38265 SH_VPERM
38268 /* Union INSN with all insns containing definitions that reach USE.
38269 Detect whether USE is live-in to the current function. */
38270 static void
38271 union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
38273 struct df_link *link = DF_REF_CHAIN (use);
38275 if (!link)
38276 insn_entry[INSN_UID (insn)].is_live_in = 1;
38278 while (link)
38280 if (DF_REF_IS_ARTIFICIAL (link->ref))
38281 insn_entry[INSN_UID (insn)].is_live_in = 1;
38283 if (DF_REF_INSN_INFO (link->ref))
38285 rtx def_insn = DF_REF_INSN (link->ref);
38286 (void)unionfind_union (insn_entry + INSN_UID (insn),
38287 insn_entry + INSN_UID (def_insn));
38290 link = link->next;
38294 /* Union INSN with all insns containing uses reached from DEF.
38295 Detect whether DEF is live-out from the current function. */
38296 static void
38297 union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
38299 struct df_link *link = DF_REF_CHAIN (def);
38301 if (!link)
38302 insn_entry[INSN_UID (insn)].is_live_out = 1;
38304 while (link)
38306 /* This could be an eh use or some other artificial use;
38307 we treat these all the same (killing the optimization). */
38308 if (DF_REF_IS_ARTIFICIAL (link->ref))
38309 insn_entry[INSN_UID (insn)].is_live_out = 1;
38311 if (DF_REF_INSN_INFO (link->ref))
38313 rtx use_insn = DF_REF_INSN (link->ref);
38314 (void)unionfind_union (insn_entry + INSN_UID (insn),
38315 insn_entry + INSN_UID (use_insn));
38318 link = link->next;
38322 /* Return 1 iff INSN is a load insn, including permuting loads that
38323 represent an lxvd2x instruction; else return 0. */
38324 static unsigned int
38325 insn_is_load_p (rtx insn)
38327 rtx body = PATTERN (insn);
38329 if (GET_CODE (body) == SET)
38331 if (GET_CODE (SET_SRC (body)) == MEM)
38332 return 1;
38334 if (GET_CODE (SET_SRC (body)) == VEC_SELECT
38335 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
38336 return 1;
38338 return 0;
38341 if (GET_CODE (body) != PARALLEL)
38342 return 0;
38344 rtx set = XVECEXP (body, 0, 0);
38346 if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
38347 return 1;
38349 return 0;
38352 /* Return 1 iff INSN is a store insn, including permuting stores that
38353 represent a stxvd2x instruction; else return 0. */
38354 static unsigned int
38355 insn_is_store_p (rtx insn)
38357 rtx body = PATTERN (insn);
38358 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
38359 return 1;
38360 if (GET_CODE (body) != PARALLEL)
38361 return 0;
38362 rtx set = XVECEXP (body, 0, 0);
38363 if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
38364 return 1;
38365 return 0;
38368 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
38369 a permuting load, or a permuting store. */
38370 static unsigned int
38371 insn_is_swap_p (rtx insn)
38373 rtx body = PATTERN (insn);
38374 if (GET_CODE (body) != SET)
38375 return 0;
38376 rtx rhs = SET_SRC (body);
38377 if (GET_CODE (rhs) != VEC_SELECT)
38378 return 0;
38379 rtx parallel = XEXP (rhs, 1);
38380 if (GET_CODE (parallel) != PARALLEL)
38381 return 0;
38382 unsigned int len = XVECLEN (parallel, 0);
38383 if (len != 2 && len != 4 && len != 8 && len != 16)
38384 return 0;
38385 for (unsigned int i = 0; i < len / 2; ++i)
38387 rtx op = XVECEXP (parallel, 0, i);
38388 if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
38389 return 0;
38391 for (unsigned int i = len / 2; i < len; ++i)
38393 rtx op = XVECEXP (parallel, 0, i);
38394 if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
38395 return 0;
38397 return 1;
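/* For example, the V2DI form of the swap recognized above is

     (set (reg:V2DI d)
	  (vec_select:V2DI (reg:V2DI s)
			   (parallel [(const_int 1) (const_int 0)])))

   i.e. len == 2 with the two halves of the selector exchanged.  */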
38400 /* Return TRUE if insn is a swap fed by a load from the constant pool. */
38401 static bool
38402 const_load_sequence_p (swap_web_entry *insn_entry, rtx insn)
38404 unsigned uid = INSN_UID (insn);
38405 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load)
38406 return false;
38408 /* Find the unique use in the swap and locate its def. If the def
38409 isn't unique, punt. */
38410 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38411 df_ref use;
38412 FOR_EACH_INSN_INFO_USE (use, insn_info)
38414 struct df_link *def_link = DF_REF_CHAIN (use);
38415 if (!def_link || def_link->next)
38416 return false;
38418 rtx def_insn = DF_REF_INSN (def_link->ref);
38419 unsigned uid2 = INSN_UID (def_insn);
38420 if (!insn_entry[uid2].is_load || !insn_entry[uid2].is_swap)
38421 return false;
38423 rtx body = PATTERN (def_insn);
38424 if (GET_CODE (body) != SET
38425 || GET_CODE (SET_SRC (body)) != VEC_SELECT
38426 || GET_CODE (XEXP (SET_SRC (body), 0)) != MEM)
38427 return false;
38429 rtx mem = XEXP (SET_SRC (body), 0);
38430 rtx base_reg = XEXP (mem, 0);
38432 df_ref base_use;
38433 insn_info = DF_INSN_INFO_GET (def_insn);
38434 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
38436 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
38437 continue;
38439 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
38440 if (!base_def_link || base_def_link->next)
38441 return false;
38443 rtx tocrel_insn = DF_REF_INSN (base_def_link->ref);
38444 rtx tocrel_body = PATTERN (tocrel_insn);
38445 rtx base, offset;
38446 if (GET_CODE (tocrel_body) != SET)
38447 return false;
38448 /* There is an extra level of indirection for small/large
38449 code models. */
38450 rtx tocrel_expr = SET_SRC (tocrel_body);
38451 if (GET_CODE (tocrel_expr) == MEM)
38452 tocrel_expr = XEXP (tocrel_expr, 0);
38453 if (!toc_relative_expr_p (tocrel_expr, false))
38454 return false;
38455 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
38456 if (GET_CODE (base) != SYMBOL_REF || !CONSTANT_POOL_ADDRESS_P (base))
38457 return false;
38460 return true;
38463 /* Return TRUE iff OP matches a V2DF reduction pattern. See the
38464 definition of vsx_reduc_<VEC_reduc_name>_v2df in vsx.md. */
38465 static bool
38466 v2df_reduction_p (rtx op)
38468 if (GET_MODE (op) != V2DFmode)
38469 return false;
38471 enum rtx_code code = GET_CODE (op);
38472 if (code != PLUS && code != SMIN && code != SMAX)
38473 return false;
38475 rtx concat = XEXP (op, 0);
38476 if (GET_CODE (concat) != VEC_CONCAT)
38477 return false;
38479 rtx select0 = XEXP (concat, 0);
38480 rtx select1 = XEXP (concat, 1);
38481 if (GET_CODE (select0) != VEC_SELECT || GET_CODE (select1) != VEC_SELECT)
38482 return false;
38484 rtx reg0 = XEXP (select0, 0);
38485 rtx reg1 = XEXP (select1, 0);
38486 if (!rtx_equal_p (reg0, reg1) || !REG_P (reg0))
38487 return false;
38489 rtx parallel0 = XEXP (select0, 1);
38490 rtx parallel1 = XEXP (select1, 1);
38491 if (GET_CODE (parallel0) != PARALLEL || GET_CODE (parallel1) != PARALLEL)
38492 return false;
38494 if (!rtx_equal_p (XVECEXP (parallel0, 0, 0), const1_rtx)
38495 || !rtx_equal_p (XVECEXP (parallel1, 0, 0), const0_rtx))
38496 return false;
38498 return true;
38501 /* Return 1 iff OP is an operand that will not be affected by having
38502 vector doublewords swapped in memory. */
38503 static unsigned int
38504 rtx_is_swappable_p (rtx op, unsigned int *special)
38506 enum rtx_code code = GET_CODE (op);
38507 int i, j;
38508 rtx parallel;
38510 switch (code)
38512 case LABEL_REF:
38513 case SYMBOL_REF:
38514 case CLOBBER:
38515 case REG:
38516 return 1;
38518 case VEC_CONCAT:
38519 case ASM_INPUT:
38520 case ASM_OPERANDS:
38521 return 0;
38523 case CONST_VECTOR:
38525 *special = SH_CONST_VECTOR;
38526 return 1;
38529 case VEC_DUPLICATE:
38530 /* Opportunity: If XEXP (op, 0) has the same mode as the result,
38531 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
38532 it represents a vector splat for which we can do special
38533 handling. */
38534 if (GET_CODE (XEXP (op, 0)) == CONST_INT)
38535 return 1;
38536 else if (GET_CODE (XEXP (op, 0)) == REG
38537 && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
38538 /* This catches V2DF and V2DI splat, at a minimum. */
38539 return 1;
38540 else if (GET_CODE (XEXP (op, 0)) == VEC_SELECT)
38541 /* If the duplicated item is from a select, defer to the select
38542 processing to see if we can change the lane for the splat. */
38543 return rtx_is_swappable_p (XEXP (op, 0), special);
38544 else
38545 return 0;
38547 case VEC_SELECT:
38548 /* A vec_extract operation is ok if we change the lane. */
38549 if (GET_CODE (XEXP (op, 0)) == REG
38550 && GET_MODE_INNER (GET_MODE (XEXP (op, 0))) == GET_MODE (op)
38551 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
38552 && XVECLEN (parallel, 0) == 1
38553 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT)
38555 *special = SH_EXTRACT;
38556 return 1;
38558 /* An XXPERMDI is ok if we adjust the lanes. Note that if the
38559 XXPERMDI is a swap operation, it will be identified by
38560 insn_is_swap_p and therefore we won't get here. */
38561 else if (GET_CODE (XEXP (op, 0)) == VEC_CONCAT
38562 && (GET_MODE (XEXP (op, 0)) == V4DFmode
38563 || GET_MODE (XEXP (op, 0)) == V4DImode)
38564 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
38565 && XVECLEN (parallel, 0) == 2
38566 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT
38567 && GET_CODE (XVECEXP (parallel, 0, 1)) == CONST_INT)
38569 *special = SH_XXPERMDI;
38570 return 1;
38572 else if (v2df_reduction_p (op))
38573 return 1;
38574 else
38575 return 0;
38577 case UNSPEC:
38579 /* Various operations are unsafe for this optimization, at least
38580 without significant additional work. Permutes are obviously
38581 problematic, as both the permute control vector and the ordering
38582 of the target values are invalidated by doubleword swapping.
38583 Vector pack and unpack modify the number of vector lanes.
38584 Merge-high/low will not operate correctly on swapped operands.
38585 Vector shifts across element boundaries are clearly uncool,
38586 as are vector select and concatenate operations. Vector
38587 sum-across instructions define one operand with a specific
38588 order-dependent element, so additional fixup code would be
38589 needed to make those work. Vector set and non-immediate-form
38590 vector splat are element-order sensitive. A few of these
38591 cases might be workable with special handling if required.
38592 Adding cost modeling would be appropriate in some cases. */
38593 int val = XINT (op, 1);
38594 switch (val)
38596 default:
38597 break;
38598 case UNSPEC_VMRGH_DIRECT:
38599 case UNSPEC_VMRGL_DIRECT:
38600 case UNSPEC_VPACK_SIGN_SIGN_SAT:
38601 case UNSPEC_VPACK_SIGN_UNS_SAT:
38602 case UNSPEC_VPACK_UNS_UNS_MOD:
38603 case UNSPEC_VPACK_UNS_UNS_MOD_DIRECT:
38604 case UNSPEC_VPACK_UNS_UNS_SAT:
38605 case UNSPEC_VPERM:
38606 case UNSPEC_VPERM_UNS:
38607 case UNSPEC_VPERMHI:
38608 case UNSPEC_VPERMSI:
38609 case UNSPEC_VPKPX:
38610 case UNSPEC_VSLDOI:
38611 case UNSPEC_VSLO:
38612 case UNSPEC_VSRO:
38613 case UNSPEC_VSUM2SWS:
38614 case UNSPEC_VSUM4S:
38615 case UNSPEC_VSUM4UBS:
38616 case UNSPEC_VSUMSWS:
38617 case UNSPEC_VSUMSWS_DIRECT:
38618 case UNSPEC_VSX_CONCAT:
38619 case UNSPEC_VSX_SET:
38620 case UNSPEC_VSX_SLDWI:
38621 case UNSPEC_VUNPACK_HI_SIGN:
38622 case UNSPEC_VUNPACK_HI_SIGN_DIRECT:
38623 case UNSPEC_VUNPACK_LO_SIGN:
38624 case UNSPEC_VUNPACK_LO_SIGN_DIRECT:
38625 case UNSPEC_VUPKHPX:
38626 case UNSPEC_VUPKHS_V4SF:
38627 case UNSPEC_VUPKHU_V4SF:
38628 case UNSPEC_VUPKLPX:
38629 case UNSPEC_VUPKLS_V4SF:
38630 case UNSPEC_VUPKLU_V4SF:
38631 case UNSPEC_VSX_CVDPSPN:
38632 case UNSPEC_VSX_CVSPDP:
38633 case UNSPEC_VSX_CVSPDPN:
38634 return 0;
38635 case UNSPEC_VSPLT_DIRECT:
38636 *special = SH_SPLAT;
38637 return 1;
38638 case UNSPEC_REDUC_PLUS:
38639 case UNSPEC_REDUC:
38640 return 1;
38644 default:
38645 break;
38648 const char *fmt = GET_RTX_FORMAT (code);
38649 int ok = 1;
38651 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
38652 if (fmt[i] == 'e' || fmt[i] == 'u')
38654 unsigned int special_op = SH_NONE;
38655 ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
38656 if (special_op == SH_NONE)
38657 continue;
38658 /* Ensure we never have two kinds of special handling
38659 for the same insn. */
38660 if (*special != SH_NONE && *special != special_op)
38661 return 0;
38662 *special = special_op;
38664 else if (fmt[i] == 'E')
38665 for (j = 0; j < XVECLEN (op, i); ++j)
38667 unsigned int special_op = SH_NONE;
38668 ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
38669 if (special_op == SH_NONE)
38670 continue;
38671 /* Ensure we never have two kinds of special handling
38672 for the same insn. */
38673 if (*special != SH_NONE && *special != special_op)
38674 return 0;
38675 *special = special_op;
38678 return ok;
38681 /* Return 1 iff INSN is an insn whose operands will not be affected by
38682 having vector doublewords swapped in memory (in which case
38683 *SPECIAL is unchanged), or that can be modified to be correct
38684 if vector doublewords are swapped in memory (in which case
38685 *SPECIAL is changed to a value indicating how). */
38686 static unsigned int
38687 insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
38688 unsigned int *special)
38690 /* Calls are always bad. */
38691 if (GET_CODE (insn) == CALL_INSN)
38692 return 0;
38694 /* Loads and stores seen here are not permuting, but we can still
38695 fix them up by converting them to permuting ones. Exceptions:
38696 UNSPEC_LVE, UNSPEC_LVX, and UNSPEC_STVX, which have a PARALLEL
38697 body instead of a SET; and UNSPEC_STVE, which has an UNSPEC
38698 for the SET source. Also we must now make an exception for lvx
38699 and stvx when they use the explicit "& -16" address form rather than
38700 the UNSPEC_LVX/STVX form, since converting those leads to unrecognizable insns. */
38701 rtx body = PATTERN (insn);
38702 int i = INSN_UID (insn);
38704 if (insn_entry[i].is_load)
38706 if (GET_CODE (body) == SET)
38708 rtx rhs = SET_SRC (body);
38709 gcc_assert (GET_CODE (rhs) == MEM);
38710 if (GET_CODE (XEXP (rhs, 0)) == AND)
38711 return 0;
38713 *special = SH_NOSWAP_LD;
38714 return 1;
38716 else
38717 return 0;
38720 if (insn_entry[i].is_store)
38722 if (GET_CODE (body) == SET
38723 && GET_CODE (SET_SRC (body)) != UNSPEC)
38725 rtx lhs = SET_DEST (body);
38726 gcc_assert (GET_CODE (lhs) == MEM);
38727 if (GET_CODE (XEXP (lhs, 0)) == AND)
38728 return 0;
38730 *special = SH_NOSWAP_ST;
38731 return 1;
38733 else
38734 return 0;
38737 /* A convert to single precision can be left as is provided that
38738 all of its uses are in xxspltw instructions that splat BE element
38739 zero. */
38740 if (GET_CODE (body) == SET
38741 && GET_CODE (SET_SRC (body)) == UNSPEC
38742 && XINT (SET_SRC (body), 1) == UNSPEC_VSX_CVDPSPN)
38744 df_ref def;
38745 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38747 FOR_EACH_INSN_INFO_DEF (def, insn_info)
38749 struct df_link *link = DF_REF_CHAIN (def);
38750 if (!link)
38751 return 0;
38753 for (; link; link = link->next) {
38754 rtx use_insn = DF_REF_INSN (link->ref);
38755 rtx use_body = PATTERN (use_insn);
38756 if (GET_CODE (use_body) != SET
38757 || GET_CODE (SET_SRC (use_body)) != UNSPEC
38758 || XINT (SET_SRC (use_body), 1) != UNSPEC_VSX_XXSPLTW
38759 || XVECEXP (SET_SRC (use_body), 0, 1) != const0_rtx)
38760 return 0;
38764 return 1;
38767 /* A concatenation of two doublewords is ok if we reverse the
38768 order of the inputs. */
38769 if (GET_CODE (body) == SET
38770 && GET_CODE (SET_SRC (body)) == VEC_CONCAT
38771 && (GET_MODE (SET_SRC (body)) == V2DFmode
38772 || GET_MODE (SET_SRC (body)) == V2DImode))
38774 *special = SH_CONCAT;
38775 return 1;
38778 /* V2DF reductions are always swappable. */
38779 if (GET_CODE (body) == PARALLEL)
38781 rtx expr = XVECEXP (body, 0, 0);
38782 if (GET_CODE (expr) == SET
38783 && v2df_reduction_p (SET_SRC (expr)))
38784 return 1;
38787 /* An UNSPEC_VPERM is ok if the mask operand is loaded from the
38788 constant pool. */
38789 if (GET_CODE (body) == SET
38790 && GET_CODE (SET_SRC (body)) == UNSPEC
38791 && XINT (SET_SRC (body), 1) == UNSPEC_VPERM
38792 && XVECLEN (SET_SRC (body), 0) == 3
38793 && GET_CODE (XVECEXP (SET_SRC (body), 0, 2)) == REG)
38795 rtx mask_reg = XVECEXP (SET_SRC (body), 0, 2);
38796 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38797 df_ref use;
38798 FOR_EACH_INSN_INFO_USE (use, insn_info)
38799 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
38801 struct df_link *def_link = DF_REF_CHAIN (use);
38802 /* Punt if multiple definitions for this reg. */
38803 if (def_link && !def_link->next
38804 && const_load_sequence_p (insn_entry,
38805 DF_REF_INSN (def_link->ref)))
38807 *special = SH_VPERM;
38808 return 1;
38813 /* Otherwise check the operands for vector lane violations. */
38814 return rtx_is_swappable_p (body, special);
38817 enum chain_purpose { FOR_LOADS, FOR_STORES };
38819 /* Return true if the UD or DU chain headed by LINK is non-empty,
38820 and every entry on the chain references an insn that is a
38821 register swap. Furthermore, if PURPOSE is FOR_LOADS, each such
38822 register swap must have only permuting loads as reaching defs.
38823 If PURPOSE is FOR_STORES, each such register swap must have only
38824 register swaps or permuting stores as reached uses. */
38825 static bool
38826 chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
38827 enum chain_purpose purpose)
38829 if (!link)
38830 return false;
38832 for (; link; link = link->next)
38834 if (!ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (DF_REF_REG (link->ref))))
38835 continue;
38837 if (DF_REF_IS_ARTIFICIAL (link->ref))
38838 return false;
38840 rtx reached_insn = DF_REF_INSN (link->ref);
38841 unsigned uid = INSN_UID (reached_insn);
38842 struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);
38844 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
38845 || insn_entry[uid].is_store)
38846 return false;
38848 if (purpose == FOR_LOADS)
38850 df_ref use;
38851 FOR_EACH_INSN_INFO_USE (use, insn_info)
38853 struct df_link *swap_link = DF_REF_CHAIN (use);
38855 while (swap_link)
38857 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
38858 return false;
38860 rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
38861 unsigned uid2 = INSN_UID (swap_def_insn);
38863 /* Only permuting loads are allowed. */
38864 if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
38865 return false;
38867 swap_link = swap_link->next;
38871 else if (purpose == FOR_STORES)
38873 df_ref def;
38874 FOR_EACH_INSN_INFO_DEF (def, insn_info)
38876 struct df_link *swap_link = DF_REF_CHAIN (def);
38878 while (swap_link)
38880 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
38881 return false;
38883 rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
38884 unsigned uid2 = INSN_UID (swap_use_insn);
38886 /* Permuting stores or register swaps are allowed. */
38887 if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
38888 return false;
38890 swap_link = swap_link->next;
38896 return true;
38899 /* Mark the xxswapdi instructions associated with permuting loads and
38900 stores for removal. Note that we only flag them for deletion here,
38901 as there is a possibility of a swap being reached from multiple
38902 loads, etc. */
38903 static void
38904 mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
38906 rtx insn = insn_entry[i].insn;
38907 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38909 if (insn_entry[i].is_load)
38911 df_ref def;
38912 FOR_EACH_INSN_INFO_DEF (def, insn_info)
38914 struct df_link *link = DF_REF_CHAIN (def);
38916 /* We know by now that these are swaps, so we can delete
38917 them confidently. */
38918 while (link)
38920 rtx use_insn = DF_REF_INSN (link->ref);
38921 insn_entry[INSN_UID (use_insn)].will_delete = 1;
38922 link = link->next;
38926 else if (insn_entry[i].is_store)
38928 df_ref use;
38929 FOR_EACH_INSN_INFO_USE (use, insn_info)
38931 /* Ignore uses for addressability. */
38932 machine_mode mode = GET_MODE (DF_REF_REG (use));
38933 if (!ALTIVEC_OR_VSX_VECTOR_MODE (mode))
38934 continue;
38936 struct df_link *link = DF_REF_CHAIN (use);
38938 /* We know by now that these are swaps, so we can delete
38939 them confidently. */
38940 while (link)
38942 rtx def_insn = DF_REF_INSN (link->ref);
38943 insn_entry[INSN_UID (def_insn)].will_delete = 1;
38944 link = link->next;
38950 /* OP is either a CONST_VECTOR or an expression containing one.
38951 In the first case, swap the first half of the vector with the
38952 second half; in the second, recurse to locate the CONST_VECTOR. */
38953 static void
38954 swap_const_vector_halves (rtx op)
38956 int i;
38957 enum rtx_code code = GET_CODE (op);
38958 if (GET_CODE (op) == CONST_VECTOR)
38960 int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
38961 for (i = 0; i < half_units; ++i)
38963 rtx temp = CONST_VECTOR_ELT (op, i);
38964 CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
38965 CONST_VECTOR_ELT (op, i + half_units) = temp;
38968 else
38970 int j;
38971 const char *fmt = GET_RTX_FORMAT (code);
38972 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
38973 if (fmt[i] == 'e' || fmt[i] == 'u')
38974 swap_const_vector_halves (XEXP (op, i));
38975 else if (fmt[i] == 'E')
38976 for (j = 0; j < XVECLEN (op, i); ++j)
38977 swap_const_vector_halves (XVECEXP (op, i, j));
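/* Illustrative example (not compiler code): for a V4SI constant,
   half_units is 2, so { 0, 1, 2, 3 } is rewritten in place as
   { 2, 3, 0, 1 }; once the doubleword swaps feeding its uses are
   removed, the original values end up in the intended lanes.  */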
38981 /* Find all subregs of a vector expression that perform a narrowing,
38982 and adjust the subreg index to account for doubleword swapping. */
38983 static void
38984 adjust_subreg_index (rtx op)
38986 enum rtx_code code = GET_CODE (op);
38987 if (code == SUBREG
38988 && (GET_MODE_SIZE (GET_MODE (op))
38989 < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
38991 unsigned int index = SUBREG_BYTE (op);
38992 if (index < 8)
38993 index += 8;
38994 else
38995 index -= 8;
38996 SUBREG_BYTE (op) = index;
38999 const char *fmt = GET_RTX_FORMAT (code);
39000 int i,j;
39001 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
39002 if (fmt[i] == 'e' || fmt[i] == 'u')
39003 adjust_subreg_index (XEXP (op, i));
39004 else if (fmt[i] == 'E')
39005 for (j = 0; j < XVECLEN (op, i); ++j)
39006 adjust_subreg_index (XVECEXP (op, i, j));
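/* E.g. a narrowing (subreg:DF (reg:V2DF x) 0) becomes
   (subreg:DF (reg:V2DF x) 8) and vice versa ("x" is a placeholder):
   a subreg naming one doubleword must name the other once the
   doublewords are swapped in the register image.  */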
39009 /* Convert the non-permuting load INSN to a permuting one. */
39010 static void
39011 permute_load (rtx_insn *insn)
39013 rtx body = PATTERN (insn);
39014 rtx mem_op = SET_SRC (body);
39015 rtx tgt_reg = SET_DEST (body);
39016 machine_mode mode = GET_MODE (tgt_reg);
39017 int n_elts = GET_MODE_NUNITS (mode);
39018 int half_elts = n_elts / 2;
39019 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
39020 int i, j;
39021 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
39022 XVECEXP (par, 0, i) = GEN_INT (j);
39023 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
39024 XVECEXP (par, 0, i) = GEN_INT (j);
39025 rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
39026 SET_SRC (body) = sel;
39027 INSN_CODE (insn) = -1; /* Force re-recognition. */
39028 df_insn_rescan (insn);
39030 if (dump_file)
39031 fprintf (dump_file, "Replacing load %d with permuted load\n",
39032 INSN_UID (insn));
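/* A sketch of the rewrite for a V4SI load ("x" and "addr" are
   placeholders).  The two loops above build the selection vector
   (parallel [2 3 0 1]), so

       (set (reg:V4SI x) (mem:V4SI addr))

   becomes the permuting form

       (set (reg:V4SI x)
            (vec_select:V4SI (mem:V4SI addr) (parallel [2 3 0 1])))

   which describes the doubleword-reversed order in which the
   little-endian permuting loads actually deliver the data.
   permute_store below is the mirror-image rewrite for stores.  */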
39035 /* Convert the non-permuting store INSN to a permuting one. */
39036 static void
39037 permute_store (rtx_insn *insn)
39039 rtx body = PATTERN (insn);
39040 rtx src_reg = SET_SRC (body);
39041 machine_mode mode = GET_MODE (src_reg);
39042 int n_elts = GET_MODE_NUNITS (mode);
39043 int half_elts = n_elts / 2;
39044 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
39045 int i, j;
39046 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
39047 XVECEXP (par, 0, i) = GEN_INT (j);
39048 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
39049 XVECEXP (par, 0, i) = GEN_INT (j);
39050 rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
39051 SET_SRC (body) = sel;
39052 INSN_CODE (insn) = -1; /* Force re-recognition. */
39053 df_insn_rescan (insn);
39055 if (dump_file)
39056 fprintf (dump_file, "Replacing store %d with permuted store\n",
39057 INSN_UID (insn));
39060 /* Given INSN that contains a vector extract operation, adjust the index
39061 of the extracted lane to account for the doubleword swap. */
39062 static void
39063 adjust_extract (rtx_insn *insn)
39065 rtx pattern = PATTERN (insn);
39066 if (GET_CODE (pattern) == PARALLEL)
39067 pattern = XVECEXP (pattern, 0, 0);
39068 rtx src = SET_SRC (pattern);
39069 /* The vec_select may be wrapped in a vec_duplicate for a splat, so
39070 account for that. */
39071 rtx sel = GET_CODE (src) == VEC_DUPLICATE ? XEXP (src, 0) : src;
39072 rtx par = XEXP (sel, 1);
39073 int half_elts = GET_MODE_NUNITS (GET_MODE (XEXP (sel, 0))) >> 1;
39074 int lane = INTVAL (XVECEXP (par, 0, 0));
39075 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
39076 XVECEXP (par, 0, 0) = GEN_INT (lane);
39077 INSN_CODE (insn) = -1; /* Force re-recognition. */
39078 df_insn_rescan (insn);
39080 if (dump_file)
39081 fprintf (dump_file, "Changing lane for extract %d\n", INSN_UID (insn));
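/* Worked example: for a V4SI extract, half_elts is 2, so lane 1
   becomes lane 3 and lane 3 becomes lane 1 ("x" is a placeholder):

       (vec_select:SI (reg:V4SI x) (parallel [(const_int 1)]))
   ==> (vec_select:SI (reg:V4SI x) (parallel [(const_int 3)]))  */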
39084 /* Given INSN that contains a vector direct-splat operation, adjust the index
39085 of the source lane to account for the doubleword swap. */
39086 static void
39087 adjust_splat (rtx_insn *insn)
39089 rtx body = PATTERN (insn);
39090 rtx unspec = XEXP (body, 1);
39091 int half_elts = GET_MODE_NUNITS (GET_MODE (unspec)) >> 1;
39092 int lane = INTVAL (XVECEXP (unspec, 0, 1));
39093 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
39094 XVECEXP (unspec, 0, 1) = GEN_INT (lane);
39095 INSN_CODE (insn) = -1; /* Force re-recognition. */
39096 df_insn_rescan (insn);
39098 if (dump_file)
39099 fprintf (dump_file, "Changing lane for splat %d\n", INSN_UID (insn));
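/* E.g. a V8HI direct splat of lane 2 (half_elts = 4) becomes a splat
   of lane 6, since that element lives in the other doubleword after
   the swap: the (const_int 2) at position 1 of the UNSPEC's vector
   is replaced by (const_int 6).  */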
39102 /* Given INSN that contains an XXPERMDI operation (that is not a doubleword
39103 swap), reverse the order of the source operands and adjust the indices
39104 of the source lanes to account for doubleword reversal. */
39105 static void
39106 adjust_xxpermdi (rtx_insn *insn)
39108 rtx set = PATTERN (insn);
39109 rtx select = XEXP (set, 1);
39110 rtx concat = XEXP (select, 0);
39111 rtx src0 = XEXP (concat, 0);
39112 XEXP (concat, 0) = XEXP (concat, 1);
39113 XEXP (concat, 1) = src0;
39114 rtx parallel = XEXP (select, 1);
39115 int lane0 = INTVAL (XVECEXP (parallel, 0, 0));
39116 int lane1 = INTVAL (XVECEXP (parallel, 0, 1));
39117 int new_lane0 = 3 - lane1;
39118 int new_lane1 = 3 - lane0;
39119 XVECEXP (parallel, 0, 0) = GEN_INT (new_lane0);
39120 XVECEXP (parallel, 0, 1) = GEN_INT (new_lane1);
39121 INSN_CODE (insn) = -1; /* Force re-recognition. */
39122 df_insn_rescan (insn);
39124 if (dump_file)
39125 fprintf (dump_file, "Changing lanes for xxpermdi %d\n", INSN_UID (insn));
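/* Worked example: an xxpermdi selecting doublewords [0, 2] from
   (vec_concat A B) becomes one selecting [1, 3] from (vec_concat B A):
   with lane0 = 0 and lane1 = 2, new_lane0 = 3 - 2 = 1 and
   new_lane1 = 3 - 0 = 3, which selects the same data once both
   inputs have their doublewords swapped.  */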
39128 /* Given INSN that contains a VEC_CONCAT operation of two doublewords,
39129 reverse the order of those inputs. */
39130 static void
39131 adjust_concat (rtx_insn *insn)
39133 rtx set = PATTERN (insn);
39134 rtx concat = XEXP (set, 1);
39135 rtx src0 = XEXP (concat, 0);
39136 XEXP (concat, 0) = XEXP (concat, 1);
39137 XEXP (concat, 1) = src0;
39138 INSN_CODE (insn) = -1; /* Force re-recognition. */
39139 df_insn_rescan (insn);
39141 if (dump_file)
39142 fprintf (dump_file, "Reversing inputs for concat %d\n", INSN_UID (insn));
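/* E.g. (vec_concat:V2DF (reg A) (reg B)) simply becomes
   (vec_concat:V2DF (reg B) (reg A)); with each input's value
   doubleword-swapped, reversing the operands reproduces the
   intended vector.  */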
39145 /* Given an UNSPEC_VPERM insn, modify the mask loaded from the
39146 constant pool to reflect swapped doublewords. */
39147 static void
39148 adjust_vperm (rtx_insn *insn)
39150 /* We previously determined that the UNSPEC_VPERM was fed by a
39151 swap of a swapping load of a TOC-relative constant pool symbol.
39152 Find the MEM in the swapping load and replace it with a MEM for
39153 the adjusted mask constant. */
39154 rtx set = PATTERN (insn);
39155 rtx mask_reg = XVECEXP (SET_SRC (set), 0, 2);
39157 /* Find the swap. */
39158 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39159 df_ref use;
39160 rtx_insn *swap_insn = 0;
39161 FOR_EACH_INSN_INFO_USE (use, insn_info)
39162 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
39164 struct df_link *def_link = DF_REF_CHAIN (use);
39165 gcc_assert (def_link && !def_link->next);
39166 swap_insn = DF_REF_INSN (def_link->ref);
39167 break;
39169 gcc_assert (swap_insn);
39171 /* Find the load. */
39172 insn_info = DF_INSN_INFO_GET (swap_insn);
39173 rtx_insn *load_insn = 0;
39174 FOR_EACH_INSN_INFO_USE (use, insn_info)
39176 struct df_link *def_link = DF_REF_CHAIN (use);
39177 gcc_assert (def_link && !def_link->next);
39178 load_insn = DF_REF_INSN (def_link->ref);
39179 break;
39181 gcc_assert (load_insn);
39183 /* Find the TOC-relative symbol access. */
39184 insn_info = DF_INSN_INFO_GET (load_insn);
39185 rtx_insn *tocrel_insn = 0;
39186 FOR_EACH_INSN_INFO_USE (use, insn_info)
39188 struct df_link *def_link = DF_REF_CHAIN (use);
39189 gcc_assert (def_link && !def_link->next);
39190 tocrel_insn = DF_REF_INSN (def_link->ref);
39191 break;
39193 gcc_assert (tocrel_insn);
39195 /* Find the embedded CONST_VECTOR. We have to call toc_relative_expr_p
39196 to set tocrel_base; otherwise it would be unnecessary as we've
39197 already established it will return true. */
39198 rtx base, offset;
39199 rtx tocrel_expr = SET_SRC (PATTERN (tocrel_insn));
39200 /* There is an extra level of indirection for small/large code models. */
39201 if (GET_CODE (tocrel_expr) == MEM)
39202 tocrel_expr = XEXP (tocrel_expr, 0);
39203 if (!toc_relative_expr_p (tocrel_expr, false))
39204 gcc_unreachable ();
39205 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
39206 rtx const_vector = get_pool_constant (base);
39207 /* With the extra indirection, get_pool_constant will produce the
39208 real constant from the reg_equal expression, so get the real
39209 constant. */
39210 if (GET_CODE (const_vector) == SYMBOL_REF)
39211 const_vector = get_pool_constant (const_vector);
39212 gcc_assert (GET_CODE (const_vector) == CONST_VECTOR);
39214 /* Create an adjusted mask from the initial mask. */
39215 unsigned int new_mask[16], i, val;
39216 for (i = 0; i < 16; ++i) {
39217 val = INTVAL (XVECEXP (const_vector, 0, i));
39218 if (val < 16)
39219 new_mask[i] = (val + 8) % 16;
39220 else
39221 new_mask[i] = ((val + 8) % 16) + 16;
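/* For example (illustrative): selector 0, the first byte of the
   first input, becomes 8; 8 becomes 0; 15 becomes 7; and selectors
   16..31, which name bytes of the second input, are remapped within
   their own range, e.g. 16 -> 24 and 24 -> 16.  */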
39224 /* Create a new CONST_VECTOR and a MEM that references it. */
39225 rtx vals = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
39226 for (i = 0; i < 16; ++i)
39227 XVECEXP (vals, 0, i) = GEN_INT (new_mask[i]);
39228 rtx new_const_vector = gen_rtx_CONST_VECTOR (V16QImode, XVEC (vals, 0));
39229 rtx new_mem = force_const_mem (V16QImode, new_const_vector);
39230 /* This gives us a MEM whose base operand is a SYMBOL_REF, which we
39231 can't recognize. Force the SYMBOL_REF into a register. */
39232 if (!REG_P (XEXP (new_mem, 0))) {
39233 rtx base_reg = force_reg (Pmode, XEXP (new_mem, 0));
39234 XEXP (new_mem, 0) = base_reg;
39235 /* Move the newly created insn ahead of the load insn. */
39236 rtx_insn *force_insn = get_last_insn ();
39237 remove_insn (force_insn);
39238 rtx_insn *before_load_insn = PREV_INSN (load_insn);
39239 add_insn_after (force_insn, before_load_insn, BLOCK_FOR_INSN (load_insn));
39240 df_insn_rescan (before_load_insn);
39241 df_insn_rescan (force_insn);
39244 /* Replace the MEM in the load instruction and rescan it. */
39245 XEXP (SET_SRC (PATTERN (load_insn)), 0) = new_mem;
39246 INSN_CODE (load_insn) = -1; /* Force re-recognition. */
39247 df_insn_rescan (load_insn);
39249 if (dump_file)
39250 fprintf (dump_file, "Adjusting mask for vperm %d\n", INSN_UID (insn));
39253 /* The insn described by INSN_ENTRY[I] can be swapped, but only
39254 with special handling. Take care of that here. */
39255 static void
39256 handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
39258 rtx_insn *insn = insn_entry[i].insn;
39259 rtx body = PATTERN (insn);
39261 switch (insn_entry[i].special_handling)
39263 default:
39264 gcc_unreachable ();
39265 case SH_CONST_VECTOR:
39267 /* A CONST_VECTOR will only show up somewhere in the RHS of a SET. */
39268 gcc_assert (GET_CODE (body) == SET);
39269 rtx rhs = SET_SRC (body);
39270 swap_const_vector_halves (rhs);
39271 if (dump_file)
39272 fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
39273 break;
39275 case SH_SUBREG:
39276 /* A subreg of the same size is already safe. For subregs that
39277 select a smaller portion of a reg, adjust the index for
39278 swapped doublewords. */
39279 adjust_subreg_index (body);
39280 if (dump_file)
39281 fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
39282 break;
39283 case SH_NOSWAP_LD:
39284 /* Convert a non-permuting load to a permuting one. */
39285 permute_load (insn);
39286 break;
39287 case SH_NOSWAP_ST:
39288 /* Convert a non-permuting store to a permuting one. */
39289 permute_store (insn);
39290 break;
39291 case SH_EXTRACT:
39292 /* Change the lane on an extract operation. */
39293 adjust_extract (insn);
39294 break;
39295 case SH_SPLAT:
39296 /* Change the lane on a direct-splat operation. */
39297 adjust_splat (insn);
39298 break;
39299 case SH_XXPERMDI:
39300 /* Change the lanes on an XXPERMDI operation. */
39301 adjust_xxpermdi (insn);
39302 break;
39303 case SH_CONCAT:
39304 /* Reverse the order of a concatenation operation. */
39305 adjust_concat (insn);
39306 break;
39307 case SH_VPERM:
39308 /* Change the mask loaded from the constant pool for a VPERM. */
39309 adjust_vperm (insn);
39310 break;
39314 /* Find the insn from the Ith table entry, which is known to be a
39315 register swap Y = SWAP(X). Replace it with a copy Y = X. */
39316 static void
39317 replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
39319 rtx_insn *insn = insn_entry[i].insn;
39320 rtx body = PATTERN (insn);
39321 rtx src_reg = XEXP (SET_SRC (body), 0);
39322 rtx copy = gen_rtx_SET (SET_DEST (body), src_reg);
39323 rtx_insn *new_insn = emit_insn_before (copy, insn);
39324 set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
39325 df_insn_rescan (new_insn);
39327 if (dump_file)
39329 unsigned int new_uid = INSN_UID (new_insn);
39330 fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
39333 df_insn_delete (insn);
39334 remove_insn (insn);
39335 insn->set_deleted ();
39338 /* Dump the swap table to DUMP_FILE. */
39339 static void
39340 dump_swap_insn_table (swap_web_entry *insn_entry)
39342 int e = get_max_uid ();
39343 fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");
39345 for (int i = 0; i < e; ++i)
39346 if (insn_entry[i].is_relevant)
39348 swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
39349 fprintf (dump_file, "%6d %6d ", i,
39350 pred_entry && pred_entry->insn
39351 ? INSN_UID (pred_entry->insn) : 0);
39352 if (insn_entry[i].is_load)
39353 fputs ("load ", dump_file);
39354 if (insn_entry[i].is_store)
39355 fputs ("store ", dump_file);
39356 if (insn_entry[i].is_swap)
39357 fputs ("swap ", dump_file);
39358 if (insn_entry[i].is_live_in)
39359 fputs ("live-in ", dump_file);
39360 if (insn_entry[i].is_live_out)
39361 fputs ("live-out ", dump_file);
39362 if (insn_entry[i].contains_subreg)
39363 fputs ("subreg ", dump_file);
39364 if (insn_entry[i].is_128_int)
39365 fputs ("int128 ", dump_file);
39366 if (insn_entry[i].is_call)
39367 fputs ("call ", dump_file);
39368 if (insn_entry[i].is_swappable)
39370 fputs ("swappable ", dump_file);
39371 if (insn_entry[i].special_handling == SH_CONST_VECTOR)
39372 fputs ("special:constvec ", dump_file);
39373 else if (insn_entry[i].special_handling == SH_SUBREG)
39374 fputs ("special:subreg ", dump_file);
39375 else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
39376 fputs ("special:load ", dump_file);
39377 else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
39378 fputs ("special:store ", dump_file);
39379 else if (insn_entry[i].special_handling == SH_EXTRACT)
39380 fputs ("special:extract ", dump_file);
39381 else if (insn_entry[i].special_handling == SH_SPLAT)
39382 fputs ("special:splat ", dump_file);
39383 else if (insn_entry[i].special_handling == SH_XXPERMDI)
39384 fputs ("special:xxpermdi ", dump_file);
39385 else if (insn_entry[i].special_handling == SH_CONCAT)
39386 fputs ("special:concat ", dump_file);
39387 else if (insn_entry[i].special_handling == SH_VPERM)
39388 fputs ("special:vperm ", dump_file);
39390 if (insn_entry[i].web_not_optimizable)
39391 fputs ("unoptimizable ", dump_file);
39392 if (insn_entry[i].will_delete)
39393 fputs ("delete ", dump_file);
39394 fputs ("\n", dump_file);
39396 fputs ("\n", dump_file);
39399 /* Return ALIGN with its address canonicalized to (reg) or (plus reg reg).
39400 Here ALIGN is an (and addr (const_int -16)). Always return a new rtx
39401 to avoid problems with combine. */
39402 static rtx
39403 alignment_with_canonical_addr (rtx align)
39405 rtx canon;
39406 rtx addr = XEXP (align, 0);
39408 if (REG_P (addr))
39409 canon = addr;
39411 else if (GET_CODE (addr) == PLUS)
39413 rtx addrop0 = XEXP (addr, 0);
39414 rtx addrop1 = XEXP (addr, 1);
39416 if (!REG_P (addrop0))
39417 addrop0 = force_reg (GET_MODE (addrop0), addrop0);
39419 if (!REG_P (addrop1))
39420 addrop1 = force_reg (GET_MODE (addrop1), addrop1);
39422 canon = gen_rtx_PLUS (GET_MODE (addr), addrop0, addrop1);
39425 else
39426 canon = force_reg (GET_MODE (addr), addr);
39428 return gen_rtx_AND (GET_MODE (align), canon, GEN_INT (-16));
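/* E.g. (and (plus (reg r1) (const_int 32)) (const_int -16)) comes
   back as (and (plus (reg r1) (reg rN)) (const_int -16)), where rN
   is a fresh register loaded with 32 (register names here are
   placeholders).  */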
39431 /* Check whether an rtx is an alignment mask, and if so, return
39432 a fully-expanded rtx for the masking operation. */
39433 static rtx
39434 alignment_mask (rtx_insn *insn)
39436 rtx body = PATTERN (insn);
39438 if (GET_CODE (body) != SET
39439 || GET_CODE (SET_SRC (body)) != AND
39440 || !REG_P (XEXP (SET_SRC (body), 0)))
39441 return 0;
39443 rtx mask = XEXP (SET_SRC (body), 1);
39445 if (GET_CODE (mask) == CONST_INT)
39447 if (INTVAL (mask) == -16)
39448 return alignment_with_canonical_addr (SET_SRC (body));
39449 else
39450 return 0;
39453 if (!REG_P (mask))
39454 return 0;
39456 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39457 df_ref use;
39458 rtx real_mask = 0;
39460 FOR_EACH_INSN_INFO_USE (use, insn_info)
39462 if (!rtx_equal_p (DF_REF_REG (use), mask))
39463 continue;
39465 struct df_link *def_link = DF_REF_CHAIN (use);
39466 if (!def_link || def_link->next)
39467 return 0;
39469 rtx_insn *const_insn = DF_REF_INSN (def_link->ref);
39470 rtx const_body = PATTERN (const_insn);
39471 if (GET_CODE (const_body) != SET)
39472 return 0;
39474 real_mask = SET_SRC (const_body);
39476 if (GET_CODE (real_mask) != CONST_INT
39477 || INTVAL (real_mask) != -16)
39478 return 0;
39481 if (real_mask == 0)
39482 return 0;
39484 return alignment_with_canonical_addr (SET_SRC (body));
39487 /* Given INSN that's a load or store based at BASE_REG, look for a
39488 feeding computation that aligns its address on a 16-byte boundary. */
39489 static rtx
39490 find_alignment_op (rtx_insn *insn, rtx base_reg)
39492 df_ref base_use;
39493 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39494 rtx and_operation = 0;
39496 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
39498 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
39499 continue;
39501 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
39502 if (!base_def_link || base_def_link->next)
39503 break;
39505 rtx_insn *and_insn = DF_REF_INSN (base_def_link->ref);
39506 and_operation = alignment_mask (and_insn);
39507 if (and_operation != 0)
39508 break;
39511 return and_operation;
39514 struct del_info { bool replace; rtx_insn *replace_insn; };
39516 /* If INSN is the load for an lvx pattern, put it in canonical form. */
39517 static void
39518 recombine_lvx_pattern (rtx_insn *insn, del_info *to_delete)
39520 rtx body = PATTERN (insn);
39521 gcc_assert (GET_CODE (body) == SET
39522 && GET_CODE (SET_SRC (body)) == VEC_SELECT
39523 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM);
39525 rtx mem = XEXP (SET_SRC (body), 0);
39526 rtx base_reg = XEXP (mem, 0);
39528 rtx and_operation = find_alignment_op (insn, base_reg);
39530 if (and_operation != 0)
39532 df_ref def;
39533 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39534 FOR_EACH_INSN_INFO_DEF (def, insn_info)
39536 struct df_link *link = DF_REF_CHAIN (def);
39537 if (!link || link->next)
39538 break;
39540 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
39541 if (!insn_is_swap_p (swap_insn)
39542 || insn_is_load_p (swap_insn)
39543 || insn_is_store_p (swap_insn))
39544 break;
39546 /* Expected lvx pattern found. Change the swap to
39547 a copy, and propagate the AND operation into the
39548 load. */
39549 to_delete[INSN_UID (swap_insn)].replace = true;
39550 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
39552 XEXP (mem, 0) = and_operation;
39553 SET_SRC (body) = mem;
39554 INSN_CODE (insn) = -1; /* Force re-recognition. */
39555 df_insn_rescan (insn);
39557 if (dump_file)
39558 fprintf (dump_file, "lvx opportunity found at %d\n",
39559 INSN_UID (insn));
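/* A sketch of the lvx recombination, with hypothetical registers.
   Given a base register b defined by (set (reg b) (and (reg a)
   (const_int -16))), the pair

       (set (reg:V4SI x)
            (vec_select:V4SI (mem:V4SI (reg b)) (parallel [2 3 0 1])))
       (set (reg:V4SI y)
            (vec_select:V4SI (reg:V4SI x) (parallel [2 3 0 1])))

   becomes

       (set (reg:V4SI x) (mem:V4SI (and (reg a) (const_int -16))))
       (set (reg:V4SI y) (reg:V4SI x))

   i.e. the alignment AND is folded into the load address (the
   explicit "& -16" lvx form) and the trailing swap is queued to be
   replaced by a copy.  recombine_stvx_pattern below is analogous.  */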
39564 /* If INSN is the store for an stvx pattern, put it in canonical form. */
39565 static void
39566 recombine_stvx_pattern (rtx_insn *insn, del_info *to_delete)
39568 rtx body = PATTERN (insn);
39569 gcc_assert (GET_CODE (body) == SET
39570 && GET_CODE (SET_DEST (body)) == MEM
39571 && GET_CODE (SET_SRC (body)) == VEC_SELECT);
39572 rtx mem = SET_DEST (body);
39573 rtx base_reg = XEXP (mem, 0);
39575 rtx and_operation = find_alignment_op (insn, base_reg);
39577 if (and_operation != 0)
39579 rtx src_reg = XEXP (SET_SRC (body), 0);
39580 df_ref src_use;
39581 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39582 FOR_EACH_INSN_INFO_USE (src_use, insn_info)
39584 if (!rtx_equal_p (DF_REF_REG (src_use), src_reg))
39585 continue;
39587 struct df_link *link = DF_REF_CHAIN (src_use);
39588 if (!link || link->next)
39589 break;
39591 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
39592 if (!insn_is_swap_p (swap_insn)
39593 || insn_is_load_p (swap_insn)
39594 || insn_is_store_p (swap_insn))
39595 break;
39597 /* Expected stvx pattern found. Change the swap to
39598 a copy, and propagate the AND operation into the
39599 store. */
39600 to_delete[INSN_UID (swap_insn)].replace = true;
39601 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
39603 XEXP (mem, 0) = and_operation;
39604 SET_SRC (body) = src_reg;
39605 INSN_CODE (insn) = -1; /* Force re-recognition. */
39606 df_insn_rescan (insn);
39608 if (dump_file)
39609 fprintf (dump_file, "stvx opportunity found at %d\n",
39610 INSN_UID (insn));
39615 /* Look for patterns created from builtin lvx and stvx calls, and
39616 canonicalize them to be properly recognized as such. */
39617 static void
39618 recombine_lvx_stvx_patterns (function *fun)
39620 int i;
39621 basic_block bb;
39622 rtx_insn *insn;
39624 int num_insns = get_max_uid ();
39625 del_info *to_delete = XCNEWVEC (del_info, num_insns);
39627 FOR_ALL_BB_FN (bb, fun)
39628 FOR_BB_INSNS (bb, insn)
39630 if (!NONDEBUG_INSN_P (insn))
39631 continue;
39633 if (insn_is_load_p (insn) && insn_is_swap_p (insn))
39634 recombine_lvx_pattern (insn, to_delete);
39635 else if (insn_is_store_p (insn) && insn_is_swap_p (insn))
39636 recombine_stvx_pattern (insn, to_delete);
39639 /* Turning swaps into copies is delayed until now, to avoid problems
39640 with deleting instructions during the insn walk. */
39641 for (i = 0; i < num_insns; i++)
39642 if (to_delete[i].replace)
39644 rtx swap_body = PATTERN (to_delete[i].replace_insn);
39645 rtx src_reg = XEXP (SET_SRC (swap_body), 0);
39646 rtx copy = gen_rtx_SET (SET_DEST (swap_body), src_reg);
39647 rtx_insn *new_insn = emit_insn_before (copy,
39648 to_delete[i].replace_insn);
39649 set_block_for_insn (new_insn,
39650 BLOCK_FOR_INSN (to_delete[i].replace_insn));
39651 df_insn_rescan (new_insn);
39652 df_insn_delete (to_delete[i].replace_insn);
39653 remove_insn (to_delete[i].replace_insn);
39654 to_delete[i].replace_insn->set_deleted ();
39657 free (to_delete);
39660 /* Main entry point for this pass.  In outline: compute def-use and
use-def chains; canonicalize built-in lvx/stvx patterns; walk the
insns, gathering data and building union-find webs of related vector
computations; mark webs that cannot safely be optimized; then, within
each optimizable web, apply any required special handling and replace
the now-redundant doubleword swaps with simple copies. */
39661 unsigned int
39662 rs6000_analyze_swaps (function *fun)
39664 swap_web_entry *insn_entry;
39665 basic_block bb;
39666 rtx_insn *insn, *curr_insn = 0;
39668 /* Dataflow analysis for use-def chains. */
39669 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
39670 df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
39671 df_analyze ();
39672 df_set_flags (DF_DEFER_INSN_RESCAN);
39674 /* Pre-pass to recombine lvx and stvx patterns so we don't lose info. */
39675 recombine_lvx_stvx_patterns (fun);
39677 /* Allocate structure to represent webs of insns. */
39678 insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());
39680 /* Walk the insns to gather basic data. */
39681 FOR_ALL_BB_FN (bb, fun)
39682 FOR_BB_INSNS_SAFE (bb, insn, curr_insn)
39684 unsigned int uid = INSN_UID (insn);
39685 if (NONDEBUG_INSN_P (insn))
39687 insn_entry[uid].insn = insn;
39689 if (GET_CODE (insn) == CALL_INSN)
39690 insn_entry[uid].is_call = 1;
39692 /* Walk the uses and defs to see if we mention vector regs.
39693 Record any constraints on optimization of such mentions. */
39694 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39695 df_ref mention;
39696 FOR_EACH_INSN_INFO_USE (mention, insn_info)
39698 /* We use DF_REF_REAL_REG here to get inside any subregs. */
39699 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
39701 /* If a use gets its value from a call insn, it will be
39702 a hard register and will look like (reg:V4SI 3 3).
39703 The df analysis creates two mentions for GPR3 and GPR4,
39704 both DImode. We must recognize this and treat it as a
39705 vector mention to ensure the call is unioned with this
39706 use. */
39707 if (mode == DImode && DF_REF_INSN_INFO (mention))
39709 rtx feeder = DF_REF_INSN (mention);
39710 /* FIXME: It is pretty hard to get from the df mention
39711 to the mode of the use in the insn. We arbitrarily
39712 pick a vector mode here, even though the use might
39713 be a real DImode. We can be too conservative
39714 (create a web larger than necessary) because of
39715 this, so consider eventually fixing this. */
39716 if (GET_CODE (feeder) == CALL_INSN)
39717 mode = V4SImode;
39720 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
39722 insn_entry[uid].is_relevant = 1;
39723 if (mode == TImode || mode == V1TImode
39724 || FLOAT128_VECTOR_P (mode))
39725 insn_entry[uid].is_128_int = 1;
39726 if (DF_REF_INSN_INFO (mention))
39727 insn_entry[uid].contains_subreg
39728 = !rtx_equal_p (DF_REF_REG (mention),
39729 DF_REF_REAL_REG (mention));
39730 union_defs (insn_entry, insn, mention);
39733 FOR_EACH_INSN_INFO_DEF (mention, insn_info)
39735 /* We use DF_REF_REAL_REG here to get inside any subregs. */
39736 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
39738 /* If we're loading up a hard vector register for a call,
39739 it looks like (set (reg:V4SI 9 9) (...)). The df
39740 analysis creates two mentions for GPR9 and GPR10, both
39741 DImode. So relying on the mode from the mentions
39742 isn't sufficient to ensure we union the call into the
39743 web with the parameter setup code. */
39744 if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
39745 && ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (SET_DEST (PATTERN (insn)))))
39746 mode = GET_MODE (SET_DEST (PATTERN (insn)));
39748 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
39750 insn_entry[uid].is_relevant = 1;
39751 if (mode == TImode || mode == V1TImode
39752 || FLOAT128_VECTOR_P (mode))
39753 insn_entry[uid].is_128_int = 1;
39754 if (DF_REF_INSN_INFO (mention))
39755 insn_entry[uid].contains_subreg
39756 = !rtx_equal_p (DF_REF_REG (mention),
39757 DF_REF_REAL_REG (mention));
39758 /* REG_FUNCTION_VALUE_P is not valid for subregs. */
39759 else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
39760 insn_entry[uid].is_live_out = 1;
39761 union_uses (insn_entry, insn, mention);
39765 if (insn_entry[uid].is_relevant)
39767 /* Determine if this is a load or store. */
39768 insn_entry[uid].is_load = insn_is_load_p (insn);
39769 insn_entry[uid].is_store = insn_is_store_p (insn);
39771 /* Determine if this is a doubleword swap. If not,
39772 determine whether it can legally be swapped. */
39773 if (insn_is_swap_p (insn))
39774 insn_entry[uid].is_swap = 1;
39775 else
39777 unsigned int special = SH_NONE;
39778 insn_entry[uid].is_swappable
39779 = insn_is_swappable_p (insn_entry, insn, &special);
39780 if (special != SH_NONE && insn_entry[uid].contains_subreg)
39781 insn_entry[uid].is_swappable = 0;
39782 else if (special != SH_NONE)
39783 insn_entry[uid].special_handling = special;
39784 else if (insn_entry[uid].contains_subreg)
39785 insn_entry[uid].special_handling = SH_SUBREG;
39791 if (dump_file)
39793 fprintf (dump_file, "\nSwap insn entry table when first built\n");
39794 dump_swap_insn_table (insn_entry);
39797 /* Record unoptimizable webs. */
39798 unsigned e = get_max_uid (), i;
39799 for (i = 0; i < e; ++i)
39801 if (!insn_entry[i].is_relevant)
39802 continue;
39804 swap_web_entry *root
39805 = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();
39807 if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
39808 || (insn_entry[i].contains_subreg
39809 && insn_entry[i].special_handling != SH_SUBREG)
39810 || insn_entry[i].is_128_int || insn_entry[i].is_call
39811 || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
39812 root->web_not_optimizable = 1;
39814 /* If we have loads or stores that aren't permuting then the
39815 optimization isn't appropriate. */
39816 else if ((insn_entry[i].is_load || insn_entry[i].is_store)
39817 && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
39818 root->web_not_optimizable = 1;
39820 /* If we have permuting loads or stores that are not accompanied
39821 by a register swap, the optimization isn't appropriate. */
39822 else if (insn_entry[i].is_load && insn_entry[i].is_swap)
39824 rtx insn = insn_entry[i].insn;
39825 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39826 df_ref def;
39828 FOR_EACH_INSN_INFO_DEF (def, insn_info)
39830 struct df_link *link = DF_REF_CHAIN (def);
39832 if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
39834 root->web_not_optimizable = 1;
39835 break;
39839 else if (insn_entry[i].is_store && insn_entry[i].is_swap)
39841 rtx insn = insn_entry[i].insn;
39842 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39843 df_ref use;
39845 FOR_EACH_INSN_INFO_USE (use, insn_info)
39847 struct df_link *link = DF_REF_CHAIN (use);
39849 if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
39851 root->web_not_optimizable = 1;
39852 break;
39858 if (dump_file)
39860 fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
39861 dump_swap_insn_table (insn_entry);
39864 /* For each load and store in an optimizable web (which implies
39865 the loads and stores are permuting), find the associated
39866 register swaps and mark them for removal. Due to various
39867 optimizations we may mark the same swap more than once. Also
39868 perform special handling for swappable insns that require it. */
39869 for (i = 0; i < e; ++i)
39870 if ((insn_entry[i].is_load || insn_entry[i].is_store)
39871 && insn_entry[i].is_swap)
39873 swap_web_entry* root_entry
39874 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
39875 if (!root_entry->web_not_optimizable)
39876 mark_swaps_for_removal (insn_entry, i);
39878 else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
39880 swap_web_entry* root_entry
39881 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
39882 if (!root_entry->web_not_optimizable)
39883 handle_special_swappables (insn_entry, i);
39886 /* Now delete the swaps marked for removal. */
39887 for (i = 0; i < e; ++i)
39888 if (insn_entry[i].will_delete)
39889 replace_swap_with_copy (insn_entry, i);
39891 /* Clean up. */
39892 free (insn_entry);
39893 return 0;
39896 const pass_data pass_data_analyze_swaps =
39898 RTL_PASS, /* type */
39899 "swaps", /* name */
39900 OPTGROUP_NONE, /* optinfo_flags */
39901 TV_NONE, /* tv_id */
39902 0, /* properties_required */
39903 0, /* properties_provided */
39904 0, /* properties_destroyed */
39905 0, /* todo_flags_start */
39906 TODO_df_finish, /* todo_flags_finish */
39909 class pass_analyze_swaps : public rtl_opt_pass
39911 public:
39912 pass_analyze_swaps (gcc::context *ctxt)
39913 : rtl_opt_pass (pass_data_analyze_swaps, ctxt)
39916 /* opt_pass methods: */
39917 virtual bool gate (function *)
39919 return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
39920 && !TARGET_P9_VECTOR && rs6000_optimize_swaps);
39923 virtual unsigned int execute (function *fun)
39925 return rs6000_analyze_swaps (fun);
39928 }; // class pass_analyze_swaps
39930 rtl_opt_pass *
39931 make_pass_analyze_swaps (gcc::context *ctxt)
39933 return new pass_analyze_swaps (ctxt);
39936 #ifdef RS6000_GLIBC_ATOMIC_FENV
39937 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39938 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39939 #endif
39941 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39943 static void
39944 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39946 if (!TARGET_HARD_FLOAT || !TARGET_FPRS)
39948 #ifdef RS6000_GLIBC_ATOMIC_FENV
39949 if (atomic_hold_decl == NULL_TREE)
39951 atomic_hold_decl
39952 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39953 get_identifier ("__atomic_feholdexcept"),
39954 build_function_type_list (void_type_node,
39955 double_ptr_type_node,
39956 NULL_TREE));
39957 TREE_PUBLIC (atomic_hold_decl) = 1;
39958 DECL_EXTERNAL (atomic_hold_decl) = 1;
39961 if (atomic_clear_decl == NULL_TREE)
39963 atomic_clear_decl
39964 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39965 get_identifier ("__atomic_feclearexcept"),
39966 build_function_type_list (void_type_node,
39967 NULL_TREE));
39968 TREE_PUBLIC (atomic_clear_decl) = 1;
39969 DECL_EXTERNAL (atomic_clear_decl) = 1;
39972 tree const_double = build_qualified_type (double_type_node,
39973 TYPE_QUAL_CONST);
39974 tree const_double_ptr = build_pointer_type (const_double);
39975 if (atomic_update_decl == NULL_TREE)
39977 atomic_update_decl
39978 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39979 get_identifier ("__atomic_feupdateenv"),
39980 build_function_type_list (void_type_node,
39981 const_double_ptr,
39982 NULL_TREE));
39983 TREE_PUBLIC (atomic_update_decl) = 1;
39984 DECL_EXTERNAL (atomic_update_decl) = 1;
39987 tree fenv_var = create_tmp_var_raw (double_type_node);
39988 TREE_ADDRESSABLE (fenv_var) = 1;
39989 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39991 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39992 *clear = build_call_expr (atomic_clear_decl, 0);
39993 *update = build_call_expr (atomic_update_decl, 1,
39994 fold_convert (const_double_ptr, fenv_addr));
39995 #endif
39996 return;
39999 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
40000 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
40001 tree call_mffs = build_call_expr (mffs, 0);
40003 /* Generates the equivalent of feholdexcept (&fenv_var)
40005 *fenv_var = __builtin_mffs ();
40006 double fenv_hold;
40007 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
40008 __builtin_mtfsf (0xff, fenv_hold); */
40010 /* Mask to clear everything except for the rounding modes and non-IEEE
40011 arithmetic flag. */
40012 const unsigned HOST_WIDE_INT hold_exception_mask =
40013 HOST_WIDE_INT_C (0xffffffff00000007);
40015 tree fenv_var = create_tmp_var_raw (double_type_node);
40017 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
40019 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
40020 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
40021 build_int_cst (uint64_type_node,
40022 hold_exception_mask));
40024 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
40025 fenv_llu_and);
40027 tree hold_mtfsf = build_call_expr (mtfsf, 2,
40028 build_int_cst (unsigned_type_node, 0xff),
40029 fenv_hold_mtfsf);
40031 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
40033 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
40035 double fenv_clear = __builtin_mffs ();
40036 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
40037 __builtin_mtfsf (0xff, fenv_clear); */
40039 /* Mask to clear everything in the low-order 32 bits of the FPSCR,
40040 i.e. all of the exception, enable, and control bits. */
40041 const unsigned HOST_WIDE_INT clear_exception_mask =
40042 HOST_WIDE_INT_C (0xffffffff00000000);
40044 tree fenv_clear = create_tmp_var_raw (double_type_node);
40046 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
40048 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
40049 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
40050 fenv_clean_llu,
40051 build_int_cst (uint64_type_node,
40052 clear_exception_mask));
40054 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
40055 fenv_clear_llu_and);
40057 tree clear_mtfsf = build_call_expr (mtfsf, 2,
40058 build_int_cst (unsigned_type_node, 0xff),
40059 fenv_clear_mtfsf);
40061 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
40063 /* Generates the equivalent of feupdateenv (&fenv_var)
40065 double old_fenv = __builtin_mffs ();
40066 double fenv_update;
40067 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
40068 (*(uint64_t*)fenv_var & 0x1ff80fff);
40069 __builtin_mtfsf (0xff, fenv_update); */
40071 const unsigned HOST_WIDE_INT update_exception_mask =
40072 HOST_WIDE_INT_C (0xffffffff1fffff00);
40073 const unsigned HOST_WIDE_INT new_exception_mask =
40074 HOST_WIDE_INT_C (0x1ff80fff);
40076 tree old_fenv = create_tmp_var_raw (double_type_node);
40077 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
40079 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
40080 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
40081 build_int_cst (uint64_type_node,
40082 update_exception_mask));
40084 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
40085 build_int_cst (uint64_type_node,
40086 new_exception_mask));
40088 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
40089 old_llu_and, new_llu_and);
40091 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
40092 new_llu_mask);
40094 tree update_mtfsf = build_call_expr (mtfsf, 2,
40095 build_int_cst (unsigned_type_node, 0xff),
40096 fenv_update_mtfsf);
40098 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
40101 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
40103 static bool
40104 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
40105 optimization_type opt_type)
40107 switch (op)
40109 case rsqrt_optab:
40110 return (opt_type == OPTIMIZE_FOR_SPEED
40111 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
40113 default:
40114 return true;
40118 struct gcc_target targetm = TARGET_INITIALIZER;
40120 #include "gt-rs6000.h"