/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2016 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B) ((A) < (B) ? (A) : (B))
#define max(A,B) ((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Whether the instruction chain has been scanned already.  */
  int spe_insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, to call to, so we
   can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 }
};

/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)

/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */

/* Register type masks based on the type of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	  /* INSN to reload for loading.  */
  enum insn_code reload_store;	  /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;  /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;  /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;  /* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;	  /* INSN for fusing gpr ADDIS/loads.  */
				  /* INSNs for fusing addi with loads
				     or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
				  /* INSNs for fusing addis with loads
				     or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;		  /* Scalar value can go in VMX.  */
  bool fused_toc;		  /* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}

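/* A caller might use these predicates like so (hypothetical sketch; the
   real uses are in the address legitimation code later in this file):

     if (GET_CODE (XEXP (mem, 0)) == PRE_INC
	 && !mode_supports_pre_incdec_p (GET_MODE (mem)))
       return false;

   i.e. reject a PRE_INC address for any mode whose addr_mask lacks
   RELOAD_REG_PRE_INCDEC.  */
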
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add) */

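/* Note: COSTS_N_INSNS (N) (from rtl.h) scales a count of N instructions
   into the cost units used by the middle end, so e.g. the power9 entry
   below models an SImode divide (divsi) as costing eight add-latency
   instructions.  */
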
/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

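/* The table above is built with the X-macro technique: each
   RS6000_BUILTIN_* macro is redefined to emit one initializer, and
   including rs6000-builtin.def expands every builtin description into a
   table row.  A (hypothetical) entry such as

     RS6000_BUILTIN_2 (FOO, "__builtin_foo", MASK, ATTR, CODE_FOR_foo)

   expands to

     { "__builtin_foo", CODE_FOR_foo, MASK, ATTR },  */
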
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							    machine_mode,
							    rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp",
      /* HTM SPR registers.  */
      "tfhar", "tfiar", "texasr",
      /* SPE High registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "%rh0",  "%rh1",  "%rh2",  "%rh3",  "%rh4",  "%rh5",  "%rh6",  "%rh7",
  "%rh8",  "%rh9", "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))

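/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (the
   bit for %v0) and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is
   0x00000001, matching the hardware VRSAVE layout.  */
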
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

1602 #undef TARGET_INIT_BUILTINS
1603 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1604 #undef TARGET_BUILTIN_DECL
1605 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1607 #undef TARGET_FOLD_BUILTIN
1608 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1610 #undef TARGET_EXPAND_BUILTIN
1611 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1613 #undef TARGET_MANGLE_TYPE
1614 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1616 #undef TARGET_INIT_LIBFUNCS
1617 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1619 #if TARGET_MACHO
1620 #undef TARGET_BINDS_LOCAL_P
1621 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1622 #endif
1624 #undef TARGET_MS_BITFIELD_LAYOUT_P
1625 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1627 #undef TARGET_ASM_OUTPUT_MI_THUNK
1628 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1630 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1631 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1633 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1634 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1636 #undef TARGET_REGISTER_MOVE_COST
1637 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1638 #undef TARGET_MEMORY_MOVE_COST
1639 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1640 #undef TARGET_CANNOT_COPY_INSN_P
1641 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1642 #undef TARGET_RTX_COSTS
1643 #define TARGET_RTX_COSTS rs6000_rtx_costs
1644 #undef TARGET_ADDRESS_COST
1645 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1647 #undef TARGET_DWARF_REGISTER_SPAN
1648 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
1650 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1651 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1653 #undef TARGET_MEMBER_TYPE_FORCES_BLK
1654 #define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
1656 #undef TARGET_PROMOTE_FUNCTION_MODE
1657 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1659 #undef TARGET_RETURN_IN_MEMORY
1660 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1662 #undef TARGET_RETURN_IN_MSB
1663 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1665 #undef TARGET_SETUP_INCOMING_VARARGS
1666 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1668 /* Always strict argument naming on rs6000. */
1669 #undef TARGET_STRICT_ARGUMENT_NAMING
1670 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1671 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1672 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1673 #undef TARGET_SPLIT_COMPLEX_ARG
1674 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1675 #undef TARGET_MUST_PASS_IN_STACK
1676 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1677 #undef TARGET_PASS_BY_REFERENCE
1678 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1679 #undef TARGET_ARG_PARTIAL_BYTES
1680 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1681 #undef TARGET_FUNCTION_ARG_ADVANCE
1682 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1683 #undef TARGET_FUNCTION_ARG
1684 #define TARGET_FUNCTION_ARG rs6000_function_arg
1685 #undef TARGET_FUNCTION_ARG_BOUNDARY
1686 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1688 #undef TARGET_BUILD_BUILTIN_VA_LIST
1689 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1691 #undef TARGET_EXPAND_BUILTIN_VA_START
1692 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1694 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1695 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1697 #undef TARGET_EH_RETURN_FILTER_MODE
1698 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1700 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1701 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1703 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1704 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1706 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1707 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1709 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1710 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1712 #undef TARGET_MD_ASM_ADJUST
1713 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1715 #undef TARGET_OPTION_OVERRIDE
1716 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1718 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1719 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1720 rs6000_builtin_vectorized_function
1722 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1723 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1724 rs6000_builtin_md_vectorized_function
1726 #if !TARGET_MACHO
1727 #undef TARGET_STACK_PROTECT_FAIL
1728 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1729 #endif
1731 #ifdef HAVE_AS_TLS
1732 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1733 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1734 #endif
1736 /* Use a 32-bit anchor range. This leads to sequences like:
1738 addis tmp,anchor,high
1739 add dest,tmp,low
1741 where tmp itself acts as an anchor, and can be shared between
1742 accesses to the same 64k page. */
1743 #undef TARGET_MIN_ANCHOR_OFFSET
1744 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1745 #undef TARGET_MAX_ANCHOR_OFFSET
1746 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
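/* Illustrative sketch (offsets and register numbers are hypothetical):
   two anchored accesses whose offsets share the same high 16 bits need
   only one addis:

   addis 9,anchor,0x1234    # materialize the high part of the offset once
   lwz 10,0x5678(9)         # access at offset 0x12345678
   lwz 11,0x5abc(9)         # access at offset 0x12345abc reuses r9  */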
1747 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1748 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1749 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1750 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1752 #undef TARGET_BUILTIN_RECIPROCAL
1753 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1755 #undef TARGET_EXPAND_TO_RTL_HOOK
1756 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1758 #undef TARGET_INSTANTIATE_DECLS
1759 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1761 #undef TARGET_SECONDARY_RELOAD
1762 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1764 #undef TARGET_LEGITIMATE_ADDRESS_P
1765 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1767 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1768 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1770 #undef TARGET_LRA_P
1771 #define TARGET_LRA_P rs6000_lra_p
1773 #undef TARGET_CAN_ELIMINATE
1774 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1776 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1777 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1779 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1780 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1782 #undef TARGET_TRAMPOLINE_INIT
1783 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1785 #undef TARGET_FUNCTION_VALUE
1786 #define TARGET_FUNCTION_VALUE rs6000_function_value
1788 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1789 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1791 #undef TARGET_OPTION_SAVE
1792 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1794 #undef TARGET_OPTION_RESTORE
1795 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1797 #undef TARGET_OPTION_PRINT
1798 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1800 #undef TARGET_CAN_INLINE_P
1801 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1803 #undef TARGET_SET_CURRENT_FUNCTION
1804 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1806 #undef TARGET_LEGITIMATE_CONSTANT_P
1807 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1809 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1810 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1812 #undef TARGET_CAN_USE_DOLOOP_P
1813 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1815 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1816 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1818 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1819 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1820 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1821 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1822 #undef TARGET_UNWIND_WORD_MODE
1823 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1825 #undef TARGET_OFFLOAD_OPTIONS
1826 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1828 #undef TARGET_C_MODE_FOR_SUFFIX
1829 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1831 #undef TARGET_INVALID_BINARY_OP
1832 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1834 #undef TARGET_OPTAB_SUPPORTED_P
1835 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1838 /* Processor table. */
1839 struct rs6000_ptt
1841 const char *const name; /* Canonical processor name. */
1842 const enum processor_type processor; /* Processor type enum value. */
1843 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1846 static struct rs6000_ptt const processor_target_table[] =
1848 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1849 #include "rs6000-cpus.def"
1850 #undef RS6000_CPU
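/* For illustration (a hypothetical entry; the real ones live in
   rs6000-cpus.def): a line such as
   RS6000_CPU ("power8", PROCESSOR_POWER8, MASK_POWERPC64 | MASK_VSX)
   expands to the table element
   { "power8", PROCESSOR_POWER8, MASK_POWERPC64 | MASK_VSX },
   so the table stays in lockstep with the .def file.  */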
1853 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1854 name is invalid. */
1856 static int
1857 rs6000_cpu_name_lookup (const char *name)
1859 size_t i;
1861 if (name != NULL)
1863 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1864 if (! strcmp (name, processor_target_table[i].name))
1865 return (int)i;
1868 return -1;
1872 /* Return number of consecutive hard regs needed starting at reg REGNO
1873 to hold something of mode MODE.
1874 This is ordinarily the length in words of a value of mode MODE
1875 but can be less for certain modes in special long registers.
1877 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1878 scalar instructions. The upper 32 bits are only available to the
1879 SIMD instructions.
1881 POWER and PowerPC GPRs hold 32 bits worth;
1882 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1884 static int
1885 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
1887 unsigned HOST_WIDE_INT reg_size;
1889 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
1890 128-bit floating point that can go in vector registers, which has VSX
1891 memory addressing. */
1892 if (FP_REGNO_P (regno))
1893 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
1894 ? UNITS_PER_VSX_WORD
1895 : UNITS_PER_FP_WORD);
1897 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1898 reg_size = UNITS_PER_SPE_WORD;
1900 else if (ALTIVEC_REGNO_P (regno))
1901 reg_size = UNITS_PER_ALTIVEC_WORD;
1903 /* The value returned for SCmode in the E500 double case is 2 for
1904 ABI compatibility; storing an SCmode value in a single register
1905 would require function_arg and rs6000_spe_function_arg to handle
1906 SCmode so as to pass the value correctly in a pair of
1907 registers. */
1908 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1909 && !DECIMAL_FLOAT_MODE_P (mode) && SPE_SIMD_REGNO_P (regno))
1910 reg_size = UNITS_PER_FP_WORD;
1912 else
1913 reg_size = UNITS_PER_WORD;
1915 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
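/* Worked example (illustrative): the division above rounds up.  On a
   32-bit target (UNITS_PER_WORD == 4), a 16-byte V4SImode value held in
   GPRs needs (16 + 4 - 1) / 4 == 4 registers, while an 8-byte DFmode
   value in a 64-bit FPR needs (8 + 8 - 1) / 8 == 1.  */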
1918 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1919 MODE. */
1920 static int
1921 rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
1923 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1925 if (COMPLEX_MODE_P (mode))
1926 mode = GET_MODE_INNER (mode);
1928 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
1929 register pairs, and we use PTImode where we need to deal with quad
1930 word memory operations. Don't allow quad words in the argument or frame
1931 pointer registers, just registers 0..31. */
1932 if (mode == PTImode)
1933 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1934 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1935 && ((regno & 1) == 0));
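/* Example (illustrative): a PTImode value is accepted at r10, since r10
   is even and r10-r11 are both GPRs, but rejected at r11, because quad
   word memory operations need an even/odd GPR pair.  */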
1937 /* VSX registers that overlap the FPR registers are wider than on non-VSX
1938 implementations. Don't allow an item to be split between a FP register
1939 and an Altivec register. Allow TImode in all VSX registers if the user
1940 asked for it. */
1941 if (TARGET_VSX && VSX_REGNO_P (regno)
1942 && (VECTOR_MEM_VSX_P (mode)
1943 || FLOAT128_VECTOR_P (mode)
1944 || reg_addr[mode].scalar_in_vmx_p
1945 || (TARGET_VSX_TIMODE && mode == TImode)
1946 || (TARGET_VADDUQM && mode == V1TImode)
1947 || (TARGET_UPPER_REGS_DI && mode == DImode)))
1949 if (FP_REGNO_P (regno))
1950 return FP_REGNO_P (last_regno);
1952 if (ALTIVEC_REGNO_P (regno))
1954 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
1955 return 0;
1957 return ALTIVEC_REGNO_P (last_regno);
1961 /* The GPRs can hold any mode, but values bigger than one register
1962 cannot go past R31. */
1963 if (INT_REGNO_P (regno))
1964 return INT_REGNO_P (last_regno);
1966 /* The float registers (except for VSX vector modes) can only hold floating
1967 modes and DImode. */
1968 if (FP_REGNO_P (regno))
1970 if (FLOAT128_VECTOR_P (mode))
1971 return false;
1973 if (SCALAR_FLOAT_MODE_P (mode)
1974 && (mode != TDmode || (regno % 2) == 0)
1975 && FP_REGNO_P (last_regno))
1976 return 1;
1978 if (GET_MODE_CLASS (mode) == MODE_INT
1979 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1980 return 1;
1982 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1983 && PAIRED_VECTOR_MODE (mode))
1984 return 1;
1986 return 0;
1989 /* The CR register can only hold CC modes. */
1990 if (CR_REGNO_P (regno))
1991 return GET_MODE_CLASS (mode) == MODE_CC;
1993 if (CA_REGNO_P (regno))
1994 return mode == Pmode || mode == SImode;
1997 /* AltiVec modes can go only in AltiVec registers. */
1997 if (ALTIVEC_REGNO_P (regno))
1998 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
1999 || mode == V1TImode);
2001 /* ...but GPRs can hold SIMD data on the SPE in one register. */
2002 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
2003 return 1;
2005 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2006 registers, and the value must fit within the register set. */
2008 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2011 /* Print interesting facts about registers. */
2012 static void
2013 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2015 int r, m;
2017 for (r = first_regno; r <= last_regno; ++r)
2019 const char *comma = "";
2020 int len;
2022 if (first_regno == last_regno)
2023 fprintf (stderr, "%s:\t", reg_name);
2024 else
2025 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2027 len = 8;
2028 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2029 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2031 if (len > 70)
2033 fprintf (stderr, ",\n\t");
2034 len = 8;
2035 comma = "";
2038 if (rs6000_hard_regno_nregs[m][r] > 1)
2039 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2040 rs6000_hard_regno_nregs[m][r]);
2041 else
2042 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2044 comma = ", ";
2047 if (call_used_regs[r])
2049 if (len > 70)
2051 fprintf (stderr, ",\n\t");
2052 len = 8;
2053 comma = "";
2056 len += fprintf (stderr, "%s%s", comma, "call-used");
2057 comma = ", ";
2060 if (fixed_regs[r])
2062 if (len > 70)
2064 fprintf (stderr, ",\n\t");
2065 len = 8;
2066 comma = "";
2069 len += fprintf (stderr, "%s%s", comma, "fixed");
2070 comma = ", ";
2073 if (len > 70)
2075 fprintf (stderr, ",\n\t");
2076 comma = "";
2079 len += fprintf (stderr, "%sreg-class = %s", comma,
2080 reg_class_names[(int)rs6000_regno_regclass[r]]);
2081 comma = ", ";
2083 if (len > 70)
2085 fprintf (stderr, ",\n\t");
2086 comma = "";
2089 fprintf (stderr, "%sregno = %d\n", comma, r);
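/* A line of the resulting dump looks roughly like this (illustrative,
   not captured output):

   gr10:  SI, DI/2, ..., call-used, reg-class = GENERAL_REGS, regno = 10  */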
2093 static const char *
2094 rs6000_debug_vector_unit (enum rs6000_vector v)
2096 const char *ret;
2098 switch (v)
2100 case VECTOR_NONE: ret = "none"; break;
2101 case VECTOR_ALTIVEC: ret = "altivec"; break;
2102 case VECTOR_VSX: ret = "vsx"; break;
2103 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2104 case VECTOR_PAIRED: ret = "paired"; break;
2105 case VECTOR_SPE: ret = "spe"; break;
2106 case VECTOR_OTHER: ret = "other"; break;
2107 default: ret = "unknown"; break;
2110 return ret;
2113 /* Inner function printing just the address mask for a particular reload
2114 register class. */
2115 DEBUG_FUNCTION char *
2116 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2118 static char ret[8];
2119 char *p = ret;
2121 if ((mask & RELOAD_REG_VALID) != 0)
2122 *p++ = 'v';
2123 else if (keep_spaces)
2124 *p++ = ' ';
2126 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2127 *p++ = 'm';
2128 else if (keep_spaces)
2129 *p++ = ' ';
2131 if ((mask & RELOAD_REG_INDEXED) != 0)
2132 *p++ = 'i';
2133 else if (keep_spaces)
2134 *p++ = ' ';
2136 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2137 *p++ = 'O';
2138 else if ((mask & RELOAD_REG_OFFSET) != 0)
2139 *p++ = 'o';
2140 else if (keep_spaces)
2141 *p++ = ' ';
2143 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2144 *p++ = '+';
2145 else if (keep_spaces)
2146 *p++ = ' ';
2148 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2149 *p++ = '+';
2150 else if (keep_spaces)
2151 *p++ = ' ';
2153 if ((mask & RELOAD_REG_AND_M16) != 0)
2154 *p++ = '&';
2155 else if (keep_spaces)
2156 *p++ = ' ';
2158 *p = '\0';
2160 return ret;
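/* Worked example (illustrative): for a mask with RELOAD_REG_VALID,
   RELOAD_REG_INDEXED and RELOAD_REG_OFFSET set and keep_spaces true, the
   tests above emit "v io" followed by three spaces: 'v' for valid, a
   space for the missing multiple bit, 'i' for indexed, 'o' for offset,
   and spaces for the pre-increment, pre-modify and AND -16 bits.  */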
2163 /* Print the address masks in a human readable fashion. */
2164 DEBUG_FUNCTION void
2165 rs6000_debug_print_mode (ssize_t m)
2167 ssize_t rc;
2168 int spaces = 0;
2169 bool fuse_extra_p;
2171 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2172 for (rc = 0; rc < N_RELOAD_REG; rc++)
2173 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2174 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2176 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2177 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2178 fprintf (stderr, " Reload=%c%c",
2179 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2180 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2181 else
2182 spaces += sizeof (" Reload=sl") - 1;
2184 if (reg_addr[m].scalar_in_vmx_p)
2186 fprintf (stderr, "%*s Upper=y", spaces, "");
2187 spaces = 0;
2189 else
2190 spaces += sizeof (" Upper=y") - 1;
2192 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2193 || reg_addr[m].fused_toc);
2194 if (!fuse_extra_p)
2196 for (rc = 0; rc < N_RELOAD_REG; rc++)
2198 if (rc != RELOAD_REG_ANY)
2200 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2202 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2203 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2204 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2206 fuse_extra_p = true;
2207 break;
2213 if (fuse_extra_p)
2215 fprintf (stderr, "%*s Fuse:", spaces, "");
2216 spaces = 0;
2218 for (rc = 0; rc < N_RELOAD_REG; rc++)
2220 if (rc != RELOAD_REG_ANY)
2222 char load, store;
2224 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2225 load = 'l';
2226 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2227 load = 'L';
2228 else
2229 load = '-';
2231 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2232 store = 's';
2233 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2234 store = 'S';
2235 else
2236 store = '-';
2238 if (load == '-' && store == '-')
2239 spaces += 5;
2240 else
2242 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2243 reload_reg_map[rc].name[0], load, store);
2244 spaces = 0;
2249 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2251 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2252 spaces = 0;
2254 else
2255 spaces += sizeof (" P8gpr") - 1;
2257 if (reg_addr[m].fused_toc)
2259 fprintf (stderr, "%*sToc", (spaces + 1), "");
2260 spaces = 0;
2262 else
2263 spaces += sizeof (" Toc") - 1;
2265 else
2266 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2268 if (rs6000_vector_unit[m] != VECTOR_NONE
2269 || rs6000_vector_mem[m] != VECTOR_NONE)
2271 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2272 spaces, "",
2273 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2274 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2277 fputs ("\n", stderr);
2280 #define DEBUG_FMT_ID "%-32s= "
2281 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2282 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2283 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2285 /* Print various interesting information with -mdebug=reg. */
2286 static void
2287 rs6000_debug_reg_global (void)
2289 static const char *const tf[2] = { "false", "true" };
2290 const char *nl = (const char *)0;
2291 int m;
2292 size_t m1, m2, v;
2293 char costly_num[20];
2294 char nop_num[20];
2295 char flags_buffer[40];
2296 const char *costly_str;
2297 const char *nop_str;
2298 const char *trace_str;
2299 const char *abi_str;
2300 const char *cmodel_str;
2301 struct cl_target_option cl_opts;
2303 /* Modes we want tieable information on. */
2304 static const machine_mode print_tieable_modes[] = {
2305 QImode,
2306 HImode,
2307 SImode,
2308 DImode,
2309 TImode,
2310 PTImode,
2311 SFmode,
2312 DFmode,
2313 TFmode,
2314 IFmode,
2315 KFmode,
2316 SDmode,
2317 DDmode,
2318 TDmode,
2319 V8QImode,
2320 V4HImode,
2321 V2SImode,
2322 V16QImode,
2323 V8HImode,
2324 V4SImode,
2325 V2DImode,
2326 V1TImode,
2327 V32QImode,
2328 V16HImode,
2329 V8SImode,
2330 V4DImode,
2331 V2TImode,
2332 V2SFmode,
2333 V4SFmode,
2334 V2DFmode,
2335 V8SFmode,
2336 V4DFmode,
2337 CCmode,
2338 CCUNSmode,
2339 CCEQmode,
2342 /* Virtual regs we are interested in. */
2343 static const struct {
2344 int regno; /* register number. */
2345 const char *name; /* register name. */
2346 } virtual_regs[] = {
2347 { STACK_POINTER_REGNUM, "stack pointer:" },
2348 { TOC_REGNUM, "toc: " },
2349 { STATIC_CHAIN_REGNUM, "static chain: " },
2350 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2351 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2352 { ARG_POINTER_REGNUM, "arg pointer: " },
2353 { FRAME_POINTER_REGNUM, "frame pointer:" },
2354 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2355 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2356 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2357 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2358 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2359 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2360 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2361 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2362 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2365 fputs ("\nHard register information:\n", stderr);
2366 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2367 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2368 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2369 LAST_ALTIVEC_REGNO,
2370 "vs");
2371 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2372 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2373 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2374 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2375 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2376 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2377 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2378 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2380 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2381 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2382 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2384 fprintf (stderr,
2385 "\n"
2386 "d reg_class = %s\n"
2387 "f reg_class = %s\n"
2388 "v reg_class = %s\n"
2389 "wa reg_class = %s\n"
2390 "wb reg_class = %s\n"
2391 "wd reg_class = %s\n"
2392 "we reg_class = %s\n"
2393 "wf reg_class = %s\n"
2394 "wg reg_class = %s\n"
2395 "wh reg_class = %s\n"
2396 "wi reg_class = %s\n"
2397 "wj reg_class = %s\n"
2398 "wk reg_class = %s\n"
2399 "wl reg_class = %s\n"
2400 "wm reg_class = %s\n"
2401 "wo reg_class = %s\n"
2402 "wp reg_class = %s\n"
2403 "wq reg_class = %s\n"
2404 "wr reg_class = %s\n"
2405 "ws reg_class = %s\n"
2406 "wt reg_class = %s\n"
2407 "wu reg_class = %s\n"
2408 "wv reg_class = %s\n"
2409 "ww reg_class = %s\n"
2410 "wx reg_class = %s\n"
2411 "wy reg_class = %s\n"
2412 "wz reg_class = %s\n"
2413 "\n",
2414 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2415 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2416 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2417 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2418 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2419 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2420 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2421 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2422 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2423 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2424 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2425 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2426 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2427 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2428 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2429 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2430 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2431 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2432 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2433 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2434 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2435 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2436 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2437 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2438 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2439 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2440 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
2442 nl = "\n";
2443 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2444 rs6000_debug_print_mode (m);
2446 fputs ("\n", stderr);
2448 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2450 machine_mode mode1 = print_tieable_modes[m1];
2451 bool first_time = true;
2453 nl = (const char *)0;
2454 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2456 machine_mode mode2 = print_tieable_modes[m2];
2457 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2459 if (first_time)
2461 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2462 nl = "\n";
2463 first_time = false;
2466 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2470 if (!first_time)
2471 fputs ("\n", stderr);
2474 if (nl)
2475 fputs (nl, stderr);
2477 if (rs6000_recip_control)
2479 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2481 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2482 if (rs6000_recip_bits[m])
2484 fprintf (stderr,
2485 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2486 GET_MODE_NAME (m),
2487 (RS6000_RECIP_AUTO_RE_P (m)
2488 ? "auto"
2489 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2490 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2491 ? "auto"
2492 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2495 fputs ("\n", stderr);
2498 if (rs6000_cpu_index >= 0)
2500 const char *name = processor_target_table[rs6000_cpu_index].name;
2501 HOST_WIDE_INT flags
2502 = processor_target_table[rs6000_cpu_index].target_enable;
2504 sprintf (flags_buffer, "-mcpu=%s flags", name);
2505 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2507 else
2508 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2510 if (rs6000_tune_index >= 0)
2512 const char *name = processor_target_table[rs6000_tune_index].name;
2513 HOST_WIDE_INT flags
2514 = processor_target_table[rs6000_tune_index].target_enable;
2516 sprintf (flags_buffer, "-mtune=%s flags", name);
2517 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2519 else
2520 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2522 cl_target_option_save (&cl_opts, &global_options);
2523 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2524 rs6000_isa_flags);
2526 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2527 rs6000_isa_flags_explicit);
2529 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2530 rs6000_builtin_mask);
2532 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2534 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2535 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2537 switch (rs6000_sched_costly_dep)
2539 case max_dep_latency:
2540 costly_str = "max_dep_latency";
2541 break;
2543 case no_dep_costly:
2544 costly_str = "no_dep_costly";
2545 break;
2547 case all_deps_costly:
2548 costly_str = "all_deps_costly";
2549 break;
2551 case true_store_to_load_dep_costly:
2552 costly_str = "true_store_to_load_dep_costly";
2553 break;
2555 case store_to_load_dep_costly:
2556 costly_str = "store_to_load_dep_costly";
2557 break;
2559 default:
2560 costly_str = costly_num;
2561 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2562 break;
2565 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2567 switch (rs6000_sched_insert_nops)
2569 case sched_finish_regroup_exact:
2570 nop_str = "sched_finish_regroup_exact";
2571 break;
2573 case sched_finish_pad_groups:
2574 nop_str = "sched_finish_pad_groups";
2575 break;
2577 case sched_finish_none:
2578 nop_str = "sched_finish_none";
2579 break;
2581 default:
2582 nop_str = nop_num;
2583 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2584 break;
2587 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2589 switch (rs6000_sdata)
2591 default:
2592 case SDATA_NONE:
2593 break;
2595 case SDATA_DATA:
2596 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2597 break;
2599 case SDATA_SYSV:
2600 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2601 break;
2603 case SDATA_EABI:
2604 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2605 break;
2609 switch (rs6000_traceback)
2611 case traceback_default: trace_str = "default"; break;
2612 case traceback_none: trace_str = "none"; break;
2613 case traceback_part: trace_str = "part"; break;
2614 case traceback_full: trace_str = "full"; break;
2615 default: trace_str = "unknown"; break;
2618 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2620 switch (rs6000_current_cmodel)
2622 case CMODEL_SMALL: cmodel_str = "small"; break;
2623 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2624 case CMODEL_LARGE: cmodel_str = "large"; break;
2625 default: cmodel_str = "unknown"; break;
2628 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2630 switch (rs6000_current_abi)
2632 case ABI_NONE: abi_str = "none"; break;
2633 case ABI_AIX: abi_str = "aix"; break;
2634 case ABI_ELFv2: abi_str = "ELFv2"; break;
2635 case ABI_V4: abi_str = "V4"; break;
2636 case ABI_DARWIN: abi_str = "darwin"; break;
2637 default: abi_str = "unknown"; break;
2640 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2642 if (rs6000_altivec_abi)
2643 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2645 if (rs6000_spe_abi)
2646 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2648 if (rs6000_darwin64_abi)
2649 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2651 if (rs6000_float_gprs)
2652 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2654 fprintf (stderr, DEBUG_FMT_S, "fprs",
2655 (TARGET_FPRS ? "true" : "false"));
2657 fprintf (stderr, DEBUG_FMT_S, "single_float",
2658 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2660 fprintf (stderr, DEBUG_FMT_S, "double_float",
2661 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2663 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2664 (TARGET_SOFT_FLOAT ? "true" : "false"));
2666 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2667 (TARGET_E500_SINGLE ? "true" : "false"));
2669 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2670 (TARGET_E500_DOUBLE ? "true" : "false"));
2672 if (TARGET_LINK_STACK)
2673 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2675 fprintf (stderr, DEBUG_FMT_S, "lra", TARGET_LRA ? "true" : "false");
2677 if (TARGET_P8_FUSION)
2679 char options[80];
2681 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2682 if (TARGET_TOC_FUSION)
2683 strcat (options, ", toc");
2685 if (TARGET_P8_FUSION_SIGN)
2686 strcat (options, ", sign");
2688 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2691 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2692 TARGET_SECURE_PLT ? "secure" : "bss");
2693 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2694 aix_struct_return ? "aix" : "sysv");
2695 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2696 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2697 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2698 tf[!!rs6000_align_branch_targets]);
2699 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2700 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2701 rs6000_long_double_type_size);
2702 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2703 (int)rs6000_sched_restricted_insns_priority);
2704 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2705 (int)END_BUILTINS);
2706 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2707 (int)RS6000_BUILTIN_COUNT);
2709 if (TARGET_VSX)
2710 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2711 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2713 if (TARGET_DIRECT_MOVE_128)
2714 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2715 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2719 /* Update the addr mask bits in reg_addr to help secondary reload and the
2720 legitimate address support figure out the appropriate addressing to
2721 use. */
2723 static void
2724 rs6000_setup_reg_addr_masks (void)
2726 ssize_t rc, reg, m, nregs;
2727 addr_mask_type any_addr_mask, addr_mask;
2729 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2731 machine_mode m2 = (machine_mode) m;
2732 bool complex_p = false;
2733 size_t msize;
2735 if (COMPLEX_MODE_P (m2))
2737 complex_p = true;
2738 m2 = GET_MODE_INNER (m2);
2741 msize = GET_MODE_SIZE (m2);
2743 /* SDmode is special in that we want to access it only via REG+REG
2744 addressing on power7 and above, since we want to use the LFIWZX and
2745 STFIWZX instructions to load it. */
2746 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2748 any_addr_mask = 0;
2749 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2751 addr_mask = 0;
2752 reg = reload_reg_map[rc].reg;
2754 /* Can mode values go in the GPR/FPR/Altivec registers? */
2755 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2757 nregs = rs6000_hard_regno_nregs[m][reg];
2758 addr_mask |= RELOAD_REG_VALID;
2760 /* Indicate if the mode takes more than 1 physical register. If
2761 it takes a single register, indicate it can do REG+REG
2762 addressing. */
2763 if (nregs > 1 || m == BLKmode || complex_p)
2764 addr_mask |= RELOAD_REG_MULTIPLE;
2765 else
2766 addr_mask |= RELOAD_REG_INDEXED;
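/* For example (illustrative): on a 64-bit target, DImode occupies a
   single GPR and is marked RELOAD_REG_INDEXED, while TImode occupies
   two GPRs and gets RELOAD_REG_MULTIPLE instead.  */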
2768 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2769 addressing. Restrict addressing on SPE for 64-bit types
2770 because of the SUBREG hackery used to address 64-bit floats in
2771 '32-bit' GPRs. If we allow scalars into Altivec registers,
2772 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2774 if (TARGET_UPDATE
2775 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2776 && msize <= 8
2777 && !VECTOR_MODE_P (m2)
2778 && !FLOAT128_VECTOR_P (m2)
2779 && !complex_p
2780 && (m2 != DFmode || !TARGET_UPPER_REGS_DF)
2781 && (m2 != SFmode || !TARGET_UPPER_REGS_SF)
2782 && !(TARGET_E500_DOUBLE && msize == 8))
2784 addr_mask |= RELOAD_REG_PRE_INCDEC;
2786 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2787 we don't allow PRE_MODIFY for some multi-register
2788 operations. */
2789 switch (m)
2791 default:
2792 addr_mask |= RELOAD_REG_PRE_MODIFY;
2793 break;
2795 case DImode:
2796 if (TARGET_POWERPC64)
2797 addr_mask |= RELOAD_REG_PRE_MODIFY;
2798 break;
2800 case DFmode:
2801 case DDmode:
2802 if (TARGET_DF_INSN)
2803 addr_mask |= RELOAD_REG_PRE_MODIFY;
2804 break;
2809 /* GPR and FPR registers can do REG+OFFSET addressing, except
2810 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2811 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2812 if ((addr_mask != 0) && !indexed_only_p
2813 && msize <= 8
2814 && (rc == RELOAD_REG_GPR
2815 || ((msize == 8 || m2 == SFmode)
2816 && (rc == RELOAD_REG_FPR
2817 || (rc == RELOAD_REG_VMX
2818 && TARGET_P9_DFORM_SCALAR)))))
2819 addr_mask |= RELOAD_REG_OFFSET;
2821 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2822 instructions are enabled. The offset for 128-bit VSX registers is
2823 only 12 bits. While GPRs can handle the full offset range, VSX
2824 registers can only handle the restricted range. */
2825 else if ((addr_mask != 0) && !indexed_only_p
2826 && msize == 16 && TARGET_P9_DFORM_VECTOR
2827 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2828 || (m2 == TImode && TARGET_VSX_TIMODE)))
2830 addr_mask |= RELOAD_REG_OFFSET;
2831 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2832 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2835 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2836 addressing on 128-bit types. */
2837 if (rc == RELOAD_REG_VMX && msize == 16
2838 && (addr_mask & RELOAD_REG_VALID) != 0)
2839 addr_mask |= RELOAD_REG_AND_M16;
2841 reg_addr[m].addr_mask[rc] = addr_mask;
2842 any_addr_mask |= addr_mask;
2845 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2850 /* Initialize the various global tables that are based on register size. */
2851 static void
2852 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2854 ssize_t r, m, c;
2855 int align64;
2856 int align32;
2858 /* Precalculate REGNO_REG_CLASS. */
2859 rs6000_regno_regclass[0] = GENERAL_REGS;
2860 for (r = 1; r < 32; ++r)
2861 rs6000_regno_regclass[r] = BASE_REGS;
2863 for (r = 32; r < 64; ++r)
2864 rs6000_regno_regclass[r] = FLOAT_REGS;
2866 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2867 rs6000_regno_regclass[r] = NO_REGS;
2869 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2870 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2872 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2873 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2874 rs6000_regno_regclass[r] = CR_REGS;
2876 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2877 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2878 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2879 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2880 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2881 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2882 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2883 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2884 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2885 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2886 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2887 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2889 /* Precalculate the mapping from each register class to its simpler reload
2890 register class. We don't need all of the register classes that are
2891 combinations of different classes, just the simple ones that have constraint letters. */
2892 for (c = 0; c < N_REG_CLASSES; c++)
2893 reg_class_to_reg_type[c] = NO_REG_TYPE;
2895 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2896 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2897 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2898 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2899 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2900 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2901 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2902 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2903 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2904 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2905 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2906 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2908 if (TARGET_VSX)
2910 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2911 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2913 else
2915 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2916 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2919 /* Precalculate the valid memory formats as well as the vector information;
2920 this must be set up before the rs6000_hard_regno_nregs_internal calls
2921 below. */
2922 gcc_assert ((int)VECTOR_NONE == 0);
2923 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2924 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
2926 gcc_assert ((int)CODE_FOR_nothing == 0);
2927 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2929 gcc_assert ((int)NO_REGS == 0);
2930 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2932 /* The VSX hardware allows native alignment for vectors, but we control whether
2933 the compiler believes it can use native alignment or must still use 128-bit alignment. */
2934 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2936 align64 = 64;
2937 align32 = 32;
2939 else
2941 align64 = 128;
2942 align32 = 128;
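/* Example (illustrative): with VSX enabled and 128-bit alignment not
   forced, V2DFmode below gets its natural 64-bit alignment and V4SFmode
   its natural 32-bit alignment; otherwise both stay at the traditional
   128-bit Altivec alignment.  */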
2945 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
2946 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
2947 if (TARGET_FLOAT128)
2949 rs6000_vector_mem[KFmode] = VECTOR_VSX;
2950 rs6000_vector_align[KFmode] = 128;
2952 if (FLOAT128_IEEE_P (TFmode))
2954 rs6000_vector_mem[TFmode] = VECTOR_VSX;
2955 rs6000_vector_align[TFmode] = 128;
2959 /* V2DF mode, VSX only. */
2960 if (TARGET_VSX)
2962 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2963 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2964 rs6000_vector_align[V2DFmode] = align64;
2967 /* V4SF mode, either VSX or Altivec. */
2968 if (TARGET_VSX)
2970 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2971 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2972 rs6000_vector_align[V4SFmode] = align32;
2974 else if (TARGET_ALTIVEC)
2976 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2977 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2978 rs6000_vector_align[V4SFmode] = align32;
2981 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2982 and stores. */
2983 if (TARGET_ALTIVEC)
2985 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2986 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2987 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2988 rs6000_vector_align[V4SImode] = align32;
2989 rs6000_vector_align[V8HImode] = align32;
2990 rs6000_vector_align[V16QImode] = align32;
2992 if (TARGET_VSX)
2994 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2995 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2996 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2998 else
3000 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3001 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3002 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3006 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3007 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3008 if (TARGET_VSX)
3010 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3011 rs6000_vector_unit[V2DImode]
3012 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3013 rs6000_vector_align[V2DImode] = align64;
3015 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3016 rs6000_vector_unit[V1TImode]
3017 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3018 rs6000_vector_align[V1TImode] = 128;
3021 /* DFmode, see if we want to use the VSX unit. Memory is handled
3022 differently, so don't set rs6000_vector_mem. */
3023 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
3025 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3026 rs6000_vector_align[DFmode] = 64;
3029 /* SFmode, see if we want to use the VSX unit. */
3030 if (TARGET_P8_VECTOR && TARGET_VSX_SCALAR_FLOAT)
3032 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3033 rs6000_vector_align[SFmode] = 32;
3036 /* Allow TImode in VSX register and set the VSX memory macros. */
3037 if (TARGET_VSX && TARGET_VSX_TIMODE)
3039 rs6000_vector_mem[TImode] = VECTOR_VSX;
3040 rs6000_vector_align[TImode] = align64;
3043 /* TODO add SPE and paired floating point vector support. */
3045 /* Register class constraints for the constraints that depend on compile
3046 switches. When the VSX code was added, different constraints were added
3047 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3048 of the VSX registers are used. The register classes for scalar floating
3049 point types are set based on whether we allow that type into the upper
3050 (Altivec) registers. GCC has register classes to target the Altivec
3051 registers for load/store operations, to select using a VSX memory
3052 operation instead of the traditional floating point operation. The
3053 constraints are (an illustrative pattern sketch follows the list):
3055 d - Register class to use with traditional DFmode instructions.
3056 f - Register class to use with traditional SFmode instructions.
3057 v - Altivec register.
3058 wa - Any VSX register.
wb - Altivec register if ISA 3.0 D-form scalar instructions are enabled.
3059 wc - Reserved to represent individual CR bits (used in LLVM).
3060 wd - Preferred register class for V2DFmode.
we - VSX register if ISA 3.0 64-bit direct move instructions are enabled.
3061 wf - Preferred register class for V4SFmode.
3062 wg - Float register for power6x move insns.
3063 wh - FP register for direct move instructions.
3064 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3065 wj - FP or VSX register to hold 64-bit integers for direct moves.
3066 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3067 wl - Float register if we can do 32-bit signed int loads.
3068 wm - VSX register for ISA 2.07 direct move operations.
3069 wn - always NO_REGS.
wo - VSX register for ISA 3.0 vector instructions.
wp - VSX register to use for IEEE 128-bit TFmode.
wq - VSX register to use for IEEE 128-bit floating point (KFmode).
3070 wr - GPR if 64-bit mode is permitted.
3071 ws - Register class to do ISA 2.06 DF operations.
3072 wt - VSX register for TImode in VSX registers.
3073 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3074 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3075 ww - Register class to do SF conversions in with VSX operations.
3076 wx - Float register if we can do 32-bit int stores.
3077 wy - Register class to do ISA 2.07 SF operations.
3078 wz - Float register if we can do 32-bit unsigned int loads. */
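/* A sketch of how these constraints are consumed (a hypothetical
   pattern; the real ones live in the machine description files):

   (define_insn "*vsx_addv2df3_example"
     [(set (match_operand:V2DF 0 "vsx_register_operand" "=wd")
           (plus:V2DF (match_operand:V2DF 1 "vsx_register_operand" "wd")
                      (match_operand:V2DF 2 "vsx_register_operand" "wd")))]
     "VECTOR_UNIT_VSX_P (V2DFmode)"
     "xvadddp %x0,%x1,%x2")

   Because RS6000_CONSTRAINT_wd maps to VSX_REGS only when VSX is enabled
   and to NO_REGS otherwise, the wd alternative is simply unsatisfiable on
   targets without VSX.  */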
3080 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3081 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3083 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3084 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3086 if (TARGET_VSX)
3088 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3089 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3090 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3092 if (TARGET_VSX_TIMODE)
3093 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3095 if (TARGET_UPPER_REGS_DF) /* DFmode */
3097 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
3098 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
3100 else
3101 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
3103 if (TARGET_UPPER_REGS_DF) /* DImode */
3104 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS;
3105 else
3106 rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS;
3109 /* Add conditional constraints based on various options, to allow us to
3110 collapse multiple insn patterns. */
3111 if (TARGET_ALTIVEC)
3112 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3114 if (TARGET_MFPGPR) /* DFmode */
3115 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3117 if (TARGET_LFIWAX)
3118 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3120 if (TARGET_DIRECT_MOVE)
3122 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3123 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3124 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3125 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3126 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3127 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3130 if (TARGET_POWERPC64)
3131 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3133 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF) /* SFmode */
3135 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3136 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3137 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3139 else if (TARGET_P8_VECTOR)
3141 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
3142 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3144 else if (TARGET_VSX)
3145 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3147 if (TARGET_STFIWX)
3148 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3150 if (TARGET_LFIWZX)
3151 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3153 if (TARGET_FLOAT128)
3155 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3156 if (FLOAT128_IEEE_P (TFmode))
3157 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3160 /* Support for new D-form instructions. */
3161 if (TARGET_P9_DFORM_SCALAR)
3162 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3164 /* Support for ISA 3.0 (power9) vectors. */
3165 if (TARGET_P9_VECTOR)
3166 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3168 /* Support for new direct moves (ISA 3.0 + 64bit). */
3169 if (TARGET_DIRECT_MOVE_128)
3170 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3172 /* Set up the reload helper and direct move functions. */
3173 if (TARGET_VSX || TARGET_ALTIVEC)
3175 if (TARGET_64BIT)
3177 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3178 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3179 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3180 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3181 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3182 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3183 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3184 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3185 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3186 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3187 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3188 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3189 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3190 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3191 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3192 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3193 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3194 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3195 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3196 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3198 if (FLOAT128_VECTOR_P (KFmode))
3200 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3201 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3204 if (FLOAT128_VECTOR_P (TFmode))
3206 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3207 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3210 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3211 available. */
3212 if (TARGET_NO_SDMODE_STACK)
3214 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3215 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3218 if (TARGET_VSX_TIMODE)
3220 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3221 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3224 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3226 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3227 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3228 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3229 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3230 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3231 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3232 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3233 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3234 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3236 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3237 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3238 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3239 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3240 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3241 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3242 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3243 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3244 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3246 if (FLOAT128_VECTOR_P (KFmode))
3248 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3249 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3252 if (FLOAT128_VECTOR_P (TFmode))
3254 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3255 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3259 else
3261 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3262 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3263 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3264 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3265 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3266 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3267 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3268 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3269 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3270 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3271 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3272 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3273 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3274 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3275 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3276 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3277 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3278 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3279 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3280 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3282 if (FLOAT128_VECTOR_P (KFmode))
3284 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3285 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3288 if (FLOAT128_IEEE_P (TFmode))
3290 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3291 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3294 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3295 available. */
3296 if (TARGET_NO_SDMODE_STACK)
3298 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3299 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3302 if (TARGET_VSX_TIMODE)
3304 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3305 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3308 if (TARGET_DIRECT_MOVE)
3310 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3311 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3312 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3316 if (TARGET_UPPER_REGS_DF)
3317 reg_addr[DFmode].scalar_in_vmx_p = true;
3319 if (TARGET_UPPER_REGS_DI)
3320 reg_addr[DImode].scalar_in_vmx_p = true;
3322 if (TARGET_UPPER_REGS_SF)
3323 reg_addr[SFmode].scalar_in_vmx_p = true;
3326 /* Setup the fusion operations. */
3327 if (TARGET_P8_FUSION)
3329 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3330 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3331 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3332 if (TARGET_64BIT)
3333 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3336 if (TARGET_P9_FUSION)
3338 struct fuse_insns {
3339 enum machine_mode mode; /* mode of the fused type. */
3340 enum machine_mode pmode; /* pointer mode. */
3341 enum rs6000_reload_reg_type rtype; /* register type. */
3342 enum insn_code load; /* load insn. */
3343 enum insn_code store; /* store insn. */
3346 static const struct fuse_insns addis_insns[] = {
3347 { SFmode, DImode, RELOAD_REG_FPR,
3348 CODE_FOR_fusion_fpr_di_sf_load,
3349 CODE_FOR_fusion_fpr_di_sf_store },
3351 { SFmode, SImode, RELOAD_REG_FPR,
3352 CODE_FOR_fusion_fpr_si_sf_load,
3353 CODE_FOR_fusion_fpr_si_sf_store },
3355 { DFmode, DImode, RELOAD_REG_FPR,
3356 CODE_FOR_fusion_fpr_di_df_load,
3357 CODE_FOR_fusion_fpr_di_df_store },
3359 { DFmode, SImode, RELOAD_REG_FPR,
3360 CODE_FOR_fusion_fpr_si_df_load,
3361 CODE_FOR_fusion_fpr_si_df_store },
3363 { DImode, DImode, RELOAD_REG_FPR,
3364 CODE_FOR_fusion_fpr_di_di_load,
3365 CODE_FOR_fusion_fpr_di_di_store },
3367 { DImode, SImode, RELOAD_REG_FPR,
3368 CODE_FOR_fusion_fpr_si_di_load,
3369 CODE_FOR_fusion_fpr_si_di_store },
3371 { QImode, DImode, RELOAD_REG_GPR,
3372 CODE_FOR_fusion_gpr_di_qi_load,
3373 CODE_FOR_fusion_gpr_di_qi_store },
3375 { QImode, SImode, RELOAD_REG_GPR,
3376 CODE_FOR_fusion_gpr_si_qi_load,
3377 CODE_FOR_fusion_gpr_si_qi_store },
3379 { HImode, DImode, RELOAD_REG_GPR,
3380 CODE_FOR_fusion_gpr_di_hi_load,
3381 CODE_FOR_fusion_gpr_di_hi_store },
3383 { HImode, SImode, RELOAD_REG_GPR,
3384 CODE_FOR_fusion_gpr_si_hi_load,
3385 CODE_FOR_fusion_gpr_si_hi_store },
3387 { SImode, DImode, RELOAD_REG_GPR,
3388 CODE_FOR_fusion_gpr_di_si_load,
3389 CODE_FOR_fusion_gpr_di_si_store },
3391 { SImode, SImode, RELOAD_REG_GPR,
3392 CODE_FOR_fusion_gpr_si_si_load,
3393 CODE_FOR_fusion_gpr_si_si_store },
3395 { SFmode, DImode, RELOAD_REG_GPR,
3396 CODE_FOR_fusion_gpr_di_sf_load,
3397 CODE_FOR_fusion_gpr_di_sf_store },
3399 { SFmode, SImode, RELOAD_REG_GPR,
3400 CODE_FOR_fusion_gpr_si_sf_load,
3401 CODE_FOR_fusion_gpr_si_sf_store },
3403 { DImode, DImode, RELOAD_REG_GPR,
3404 CODE_FOR_fusion_gpr_di_di_load,
3405 CODE_FOR_fusion_gpr_di_di_store },
3407 { DFmode, DImode, RELOAD_REG_GPR,
3408 CODE_FOR_fusion_gpr_di_df_load,
3409 CODE_FOR_fusion_gpr_di_df_store },
3412 enum machine_mode cur_pmode = Pmode;
3413 size_t i;
3415 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3417 enum machine_mode xmode = addis_insns[i].mode;
3418 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3420 if (addis_insns[i].pmode != cur_pmode)
3421 continue;
3423 if (rtype == RELOAD_REG_FPR
3424 && (!TARGET_HARD_FLOAT || !TARGET_FPRS))
3425 continue;
3427 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3428 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
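/* At this point reg_addr[M].fusion_addis_{ld,st}[RTYPE] holds, for each
   mode M in the table above whose pointer mode matches the current Pmode,
   the insn code used when fusing an addis with a dependent load or store
   of that mode in register class RTYPE.  For example, on a 64-bit target
   only the DImode-pointer entries are recorded; the SImode-pointer
   entries are skipped by the continue above.  */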
3432 /* Note which types support fusing a TOC setup with a memory insn. We only
3433 do fused TOCs for medium/large code models. */
3434 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3435 && (TARGET_CMODEL != CMODEL_SMALL))
3437 reg_addr[QImode].fused_toc = true;
3438 reg_addr[HImode].fused_toc = true;
3439 reg_addr[SImode].fused_toc = true;
3440 reg_addr[DImode].fused_toc = true;
3441 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3443 if (TARGET_SINGLE_FLOAT)
3444 reg_addr[SFmode].fused_toc = true;
3445 if (TARGET_DOUBLE_FLOAT)
3446 reg_addr[DFmode].fused_toc = true;
3450 /* Precalculate HARD_REGNO_NREGS. */
3451 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3452 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3453 rs6000_hard_regno_nregs[m][r]
3454 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3456 /* Precalculate HARD_REGNO_MODE_OK. */
3457 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3458 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3459 if (rs6000_hard_regno_mode_ok (r, (machine_mode)m))
3460 rs6000_hard_regno_mode_ok_p[m][r] = true;
3462 /* Precalculate CLASS_MAX_NREGS sizes. */
3463 for (c = 0; c < LIM_REG_CLASSES; ++c)
3465 int reg_size;
3467 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3468 reg_size = UNITS_PER_VSX_WORD;
3470 else if (c == ALTIVEC_REGS)
3471 reg_size = UNITS_PER_ALTIVEC_WORD;
3473 else if (c == FLOAT_REGS)
3474 reg_size = UNITS_PER_FP_WORD;
3476 else
3477 reg_size = UNITS_PER_WORD;
3479 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3481 machine_mode m2 = (machine_mode)m;
3482 int reg_size2 = reg_size;
3484 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3485 in VSX. */
3486 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3487 reg_size2 = UNITS_PER_FP_WORD;
3489 rs6000_class_max_nregs[m][c]
3490 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
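/* Worked example (illustrative): a 16-byte mode such as V4SImode in a VSX
   class with reg_size 16 needs (16 + 16 - 1) / 16 = 1 register, while the
   same 16 bytes in FLOAT_REGS with reg_size 8 need (16 + 8 - 1) / 8 = 2
   FPRs.  The FLOAT128_2REG_P override above likewise forces IBM long
   double to 2 registers even in a VSX class.  */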
3494 if (TARGET_E500_DOUBLE)
3495 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
3497 /* Calculate which modes to automatically generate code to use the
3498 reciprocal divide and square root instructions. In the future, possibly
3499 automatically generate the instructions even if the user did not specify
3500 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
3501 not accurate enough. */
3502 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3503 if (TARGET_FRES)
3504 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3505 if (TARGET_FRE)
3506 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3507 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3508 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3509 if (VECTOR_UNIT_VSX_P (V2DFmode))
3510 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3512 if (TARGET_FRSQRTES)
3513 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3514 if (TARGET_FRSQRTE)
3515 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3516 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3517 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3518 if (VECTOR_UNIT_VSX_P (V2DFmode))
3519 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3521 if (rs6000_recip_control)
3523 if (!flag_finite_math_only)
3524 warning (0, "-mrecip requires -ffinite-math-only or -ffast-math");
3525 if (flag_trapping_math)
3526 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
3527 if (!flag_reciprocal_math)
3528 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
3529 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3531 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3532 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3533 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3535 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3536 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3537 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3539 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3540 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3541 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3543 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3544 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3545 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3547 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3548 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3549 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3551 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3552 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3553 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3555 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3556 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3557 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3560 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3561 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
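/* Illustrative outcome, assuming a cpu with fres/frsqrtes and -mrecip
   plus -ffast-math: rs6000_recip_bits[SFmode] ends up with both the
   HAVE_* and AUTO_* bits for RE and RSQRTE set, so the compiler may
   expand both x/y and 1/sqrt(x) using the estimate instructions plus
   Newton-Raphson refinement.  */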
3565 /* Update the addr mask bits in reg_addr to help secondary reload and the
3566 GO_IF_LEGITIMATE_ADDRESS support figure out the appropriate addressing
3567 to use. */
3568 rs6000_setup_reg_addr_masks ();
3570 if (global_init_p || TARGET_DEBUG_TARGET)
3572 if (TARGET_DEBUG_REG)
3573 rs6000_debug_reg_global ();
3575 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3576 fprintf (stderr,
3577 "SImode variable mult cost = %d\n"
3578 "SImode constant mult cost = %d\n"
3579 "SImode short constant mult cost = %d\n"
3580 "DImode multipliciation cost = %d\n"
3581 "SImode division cost = %d\n"
3582 "DImode division cost = %d\n"
3583 "Simple fp operation cost = %d\n"
3584 "DFmode multiplication cost = %d\n"
3585 "SFmode division cost = %d\n"
3586 "DFmode division cost = %d\n"
3587 "cache line size = %d\n"
3588 "l1 cache size = %d\n"
3589 "l2 cache size = %d\n"
3590 "simultaneous prefetches = %d\n"
3591 "\n",
3592 rs6000_cost->mulsi,
3593 rs6000_cost->mulsi_const,
3594 rs6000_cost->mulsi_const9,
3595 rs6000_cost->muldi,
3596 rs6000_cost->divsi,
3597 rs6000_cost->divdi,
3598 rs6000_cost->fp,
3599 rs6000_cost->dmul,
3600 rs6000_cost->sdiv,
3601 rs6000_cost->ddiv,
3602 rs6000_cost->cache_line_size,
3603 rs6000_cost->l1_cache_size,
3604 rs6000_cost->l2_cache_size,
3605 rs6000_cost->simultaneous_prefetches);
3609 #if TARGET_MACHO
3610 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3612 static void
3613 darwin_rs6000_override_options (void)
3615 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3616 off. */
3617 rs6000_altivec_abi = 1;
3618 TARGET_ALTIVEC_VRSAVE = 1;
3619 rs6000_current_abi = ABI_DARWIN;
3621 if (DEFAULT_ABI == ABI_DARWIN
3622 && TARGET_64BIT)
3623 darwin_one_byte_bool = 1;
3625 if (TARGET_64BIT && ! TARGET_POWERPC64)
3627 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3628 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3630 if (flag_mkernel)
3632 rs6000_default_long_calls = 1;
3633 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3636 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3637 Altivec. */
3638 if (!flag_mkernel && !flag_apple_kext
3639 && TARGET_64BIT
3640 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3641 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3643 /* Unless the user (not the configurer) has explicitly overridden
3644 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3645 G4 unless targeting the kernel. */
3646 if (!flag_mkernel
3647 && !flag_apple_kext
3648 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3649 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3650 && ! global_options_set.x_rs6000_cpu_index)
3652 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3655 #endif
3657 /* If not otherwise specified by a target, make 'long double' equivalent to
3658 'double'. */
3660 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3661 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3662 #endif
3664 /* Return the builtin mask of the various options in use that could affect
3665 which builtins are available. In the past we used target_flags, but we've
3666 run out of bits, and some options like SPE and PAIRED are no longer in
3667 target_flags. */
3669 HOST_WIDE_INT
3670 rs6000_builtin_mask_calculate (void)
3672 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3673 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3674 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3675 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3676 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3677 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3678 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3679 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3680 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3681 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3682 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3683 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3684 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3685 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3686 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3687 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3688 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3689 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3690 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3691 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3692 | ((TARGET_FLOAT128) ? RS6000_BTM_FLOAT128 : 0));
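/* This mask is consumed when a built-in is expanded: a built-in whose
   required RS6000_BTM_* bits are not all present in rs6000_builtin_mask
   is rejected (see rs6000_invalid_builtin), e.g. an AltiVec built-in
   compiled without TARGET_ALTIVEC.  */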
3695 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3696 to clobber the XER[CA] bit because clobbering that bit without telling
3697 the compiler worked just fine with versions of GCC before GCC 5, and
3698 breaking a lot of older code in ways that are hard to track down is
3699 not such a great idea. */
3701 static rtx_insn *
3702 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3703 vec<const char *> &/*constraints*/,
3704 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3706 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3707 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3708 return NULL;
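/* For example (illustrative user code), an asm such as

     asm ("addic %0,%1,-1\n\tsubfe %0,%0,%0" : "=r" (t) : "r" (x));

   modifies XER[CA] via addic without declaring the clobber; the implicit
   clobber added above keeps such pre-GCC 5 style code working.  */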
3711 /* Override command line options. Mostly we process the processor type and
3712 sometimes adjust other TARGET_ options. */
3714 static bool
3715 rs6000_option_override_internal (bool global_init_p)
3717 bool ret = true;
3718 bool have_cpu = false;
3720 /* The default cpu requested at configure time, if any. */
3721 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3723 HOST_WIDE_INT set_masks;
3724 int cpu_index;
3725 int tune_index;
3726 struct cl_target_option *main_target_opt
3727 = ((global_init_p || target_option_default_node == NULL)
3728 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3730 /* Print defaults. */
3731 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3732 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3734 /* Remember the explicit arguments. */
3735 if (global_init_p)
3736 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3738 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3739 library functions, so warn about it. The flag may be useful for
3740 performance studies from time to time though, so don't disable it
3741 entirely. */
3742 if (global_options_set.x_rs6000_alignment_flags
3743 && rs6000_alignment_flags == MASK_ALIGN_POWER
3744 && DEFAULT_ABI == ABI_DARWIN
3745 && TARGET_64BIT)
3746 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3747 " it is incompatible with the installed C and C++ libraries");
3749 /* Numerous experiments show that IRA-based loop pressure
3750 calculation works better for RTL loop invariant motion on targets
3751 with enough (>= 32) registers. It is an expensive optimization,
3752 so it is enabled only when optimizing for peak performance. */
3753 if (optimize >= 3 && global_init_p
3754 && !global_options_set.x_flag_ira_loop_pressure)
3755 flag_ira_loop_pressure = 1;
3757 /* Set the pointer size. */
3758 if (TARGET_64BIT)
3760 rs6000_pmode = (int)DImode;
3761 rs6000_pointer_size = 64;
3763 else
3765 rs6000_pmode = (int)SImode;
3766 rs6000_pointer_size = 32;
3769 /* Some OSs don't support saving the high part of 64-bit registers on context
3770 switch. Other OSs don't support saving Altivec registers. On those OSs,
3771 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3772 if the user wants either, the user must explicitly specify them and we
3773 won't interfere with the user's specification. */
3775 set_masks = POWERPC_MASKS;
3776 #ifdef OS_MISSING_POWERPC64
3777 if (OS_MISSING_POWERPC64)
3778 set_masks &= ~OPTION_MASK_POWERPC64;
3779 #endif
3780 #ifdef OS_MISSING_ALTIVEC
3781 if (OS_MISSING_ALTIVEC)
3782 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3783 #endif
3785 /* Don't override by the processor default if given explicitly. */
3786 set_masks &= ~rs6000_isa_flags_explicit;
3788 /* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
3789 the cpu in a target attribute or pragma, but did not specify a tuning
3790 option, use the cpu for the tuning option rather than the option specified
3791 with -mtune on the command line. Process a '--with-cpu' configuration
3792 request as an implicit --cpu. */
3793 if (rs6000_cpu_index >= 0)
3795 cpu_index = rs6000_cpu_index;
3796 have_cpu = true;
3798 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3800 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3801 have_cpu = true;
3803 else if (implicit_cpu)
3805 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3806 have_cpu = true;
3808 else
3810 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3811 const char *default_cpu = ((!TARGET_POWERPC64)
3812 ? "powerpc"
3813 : ((BYTES_BIG_ENDIAN)
3814 ? "powerpc64"
3815 : "powerpc64le"));
3817 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3818 have_cpu = false;
3821 gcc_assert (cpu_index >= 0);
3823 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3824 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3825 with those from the cpu, except for options that were explicitly set. If
3826 we don't have a cpu, do not override the target bits set in
3827 TARGET_DEFAULT. */
3828 if (have_cpu)
3830 rs6000_isa_flags &= ~set_masks;
3831 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3832 & set_masks);
3834 else
3836 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3837 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3838 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3839 to using rs6000_isa_flags, we need to do the initialization here.
3841 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3842 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3843 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
3844 : processor_target_table[cpu_index].target_enable);
3845 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3848 if (rs6000_tune_index >= 0)
3849 tune_index = rs6000_tune_index;
3850 else if (have_cpu)
3851 rs6000_tune_index = tune_index = cpu_index;
3852 else
3854 size_t i;
3855 enum processor_type tune_proc
3856 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3858 tune_index = -1;
3859 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3860 if (processor_target_table[i].processor == tune_proc)
3862 rs6000_tune_index = tune_index = i;
3863 break;
3867 gcc_assert (tune_index >= 0);
3868 rs6000_cpu = processor_target_table[tune_index].processor;
3870 /* Pick defaults for SPE related control flags. Do this early to make sure
3871 that the TARGET_ macros are representative ASAP. */
3873 int spe_capable_cpu =
3874 (rs6000_cpu == PROCESSOR_PPC8540
3875 || rs6000_cpu == PROCESSOR_PPC8548);
3877 if (!global_options_set.x_rs6000_spe_abi)
3878 rs6000_spe_abi = spe_capable_cpu;
3880 if (!global_options_set.x_rs6000_spe)
3881 rs6000_spe = spe_capable_cpu;
3883 if (!global_options_set.x_rs6000_float_gprs)
3884 rs6000_float_gprs =
3885 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
3886 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
3887 : 0);
3890 if (global_options_set.x_rs6000_spe_abi
3891 && rs6000_spe_abi
3892 && !TARGET_SPE_ABI)
3893 error ("not configured for SPE ABI");
3895 if (global_options_set.x_rs6000_spe
3896 && rs6000_spe
3897 && !TARGET_SPE)
3898 error ("not configured for SPE instruction set");
3900 if (main_target_opt != NULL
3901 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
3902 || (main_target_opt->x_rs6000_spe != rs6000_spe)
3903 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
3904 error ("target attribute or pragma changes SPE ABI");
3906 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3907 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3908 || rs6000_cpu == PROCESSOR_PPCE5500)
3910 if (TARGET_ALTIVEC)
3911 error ("AltiVec not supported in this target");
3912 if (TARGET_SPE)
3913 error ("SPE not supported in this target");
3915 if (rs6000_cpu == PROCESSOR_PPCE6500)
3917 if (TARGET_SPE)
3918 error ("SPE not supported in this target");
3921 /* Disable Cell microcode if we are optimizing for the Cell
3922 and not optimizing for size. */
3923 if (rs6000_gen_cell_microcode == -1)
3924 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
3925 && !optimize_size);
3927 /* If we are optimizing big endian systems for space and it's OK to
3928 use instructions that would be microcoded on the Cell, use the
3929 load/store multiple and string instructions. */
3930 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
3931 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
3932 | OPTION_MASK_STRING);
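/* The idiom FLAGS |= ~explicit & MASK, used here and below, turns MASK
   bits on only where the user has not set them explicitly: an explicit
   -mno-multiple, say, leaves its bit set in rs6000_isa_flags_explicit and
   therefore cleared out of the value OR'd in.  */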
3934 /* Don't allow -mmultiple or -mstring on little endian systems
3935 unless the cpu is a 750, because the hardware doesn't support the
3936 instructions used in little endian mode, and using them causes an
3937 alignment trap. The 750 does not cause an alignment trap (except
3938 when the target is unaligned). */
3940 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
3942 if (TARGET_MULTIPLE)
3944 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3945 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3946 warning (0, "-mmultiple is not supported on little endian systems");
3949 if (TARGET_STRING)
3951 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3952 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3953 warning (0, "-mstring is not supported on little endian systems");
3957 /* If little-endian, default to -mstrict-align on older processors.
3958 Testing for htm matches power8 and later. */
3959 if (!BYTES_BIG_ENDIAN
3960 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3961 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3963 /* -maltivec={le,be} implies -maltivec. */
3964 if (rs6000_altivec_element_order != 0)
3965 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3967 /* Disallow -maltivec=le in big endian mode for now. This is not
3968 known to be useful for anyone. */
3969 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
3971 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
3972 rs6000_altivec_element_order = 0;
3975 /* Add some warnings for VSX. */
3976 if (TARGET_VSX)
3978 const char *msg = NULL;
3979 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3980 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3982 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3983 msg = N_("-mvsx requires hardware floating point");
3984 else
3986 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3987 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3990 else if (TARGET_PAIRED_FLOAT)
3991 msg = N_("-mvsx and -mpaired are incompatible");
3992 else if (TARGET_AVOID_XFORM > 0)
3993 msg = N_("-mvsx needs indexed addressing");
3994 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3995 & OPTION_MASK_ALTIVEC))
3997 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3998 msg = N_("-mvsx and -mno-altivec are incompatible");
3999 else
4000 msg = N_("-mno-altivec disables vsx");
4003 if (msg)
4005 warning (0, msg);
4006 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4007 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4011 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4012 the -mcpu setting to enable options that conflict. */
4013 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4014 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4015 | OPTION_MASK_ALTIVEC
4016 | OPTION_MASK_VSX)) != 0)
4017 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4018 | OPTION_MASK_DIRECT_MOVE)
4019 & ~rs6000_isa_flags_explicit);
4021 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4022 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4024 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4025 unless the user explicitly used the -mno-<option> to disable the code. */
4026 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_DFORM_SCALAR
4027 || TARGET_P9_DFORM_VECTOR || TARGET_P9_DFORM_BOTH > 0 || TARGET_P9_MINMAX)
4028 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4029 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4030 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4031 else if (TARGET_VSX)
4032 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4033 else if (TARGET_POPCNTD)
4034 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
4035 else if (TARGET_DFP)
4036 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4037 else if (TARGET_CMPB)
4038 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
4039 else if (TARGET_FPRND)
4040 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
4041 else if (TARGET_POPCNTB)
4042 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
4043 else if (TARGET_ALTIVEC)
4044 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
4046 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4048 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4049 error ("-mcrypto requires -maltivec");
4050 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4053 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4055 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4056 error ("-mdirect-move requires -mvsx");
4057 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4060 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4062 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4063 error ("-mpower8-vector requires -maltivec");
4064 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4067 if (TARGET_P8_VECTOR && !TARGET_VSX)
4069 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4070 error ("-mpower8-vector requires -mvsx");
4071 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4074 if (TARGET_VSX_TIMODE && !TARGET_VSX)
4076 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
4077 error ("-mvsx-timode requires -mvsx");
4078 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4081 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4083 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4084 error ("-mhard-dfp requires -mhard-float");
4085 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4088 /* Allow an explicit -mupper-regs to set -mupper-regs-df, -mupper-regs-di,
4089 and -mupper-regs-sf, depending on the cpu, unless the user explicitly also
4090 set the individual option. */
4091 if (TARGET_UPPER_REGS > 0)
4093 if (TARGET_VSX
4094 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4096 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DF;
4097 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4099 if (TARGET_VSX
4100 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4102 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DI;
4103 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4105 if (TARGET_P8_VECTOR
4106 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4108 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_SF;
4109 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4112 else if (TARGET_UPPER_REGS == 0)
4114 if (TARGET_VSX
4115 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4117 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4118 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4120 if (TARGET_VSX
4121 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4123 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4124 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4126 if (TARGET_P8_VECTOR
4127 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4129 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4130 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4134 if (TARGET_UPPER_REGS_DF && !TARGET_VSX)
4136 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4137 error ("-mupper-regs-df requires -mvsx");
4138 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4141 if (TARGET_UPPER_REGS_DI && !TARGET_VSX)
4143 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI)
4144 error ("-mupper-regs-di requires -mvsx");
4145 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4148 if (TARGET_UPPER_REGS_SF && !TARGET_P8_VECTOR)
4150 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4151 error ("-mupper-regs-sf requires -mpower8-vector");
4152 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4155 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4156 silently turn off quad memory mode. */
4157 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4159 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4160 warning (0, N_("-mquad-memory requires 64-bit mode"));
4162 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4163 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4165 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4166 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4169 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4170 the words are reversed, but atomic operations can still be done by
4171 swapping the words. */
4172 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4174 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4175 warning (0, N_("-mquad-memory is not available in little endian mode"));
4177 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4180 /* Assume that if the user asked for normal quad memory instructions, they
4181 want the atomic versions as well, unless they explicitly told us not to
4182 use quad word atomic instructions. */
4183 if (TARGET_QUAD_MEMORY
4184 && !TARGET_QUAD_MEMORY_ATOMIC
4185 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4186 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4188 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4189 generating power8 instructions. */
4190 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4191 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4192 & OPTION_MASK_P8_FUSION);
4194 /* Setting additional fusion flags turns on base fusion. */
4195 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4197 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4199 if (TARGET_P8_FUSION_SIGN)
4200 error ("-mpower8-fusion-sign requires -mpower8-fusion");
4202 if (TARGET_TOC_FUSION)
4203 error ("-mtoc-fusion requires -mpower8-fusion");
4205 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4207 else
4208 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4211 /* Power9 fusion is a superset of power8 fusion. */
4212 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4214 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4216 /* We prefer to not mention undocumented options in
4217 error messages. However, if users have managed to select
4218 power9-fusion without selecting power8-fusion, they
4219 already know about undocumented flags. */
4220 error ("-mpower9-fusion requires -mpower8-fusion");
4221 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4223 else
4224 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4227 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4228 generating power9 instructions. */
4229 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4230 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4231 & OPTION_MASK_P9_FUSION);
4233 /* Power8 does not fuse sign-extended loads with the addis. If we are
4234 optimizing at high levels for speed, convert a sign-extended load into a
4235 zero-extending load and an explicit sign extension. */
4236 if (TARGET_P8_FUSION
4237 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4238 && optimize_function_for_speed_p (cfun)
4239 && optimize >= 3)
4240 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
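/* E.g. a sign-extending halfword load (lha), which power8 will not fuse
   with a preceding addis, can instead be emitted as a fusable
   zero-extending lhz followed by an explicit extsh.  */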
4242 /* TOC fusion requires 64-bit and medium/large code model. */
4243 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4245 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4246 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4247 warning (0, N_("-mtoc-fusion requires 64-bit"));
4250 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4252 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4253 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4254 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4257 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4258 model. */
4259 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4260 && (TARGET_CMODEL != CMODEL_SMALL)
4261 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4262 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4264 /* ISA 3.0 vector instructions include ISA 2.07. */
4265 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4267 /* We prefer to not mention undocumented options in
4268 error messages. However, if users have managed to select
4269 power9-vector without selecting power8-vector, they
4270 already know about undocumented flags. */
4271 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4272 error ("-mpower9-vector requires -mpower8-vector");
4273 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4276 /* -mpower9-dform turns on both -mpower9-dform-scalar and
4277 -mpower9-dform-vector. */
4278 if (TARGET_P9_DFORM_BOTH > 0)
4280 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4281 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
4283 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4284 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_SCALAR;
4286 else if (TARGET_P9_DFORM_BOTH == 0)
4288 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4289 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
4291 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4292 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4295 /* ISA 3.0 D-form instructions require p9-vector and upper-regs. */
4296 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR) && !TARGET_P9_VECTOR)
4298 /* We prefer to not mention undocumented options in
4299 error messages. However, if users have managed to select
4300 power9-dform without selecting power9-vector, they
4301 already know about undocumented flags. */
4302 if (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4303 error ("-mpower9-dform requires -mpower9-vector");
4304 rs6000_isa_flags &= ~(OPTION_MASK_P9_DFORM_SCALAR
4305 | OPTION_MASK_P9_DFORM_VECTOR);
4308 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_DF)
4310 /* We prefer to not mention undocumented options in
4311 error messages. However, if users have managed to select
4312 power9-dform without selecting upper-regs-df, they
4313 already know about undocumented flags. */
4314 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4315 error ("-mpower9-dform requires -mupper-regs-df");
4316 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4319 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_SF)
4321 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4322 error ("-mpower9-dform requires -mupper-regs-sf");
4323 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4326 /* There have been bugs with -mvsx-timode that don't show up with -mlra,
4327 but do show up with -mno-lra. Given -mlra will become the default once
4328 PR 69847 is fixed, turn off the options with problems by default if
4329 -mno-lra was used, and warn if the user explicitly asked for the option.
4331 Enable -mpower9-dform-vector by default if LRA and other power9 options.
4332 Enable -mvsx-timode by default if LRA and VSX. */
4333 if (!TARGET_LRA)
4335 if (TARGET_VSX_TIMODE)
4337 if ((rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) != 0)
4338 warning (0, "-mvsx-timode might need -mlra");
4340 else
4341 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4345 else
4347 if (TARGET_VSX && !TARGET_VSX_TIMODE
4348 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
4349 rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
4352 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4353 support. If we only have ISA 2.06 support, and the user did not specify
4354 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4355 but we don't enable the full vectorization support. */
4356 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4357 TARGET_ALLOW_MOVMISALIGN = 1;
4359 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4361 if (TARGET_ALLOW_MOVMISALIGN > 0
4362 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4363 error ("-mallow-movmisalign requires -mvsx");
4365 TARGET_ALLOW_MOVMISALIGN = 0;
4368 /* Determine when unaligned vector accesses are permitted, and when
4369 they are preferred over masked Altivec loads. Note that if
4370 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4371 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4372 not true. */
4373 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4375 if (!TARGET_VSX)
4377 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4378 error ("-mefficient-unaligned-vsx requires -mvsx");
4380 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4383 else if (!TARGET_ALLOW_MOVMISALIGN)
4385 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4386 error ("-mefficient-unaligned-vsx requires -mallow-movmisalign");
4388 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4392 /* __float128 requires VSX support. */
4393 if (TARGET_FLOAT128 && !TARGET_VSX)
4395 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128) != 0)
4396 error ("-mfloat128 requires VSX support");
4398 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128 | OPTION_MASK_FLOAT128_HW);
4401 /* If we have -mfloat128 and full ISA 3.0 support, enable -mfloat128-hardware
4402 by default. */
4403 if (TARGET_FLOAT128 && !TARGET_FLOAT128_HW
4404 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4405 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4407 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4408 if ((rs6000_isa_flags & OPTION_MASK_FLOAT128) != 0)
4409 rs6000_isa_flags_explicit |= OPTION_MASK_FLOAT128_HW;
4412 /* IEEE 128-bit floating point hardware instructions imply enabling
4413 __float128. */
4414 if (TARGET_FLOAT128_HW
4415 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4417 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4418 error ("-mfloat128-hardware requires full ISA 3.0 support");
4420 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4423 if (TARGET_FLOAT128_HW
4424 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128) == 0)
4425 rs6000_isa_flags |= OPTION_MASK_FLOAT128;
4427 /* Print the options after updating the defaults. */
4428 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4429 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4431 /* E500mc does "better" if we inline more aggressively. Respect the
4432 user's opinion, though. */
4433 if (rs6000_block_move_inline_limit == 0
4434 && (rs6000_cpu == PROCESSOR_PPCE500MC
4435 || rs6000_cpu == PROCESSOR_PPCE500MC64
4436 || rs6000_cpu == PROCESSOR_PPCE5500
4437 || rs6000_cpu == PROCESSOR_PPCE6500))
4438 rs6000_block_move_inline_limit = 128;
4440 /* store_one_arg depends on expand_block_move to handle at least the
4441 size of reg_parm_stack_space. */
4442 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4443 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4445 if (global_init_p)
4447 /* If the appropriate debug option is enabled, replace the target hooks
4448 with debug versions that call the real version and then print
4449 debugging information. */
4450 if (TARGET_DEBUG_COST)
4452 targetm.rtx_costs = rs6000_debug_rtx_costs;
4453 targetm.address_cost = rs6000_debug_address_cost;
4454 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4457 if (TARGET_DEBUG_ADDR)
4459 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4460 targetm.legitimize_address = rs6000_debug_legitimize_address;
4461 rs6000_secondary_reload_class_ptr
4462 = rs6000_debug_secondary_reload_class;
4463 rs6000_secondary_memory_needed_ptr
4464 = rs6000_debug_secondary_memory_needed;
4465 rs6000_cannot_change_mode_class_ptr
4466 = rs6000_debug_cannot_change_mode_class;
4467 rs6000_preferred_reload_class_ptr
4468 = rs6000_debug_preferred_reload_class;
4469 rs6000_legitimize_reload_address_ptr
4470 = rs6000_debug_legitimize_reload_address;
4471 rs6000_mode_dependent_address_ptr
4472 = rs6000_debug_mode_dependent_address;
4475 if (rs6000_veclibabi_name)
4477 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4478 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4479 else
4481 error ("unknown vectorization library ABI type (%s) for "
4482 "-mveclibabi= switch", rs6000_veclibabi_name);
4483 ret = false;
4488 if (!global_options_set.x_rs6000_long_double_type_size)
4490 if (main_target_opt != NULL
4491 && (main_target_opt->x_rs6000_long_double_type_size
4492 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4493 error ("target attribute or pragma changes long double size");
4494 else
4495 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4498 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4499 if (!global_options_set.x_rs6000_ieeequad)
4500 rs6000_ieeequad = 1;
4501 #endif
4503 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4504 target attribute or pragma which automatically enables both options,
4505 unless the altivec ABI was set. This is set by default for 64-bit, but
4506 not for 32-bit. */
4507 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4508 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4509 | OPTION_MASK_FLOAT128)
4510 & ~rs6000_isa_flags_explicit);
4512 /* Enable Altivec ABI for AIX -maltivec. */
4513 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4515 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4516 error ("target attribute or pragma changes AltiVec ABI");
4517 else
4518 rs6000_altivec_abi = 1;
4521 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4522 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4523 be explicitly overridden in either case. */
4524 if (TARGET_ELF)
4526 if (!global_options_set.x_rs6000_altivec_abi
4527 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4529 if (main_target_opt != NULL &&
4530 !main_target_opt->x_rs6000_altivec_abi)
4531 error ("target attribute or pragma changes AltiVec ABI");
4532 else
4533 rs6000_altivec_abi = 1;
4537 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4538 So far, the only darwin64 targets are also MACH-O. */
4539 if (TARGET_MACHO
4540 && DEFAULT_ABI == ABI_DARWIN
4541 && TARGET_64BIT)
4543 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4544 error ("target attribute or pragma changes darwin64 ABI");
4545 else
4547 rs6000_darwin64_abi = 1;
4548 /* Default to natural alignment, for better performance. */
4549 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4553 /* Place FP constants in the constant pool instead of the TOC
4554 if section anchors are enabled. */
4555 if (flag_section_anchors
4556 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4557 TARGET_NO_FP_IN_TOC = 1;
4559 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4560 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4562 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4563 SUBTARGET_OVERRIDE_OPTIONS;
4564 #endif
4565 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4566 SUBSUBTARGET_OVERRIDE_OPTIONS;
4567 #endif
4568 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4569 SUB3TARGET_OVERRIDE_OPTIONS;
4570 #endif
4572 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4573 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4575 /* For the E500 family of cores, reset the single/double FP flags to let us
4576 check that they remain constant across attributes or pragmas. Also,
4577 clear a possible request for string instructions, which are not supported
4578 and which we might have silently enabled above for -Os.
4580 For other families, clear ISEL in case it was set implicitly. */
4583 switch (rs6000_cpu)
4585 case PROCESSOR_PPC8540:
4586 case PROCESSOR_PPC8548:
4587 case PROCESSOR_PPCE500MC:
4588 case PROCESSOR_PPCE500MC64:
4589 case PROCESSOR_PPCE5500:
4590 case PROCESSOR_PPCE6500:
4592 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
4593 rs6000_double_float = TARGET_E500_DOUBLE;
4595 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4597 break;
4599 default:
4601 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4602 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4604 break;
4607 if (main_target_opt)
4609 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4610 error ("target attribute or pragma changes single precision floating "
4611 "point");
4612 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4613 error ("target attribute or pragma changes double precision floating "
4614 "point");
4617 /* Detect invalid option combinations with E500. */
4618 CHECK_E500_OPTIONS;
4620 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4621 && rs6000_cpu != PROCESSOR_POWER5
4622 && rs6000_cpu != PROCESSOR_POWER6
4623 && rs6000_cpu != PROCESSOR_POWER7
4624 && rs6000_cpu != PROCESSOR_POWER8
4625 && rs6000_cpu != PROCESSOR_POWER9
4626 && rs6000_cpu != PROCESSOR_PPCA2
4627 && rs6000_cpu != PROCESSOR_CELL
4628 && rs6000_cpu != PROCESSOR_PPC476);
4629 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4630 || rs6000_cpu == PROCESSOR_POWER5
4631 || rs6000_cpu == PROCESSOR_POWER7
4632 || rs6000_cpu == PROCESSOR_POWER8);
4633 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4634 || rs6000_cpu == PROCESSOR_POWER5
4635 || rs6000_cpu == PROCESSOR_POWER6
4636 || rs6000_cpu == PROCESSOR_POWER7
4637 || rs6000_cpu == PROCESSOR_POWER8
4638 || rs6000_cpu == PROCESSOR_POWER9
4639 || rs6000_cpu == PROCESSOR_PPCE500MC
4640 || rs6000_cpu == PROCESSOR_PPCE500MC64
4641 || rs6000_cpu == PROCESSOR_PPCE5500
4642 || rs6000_cpu == PROCESSOR_PPCE6500);
4644 /* Allow debug switches to override the above settings. These are set to -1
4645 in rs6000.opt to indicate the user hasn't directly set the switch. */
4646 if (TARGET_ALWAYS_HINT >= 0)
4647 rs6000_always_hint = TARGET_ALWAYS_HINT;
4649 if (TARGET_SCHED_GROUPS >= 0)
4650 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4652 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4653 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4655 rs6000_sched_restricted_insns_priority
4656 = (rs6000_sched_groups ? 1 : 0);
4658 /* Handle -msched-costly-dep option. */
4659 rs6000_sched_costly_dep
4660 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4662 if (rs6000_sched_costly_dep_str)
4664 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4665 rs6000_sched_costly_dep = no_dep_costly;
4666 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4667 rs6000_sched_costly_dep = all_deps_costly;
4668 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4669 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4670 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4671 rs6000_sched_costly_dep = store_to_load_dep_costly;
4672 else
4673 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4674 atoi (rs6000_sched_costly_dep_str));
4677 /* Handle -minsert-sched-nops option. */
4678 rs6000_sched_insert_nops
4679 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4681 if (rs6000_sched_insert_nops_str)
4683 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4684 rs6000_sched_insert_nops = sched_finish_none;
4685 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4686 rs6000_sched_insert_nops = sched_finish_pad_groups;
4687 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4688 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4689 else
4690 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4691 atoi (rs6000_sched_insert_nops_str));
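/* Accepted spellings, per the parsing above:
   -msched-costly-dep={no,all,true_store_to_load,store_to_load} or a bare
   number, and -minsert-sched-nops={no,pad,regroup_exact} or a bare
   number; the numeric forms are simply cast to the corresponding enum.  */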
4694 if (global_init_p)
4696 #ifdef TARGET_REGNAMES
4697 /* If the user desires alternate register names, copy in the
4698 alternate names now. */
4699 if (TARGET_REGNAMES)
4700 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4701 #endif
4703 /* Set aix_struct_return last, after the ABI is determined.
4704 If -maix-struct-return or -msvr4-struct-return was explicitly
4705 used, don't override with the ABI default. */
4706 if (!global_options_set.x_aix_struct_return)
4707 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4709 #if 0
4710 /* IBM XL compiler defaults to unsigned bitfields. */
4711 if (TARGET_XL_COMPAT)
4712 flag_signed_bitfields = 0;
4713 #endif
4715 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4716 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4718 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4720 /* We can only guarantee the availability of DI pseudo-ops when
4721 assembling for 64-bit targets. */
4722 if (!TARGET_64BIT)
4724 targetm.asm_out.aligned_op.di = NULL;
4725 targetm.asm_out.unaligned_op.di = NULL;
4729 /* Set branch target alignment, if not optimizing for size. */
4730 if (!optimize_size)
4732 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4733 8-byte aligned to avoid misprediction by the branch predictor. */
4734 if (rs6000_cpu == PROCESSOR_TITAN
4735 || rs6000_cpu == PROCESSOR_CELL)
4737 if (align_functions <= 0)
4738 align_functions = 8;
4739 if (align_jumps <= 0)
4740 align_jumps = 8;
4741 if (align_loops <= 0)
4742 align_loops = 8;
4744 if (rs6000_align_branch_targets)
4746 if (align_functions <= 0)
4747 align_functions = 16;
4748 if (align_jumps <= 0)
4749 align_jumps = 16;
4750 if (align_loops <= 0)
4752 can_override_loop_align = 1;
4753 align_loops = 16;
4756 if (align_jumps_max_skip <= 0)
4757 align_jumps_max_skip = 15;
4758 if (align_loops_max_skip <= 0)
4759 align_loops_max_skip = 15;
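/* Net effect (derived from the code above): Cell and Titan default to
   8-byte alignment; cpus with rs6000_align_branch_targets default to
   16-byte alignment for functions, jumps and loops, inserting at most
   15 bytes of padding to reach it.  */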
4762 /* Arrange to save and restore machine status around nested functions. */
4763 init_machine_status = rs6000_init_machine_status;
4765 /* We should always be splitting complex arguments, but we can't break
4766 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4767 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4768 targetm.calls.split_complex_arg = NULL;
4771 /* Initialize rs6000_cost with the appropriate target costs. */
4772 if (optimize_size)
4773 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4774 else
4775 switch (rs6000_cpu)
4777 case PROCESSOR_RS64A:
4778 rs6000_cost = &rs64a_cost;
4779 break;
4781 case PROCESSOR_MPCCORE:
4782 rs6000_cost = &mpccore_cost;
4783 break;
4785 case PROCESSOR_PPC403:
4786 rs6000_cost = &ppc403_cost;
4787 break;
4789 case PROCESSOR_PPC405:
4790 rs6000_cost = &ppc405_cost;
4791 break;
4793 case PROCESSOR_PPC440:
4794 rs6000_cost = &ppc440_cost;
4795 break;
4797 case PROCESSOR_PPC476:
4798 rs6000_cost = &ppc476_cost;
4799 break;
4801 case PROCESSOR_PPC601:
4802 rs6000_cost = &ppc601_cost;
4803 break;
4805 case PROCESSOR_PPC603:
4806 rs6000_cost = &ppc603_cost;
4807 break;
4809 case PROCESSOR_PPC604:
4810 rs6000_cost = &ppc604_cost;
4811 break;
4813 case PROCESSOR_PPC604e:
4814 rs6000_cost = &ppc604e_cost;
4815 break;
4817 case PROCESSOR_PPC620:
4818 rs6000_cost = &ppc620_cost;
4819 break;
4821 case PROCESSOR_PPC630:
4822 rs6000_cost = &ppc630_cost;
4823 break;
4825 case PROCESSOR_CELL:
4826 rs6000_cost = &ppccell_cost;
4827 break;
4829 case PROCESSOR_PPC750:
4830 case PROCESSOR_PPC7400:
4831 rs6000_cost = &ppc750_cost;
4832 break;
4834 case PROCESSOR_PPC7450:
4835 rs6000_cost = &ppc7450_cost;
4836 break;
4838 case PROCESSOR_PPC8540:
4839 case PROCESSOR_PPC8548:
4840 rs6000_cost = &ppc8540_cost;
4841 break;
4843 case PROCESSOR_PPCE300C2:
4844 case PROCESSOR_PPCE300C3:
4845 rs6000_cost = &ppce300c2c3_cost;
4846 break;
4848 case PROCESSOR_PPCE500MC:
4849 rs6000_cost = &ppce500mc_cost;
4850 break;
4852 case PROCESSOR_PPCE500MC64:
4853 rs6000_cost = &ppce500mc64_cost;
4854 break;
4856 case PROCESSOR_PPCE5500:
4857 rs6000_cost = &ppce5500_cost;
4858 break;
4860 case PROCESSOR_PPCE6500:
4861 rs6000_cost = &ppce6500_cost;
4862 break;
4864 case PROCESSOR_TITAN:
4865 rs6000_cost = &titan_cost;
4866 break;
4868 case PROCESSOR_POWER4:
4869 case PROCESSOR_POWER5:
4870 rs6000_cost = &power4_cost;
4871 break;
4873 case PROCESSOR_POWER6:
4874 rs6000_cost = &power6_cost;
4875 break;
4877 case PROCESSOR_POWER7:
4878 rs6000_cost = &power7_cost;
4879 break;
4881 case PROCESSOR_POWER8:
4882 rs6000_cost = &power8_cost;
4883 break;
4885 case PROCESSOR_POWER9:
4886 rs6000_cost = &power9_cost;
4887 break;
4889 case PROCESSOR_PPCA2:
4890 rs6000_cost = &ppca2_cost;
4891 break;
4893 default:
4894 gcc_unreachable ();
4897 if (global_init_p)
4899 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4900 rs6000_cost->simultaneous_prefetches,
4901 global_options.x_param_values,
4902 global_options_set.x_param_values);
4903 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4904 global_options.x_param_values,
4905 global_options_set.x_param_values);
4906 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4907 rs6000_cost->cache_line_size,
4908 global_options.x_param_values,
4909 global_options_set.x_param_values);
4910 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4911 global_options.x_param_values,
4912 global_options_set.x_param_values);
4914 /* Increase loop peeling limits based on performance analysis. */
4915 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4916 global_options.x_param_values,
4917 global_options_set.x_param_values);
4918 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4919 global_options.x_param_values,
4920 global_options_set.x_param_values);
4922 /* If using typedef char *va_list, signal that
4923 __builtin_va_start (&ap, 0) can be optimized to
4924 ap = __builtin_next_arg (0). */
4925 if (DEFAULT_ABI != ABI_V4)
4926 targetm.expand_builtin_va_start = NULL;
4929 /* Set up single/double float flags.
4930 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
4931 then set both flags. */
4932 if (TARGET_HARD_FLOAT && TARGET_FPRS
4933 && rs6000_single_float == 0 && rs6000_double_float == 0)
4934 rs6000_single_float = rs6000_double_float = 1;
4936 /* If not explicitly specified via option, decide whether to generate indexed
4937 load/store instructions. */
4938 if (TARGET_AVOID_XFORM == -1)
4939 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4940 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4941 need indexed accesses and the type used is the scalar type of the element
4942 being loaded or stored. */
4943 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
4944 && !TARGET_ALTIVEC);
4946 /* Set the -mrecip options. */
4947 if (rs6000_recip_name)
4949 char *p = ASTRDUP (rs6000_recip_name);
4950 char *q;
4951 unsigned int mask, i;
4952 bool invert;
4954 while ((q = strtok (p, ",")) != NULL)
4956 p = NULL;
4957 if (*q == '!')
4959 invert = true;
4960 q++;
4962 else
4963 invert = false;
4965 if (!strcmp (q, "default"))
4966 mask = ((TARGET_RECIP_PRECISION)
4967 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4968 else
4970 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4971 if (!strcmp (q, recip_options[i].string))
4973 mask = recip_options[i].mask;
4974 break;
4977 if (i == ARRAY_SIZE (recip_options))
4979 error ("unknown option for -mrecip=%s", q);
4980 invert = false;
4981 mask = 0;
4982 ret = false;
4986 if (invert)
4987 rs6000_recip_control &= ~mask;
4988 else
4989 rs6000_recip_control |= mask;
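/* Usage sketch: -mrecip=default picks low or high precision based on
   TARGET_RECIP_PRECISION, while a list such as -mrecip=all,!rsqrtd
   (option names come from the recip_options table defined earlier in this
   file) enables everything except the DFmode rsqrt estimate.  */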
4993 /* Set the builtin mask of the various options in use that could affect
4994 which builtins are available. In the past we used target_flags, but we've
4995 run out of bits, and some options like SPE and PAIRED are no longer in
4996 target_flags. */
4997 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4998 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4999 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5000 rs6000_builtin_mask);
5002 /* Initialize all of the registers. */
5003 rs6000_init_hard_regno_mode_ok (global_init_p);
5005 /* Save the initial options in case the user uses function-specific options. */
5006 if (global_init_p)
5007 target_option_default_node = target_option_current_node
5008 = build_target_option_node (&global_options);
5010 /* If not explicitly specified via option, decide whether to generate the
5011 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
5012 if (TARGET_LINK_STACK == -1)
5013 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5015 return ret;
5018 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5019 define the target cpu type. */
5021 static void
5022 rs6000_option_override (void)
5024 (void) rs6000_option_override_internal (true);
5026 /* Register machine-specific passes. This needs to be done at start-up.
5027 It's convenient to do it here (like i386 does). */
5028 opt_pass *pass_analyze_swaps = make_pass_analyze_swaps (g);
5030 struct register_pass_info analyze_swaps_info
5031 = { pass_analyze_swaps, "cse1", 1, PASS_POS_INSERT_BEFORE };
5033 register_pass (&analyze_swaps_info);
5037 /* Implement targetm.vectorize.builtin_mask_for_load. */
5038 static tree
5039 rs6000_builtin_mask_for_load (void)
5041 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5042 if ((TARGET_ALTIVEC && !TARGET_VSX)
5043 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5044 return altivec_builtin_mask_for_load;
5045 else
5046 return 0;
5049 /* Implement LOOP_ALIGN. */
5050 int
5051 rs6000_loop_align (rtx label)
5053 basic_block bb;
5054 int ninsns;
5056 /* Don't override loop alignment if -falign-loops was specified. */
5057 if (!can_override_loop_align)
5058 return align_loops_log;
5060 bb = BLOCK_FOR_INSN (label);
5061 ninsns = num_loop_insns(bb->loop_father);
5063 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5064 if (ninsns > 4 && ninsns <= 8
5065 && (rs6000_cpu == PROCESSOR_POWER4
5066 || rs6000_cpu == PROCESSOR_POWER5
5067 || rs6000_cpu == PROCESSOR_POWER6
5068 || rs6000_cpu == PROCESSOR_POWER7
5069 || rs6000_cpu == PROCESSOR_POWER8
5070 || rs6000_cpu == PROCESSOR_POWER9))
5071 return 5;
5072 else
5073 return align_loops_log;
5076 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5077 static int
5078 rs6000_loop_align_max_skip (rtx_insn *label)
5080 return (1 << rs6000_loop_align (label)) - 1;
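/* Minimal worked example (not GCC code): LOOP_ALIGN returns a log2 value,
   so the 5 returned above for small loops on the listed processors requests
   a 1 << 5 == 32-byte boundary, and the matching max skip is 31 bytes.  */

#include <assert.h>

static void
demo_loop_align_numbers (void)
{
  int align_log = 5;			/* returned by rs6000_loop_align */
  int align_bytes = 1 << align_log;	/* requested alignment in bytes */
  int max_skip = (1 << align_log) - 1;	/* rs6000_loop_align_max_skip */
  assert (align_bytes == 32 && max_skip == 31);
}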
5083 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5084 after applying N iterations. This routine does not determine
5085 how many iterations are required to reach the desired alignment. */
5087 static bool
5088 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5090 if (is_packed)
5091 return false;
5093 if (TARGET_32BIT)
5095 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5096 return true;
5098 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5099 return true;
5101 return false;
5103 else
5105 if (TARGET_MACHO)
5106 return false;
5108 /* Assume that all other types are naturally aligned. CHECKME! */
5109 return true;
5113 /* Return true if the vector misalignment factor is supported by the
5114 target. */
5115 static bool
5116 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5117 const_tree type,
5118 int misalignment,
5119 bool is_packed)
5121 if (TARGET_VSX)
5123 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5124 return true;
5126 /* Return false if the movmisalign pattern is not supported for this mode. */
5127 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5128 return false;
5130 if (misalignment == -1)
5132 /* Misalignment factor is unknown at compile time but we know
5133 it's word aligned. */
5134 if (rs6000_vector_alignment_reachable (type, is_packed))
5136 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5138 if (element_size == 64 || element_size == 32)
5139 return true;
5142 return false;
5145 /* VSX supports word-aligned vectors. */
5146 if (misalignment % 4 == 0)
5147 return true;
5149 return false;
5152 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5153 static int
5154 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5155 tree vectype, int misalign)
5157 unsigned elements;
5158 tree elem_type;
5160 switch (type_of_cost)
5162 case scalar_stmt:
5163 case scalar_load:
5164 case scalar_store:
5165 case vector_stmt:
5166 case vector_load:
5167 case vector_store:
5168 case vec_to_scalar:
5169 case scalar_to_vec:
5170 case cond_branch_not_taken:
5171 return 1;
5173 case vec_perm:
5174 if (TARGET_VSX)
5175 return 3;
5176 else
5177 return 1;
5179 case vec_promote_demote:
5180 if (TARGET_VSX)
5181 return 4;
5182 else
5183 return 1;
5185 case cond_branch_taken:
5186 return 3;
5188 case unaligned_load:
5189 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5190 return 1;
5192 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5194 elements = TYPE_VECTOR_SUBPARTS (vectype);
5195 if (elements == 2)
5196 /* Double word aligned. */
5197 return 2;
5199 if (elements == 4)
5201 switch (misalign)
5203 case 8:
5204 /* Double word aligned. */
5205 return 2;
5207 case -1:
5208 /* Unknown misalignment. */
5209 case 4:
5210 case 12:
5211 /* Word aligned. */
5212 return 22;
5214 default:
5215 gcc_unreachable ();
5220 if (TARGET_ALTIVEC)
5221 /* Misaligned loads are not supported. */
5222 gcc_unreachable ();
5224 return 2;
5226 case unaligned_store:
5227 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5228 return 1;
5230 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5232 elements = TYPE_VECTOR_SUBPARTS (vectype);
5233 if (elements == 2)
5234 /* Double word aligned. */
5235 return 2;
5237 if (elements == 4)
5239 switch (misalign)
5241 case 8:
5242 /* Double word aligned. */
5243 return 2;
5245 case -1:
5246 /* Unknown misalignment. */
5247 case 4:
5248 case 12:
5249 /* Word aligned. */
5250 return 23;
5252 default:
5253 gcc_unreachable ();
5258 if (TARGET_ALTIVEC)
5259 /* Misaligned stores are not supported. */
5260 gcc_unreachable ();
5262 return 2;
5264 case vec_construct:
5265 elements = TYPE_VECTOR_SUBPARTS (vectype);
5266 elem_type = TREE_TYPE (vectype);
5267 /* 32-bit floats loaded into registers are stored as double
5268 precision, so we need n/2 converts in addition to the usual
5269 n/2 merges to construct a vector of short floats from them. */
5270 if (SCALAR_FLOAT_TYPE_P (elem_type)
5271 && TYPE_PRECISION (elem_type) == 32)
5272 return elements + 1;
5273 else
5274 return elements / 2 + 1;
5276 default:
5277 gcc_unreachable ();
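/* Worked example (not GCC code) of the vec_construct arithmetic above:
   a V4SF built from four 32-bit floats costs elements + 1 == 5 because of
   the n/2 extra converts, while an integer V4SI costs
   elements / 2 + 1 == 3.  */

static int
demo_vec_construct_cost (int elements, int is_float32)
{
  return is_float32 ? elements + 1 : elements / 2 + 1;
}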
5281 /* Implement targetm.vectorize.preferred_simd_mode. */
5283 static machine_mode
5284 rs6000_preferred_simd_mode (machine_mode mode)
5286 if (TARGET_VSX)
5287 switch (mode)
5289 case DFmode:
5290 return V2DFmode;
5291 default:;
5293 if (TARGET_ALTIVEC || TARGET_VSX)
5294 switch (mode)
5296 case SFmode:
5297 return V4SFmode;
5298 case TImode:
5299 return V1TImode;
5300 case DImode:
5301 return V2DImode;
5302 case SImode:
5303 return V4SImode;
5304 case HImode:
5305 return V8HImode;
5306 case QImode:
5307 return V16QImode;
5308 default:;
5310 if (TARGET_SPE)
5311 switch (mode)
5313 case SFmode:
5314 return V2SFmode;
5315 case SImode:
5316 return V2SImode;
5317 default:;
5319 if (TARGET_PAIRED_FLOAT
5320 && mode == SFmode)
5321 return V2SFmode;
5322 return word_mode;
5325 typedef struct _rs6000_cost_data
5327 struct loop *loop_info;
5328 unsigned cost[3];
5329 } rs6000_cost_data;
5331 /* Test for likely overcommitment of vector hardware resources. If a
5332 loop iteration is relatively large, and too large a percentage of
5333 instructions in the loop are vectorized, the cost model may not
5334 adequately reflect delays from unavailable vector resources.
5335 Penalize the loop body cost for this case. */
5337 static void
5338 rs6000_density_test (rs6000_cost_data *data)
5340 const int DENSITY_PCT_THRESHOLD = 85;
5341 const int DENSITY_SIZE_THRESHOLD = 70;
5342 const int DENSITY_PENALTY = 10;
5343 struct loop *loop = data->loop_info;
5344 basic_block *bbs = get_loop_body (loop);
5345 int nbbs = loop->num_nodes;
5346 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5347 int i, density_pct;
5349 for (i = 0; i < nbbs; i++)
5351 basic_block bb = bbs[i];
5352 gimple_stmt_iterator gsi;
5354 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5356 gimple *stmt = gsi_stmt (gsi);
5357 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5359 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5360 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5361 not_vec_cost++;
5365 free (bbs);
5366 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5368 if (density_pct > DENSITY_PCT_THRESHOLD
5369 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5371 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5372 if (dump_enabled_p ())
5373 dump_printf_loc (MSG_NOTE, vect_location,
5374 "density %d%%, cost %d exceeds threshold, penalizing "
5375 "loop body cost by %d%%", density_pct,
5376 vec_cost + not_vec_cost, DENSITY_PENALTY);
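/* Standalone sketch (not GCC code) of the density heuristic above: once
   more than DENSITY_PCT_THRESHOLD percent of a loop body larger than
   DENSITY_SIZE_THRESHOLD is vectorized work, the body cost is inflated by
   DENSITY_PENALTY percent.  A zero-size guard is added here that the
   original does not need.  */

static int
demo_density_penalty (int vec_cost, int not_vec_cost)
{
  int density_pct;

  if (vec_cost + not_vec_cost == 0)
    return vec_cost;

  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  if (density_pct > 85 && vec_cost + not_vec_cost > 70)
    return vec_cost * (100 + 10) / 100;	/* penalized body cost */
  return vec_cost;
}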
5380 /* Implement targetm.vectorize.init_cost. */
5382 static void *
5383 rs6000_init_cost (struct loop *loop_info)
5385 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5386 data->loop_info = loop_info;
5387 data->cost[vect_prologue] = 0;
5388 data->cost[vect_body] = 0;
5389 data->cost[vect_epilogue] = 0;
5390 return data;
5393 /* Implement targetm.vectorize.add_stmt_cost. */
5395 static unsigned
5396 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5397 struct _stmt_vec_info *stmt_info, int misalign,
5398 enum vect_cost_model_location where)
5400 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5401 unsigned retval = 0;
5403 if (flag_vect_cost_model)
5405 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5406 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5407 misalign);
5408 /* Statements in an inner loop relative to the loop being
5409 vectorized are weighted more heavily. The value here is
5410 arbitrary and could potentially be improved with analysis. */
5411 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5412 count *= 50; /* FIXME. */
5414 retval = (unsigned) (count * stmt_cost);
5415 cost_data->cost[where] += retval;
5418 return retval;
5421 /* Implement targetm.vectorize.finish_cost. */
5423 static void
5424 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5425 unsigned *body_cost, unsigned *epilogue_cost)
5427 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5429 if (cost_data->loop_info)
5430 rs6000_density_test (cost_data);
5432 *prologue_cost = cost_data->cost[vect_prologue];
5433 *body_cost = cost_data->cost[vect_body];
5434 *epilogue_cost = cost_data->cost[vect_epilogue];
5437 /* Implement targetm.vectorize.destroy_cost_data. */
5439 static void
5440 rs6000_destroy_cost_data (void *data)
5442 free (data);
5445 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5446 library with vectorized intrinsics. */
5448 static tree
5449 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5450 tree type_in)
5452 char name[32];
5453 const char *suffix = NULL;
5454 tree fntype, new_fndecl, bdecl = NULL_TREE;
5455 int n_args = 1;
5456 const char *bname;
5457 machine_mode el_mode, in_mode;
5458 int n, in_n;
5460 /* Libmass is suitable for unsafe math only as it does not correctly support
5461 parts of IEEE with the required precision such as denormals. Only support
5462 it if we have VSX to use the simd d2 or f4 functions.
5463 XXX: Add variable length support. */
5464 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5465 return NULL_TREE;
5467 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5468 n = TYPE_VECTOR_SUBPARTS (type_out);
5469 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5470 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5471 if (el_mode != in_mode
5472 || n != in_n)
5473 return NULL_TREE;
5475 switch (fn)
5477 CASE_CFN_ATAN2:
5478 CASE_CFN_HYPOT:
5479 CASE_CFN_POW:
5480 n_args = 2;
5481 /* fall through */
5483 CASE_CFN_ACOS:
5484 CASE_CFN_ACOSH:
5485 CASE_CFN_ASIN:
5486 CASE_CFN_ASINH:
5487 CASE_CFN_ATAN:
5488 CASE_CFN_ATANH:
5489 CASE_CFN_CBRT:
5490 CASE_CFN_COS:
5491 CASE_CFN_COSH:
5492 CASE_CFN_ERF:
5493 CASE_CFN_ERFC:
5494 CASE_CFN_EXP2:
5495 CASE_CFN_EXP:
5496 CASE_CFN_EXPM1:
5497 CASE_CFN_LGAMMA:
5498 CASE_CFN_LOG10:
5499 CASE_CFN_LOG1P:
5500 CASE_CFN_LOG2:
5501 CASE_CFN_LOG:
5502 CASE_CFN_SIN:
5503 CASE_CFN_SINH:
5504 CASE_CFN_SQRT:
5505 CASE_CFN_TAN:
5506 CASE_CFN_TANH:
5507 if (el_mode == DFmode && n == 2)
5509 bdecl = mathfn_built_in (double_type_node, fn);
5510 suffix = "d2"; /* pow -> powd2 */
5512 else if (el_mode == SFmode && n == 4)
5514 bdecl = mathfn_built_in (float_type_node, fn);
5515 suffix = "4"; /* powf -> powf4 */
5517 else
5518 return NULL_TREE;
5519 if (!bdecl)
5520 return NULL_TREE;
5521 break;
5523 default:
5524 return NULL_TREE;
5527 gcc_assert (suffix != NULL);
5528 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5529 if (!bname)
5530 return NULL_TREE;
5532 strcpy (name, bname + sizeof ("__builtin_") - 1);
5533 strcat (name, suffix);
5535 if (n_args == 1)
5536 fntype = build_function_type_list (type_out, type_in, NULL);
5537 else if (n_args == 2)
5538 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5539 else
5540 gcc_unreachable ();
5542 /* Build a function declaration for the vectorized function. */
5543 new_fndecl = build_decl (BUILTINS_LOCATION,
5544 FUNCTION_DECL, get_identifier (name), fntype);
5545 TREE_PUBLIC (new_fndecl) = 1;
5546 DECL_EXTERNAL (new_fndecl) = 1;
5547 DECL_IS_NOVOPS (new_fndecl) = 1;
5548 TREE_READONLY (new_fndecl) = 1;
5550 return new_fndecl;
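/* Illustrative sketch (not GCC code) of the MASS name mangling above:
   strip the "__builtin_" prefix and append the vector suffix, so
   "__builtin_pow" + "d2" gives "powd2" and "__builtin_powf" + "4" gives
   "powf4".  The helper name is hypothetical; OUT must be large enough for
   the result.  */

#include <string.h>

static void
demo_mass_name (char *out, const char *builtin_name, const char *suffix)
{
  strcpy (out, builtin_name + sizeof ("__builtin_") - 1);
  strcat (out, suffix);
}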
5553 /* Returns a function decl for a vectorized version of the builtin function
5554 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5555 if it is not available. */
5557 static tree
5558 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5559 tree type_in)
5561 machine_mode in_mode, out_mode;
5562 int in_n, out_n;
5564 if (TARGET_DEBUG_BUILTIN)
5565 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5566 combined_fn_name (combined_fn (fn)),
5567 GET_MODE_NAME (TYPE_MODE (type_out)),
5568 GET_MODE_NAME (TYPE_MODE (type_in)));
5570 if (TREE_CODE (type_out) != VECTOR_TYPE
5571 || TREE_CODE (type_in) != VECTOR_TYPE
5572 || !TARGET_VECTORIZE_BUILTINS)
5573 return NULL_TREE;
5575 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5576 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5577 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5578 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5580 switch (fn)
5582 CASE_CFN_COPYSIGN:
5583 if (VECTOR_UNIT_VSX_P (V2DFmode)
5584 && out_mode == DFmode && out_n == 2
5585 && in_mode == DFmode && in_n == 2)
5586 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5587 if (VECTOR_UNIT_VSX_P (V4SFmode)
5588 && out_mode == SFmode && out_n == 4
5589 && in_mode == SFmode && in_n == 4)
5590 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5591 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5592 && out_mode == SFmode && out_n == 4
5593 && in_mode == SFmode && in_n == 4)
5594 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5595 break;
5596 CASE_CFN_CEIL:
5597 if (VECTOR_UNIT_VSX_P (V2DFmode)
5598 && out_mode == DFmode && out_n == 2
5599 && in_mode == DFmode && in_n == 2)
5600 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5601 if (VECTOR_UNIT_VSX_P (V4SFmode)
5602 && out_mode == SFmode && out_n == 4
5603 && in_mode == SFmode && in_n == 4)
5604 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5605 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5606 && out_mode == SFmode && out_n == 4
5607 && in_mode == SFmode && in_n == 4)
5608 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5609 break;
5610 CASE_CFN_FLOOR:
5611 if (VECTOR_UNIT_VSX_P (V2DFmode)
5612 && out_mode == DFmode && out_n == 2
5613 && in_mode == DFmode && in_n == 2)
5614 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5615 if (VECTOR_UNIT_VSX_P (V4SFmode)
5616 && out_mode == SFmode && out_n == 4
5617 && in_mode == SFmode && in_n == 4)
5618 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5619 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5620 && out_mode == SFmode && out_n == 4
5621 && in_mode == SFmode && in_n == 4)
5622 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5623 break;
5624 CASE_CFN_FMA:
5625 if (VECTOR_UNIT_VSX_P (V2DFmode)
5626 && out_mode == DFmode && out_n == 2
5627 && in_mode == DFmode && in_n == 2)
5628 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5629 if (VECTOR_UNIT_VSX_P (V4SFmode)
5630 && out_mode == SFmode && out_n == 4
5631 && in_mode == SFmode && in_n == 4)
5632 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5633 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5634 && out_mode == SFmode && out_n == 4
5635 && in_mode == SFmode && in_n == 4)
5636 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5637 break;
5638 CASE_CFN_TRUNC:
5639 if (VECTOR_UNIT_VSX_P (V2DFmode)
5640 && out_mode == DFmode && out_n == 2
5641 && in_mode == DFmode && in_n == 2)
5642 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5643 if (VECTOR_UNIT_VSX_P (V4SFmode)
5644 && out_mode == SFmode && out_n == 4
5645 && in_mode == SFmode && in_n == 4)
5646 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5647 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5648 && out_mode == SFmode && out_n == 4
5649 && in_mode == SFmode && in_n == 4)
5650 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5651 break;
5652 CASE_CFN_NEARBYINT:
5653 if (VECTOR_UNIT_VSX_P (V2DFmode)
5654 && flag_unsafe_math_optimizations
5655 && out_mode == DFmode && out_n == 2
5656 && in_mode == DFmode && in_n == 2)
5657 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5658 if (VECTOR_UNIT_VSX_P (V4SFmode)
5659 && flag_unsafe_math_optimizations
5660 && out_mode == SFmode && out_n == 4
5661 && in_mode == SFmode && in_n == 4)
5662 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5663 break;
5664 CASE_CFN_RINT:
5665 if (VECTOR_UNIT_VSX_P (V2DFmode)
5666 && !flag_trapping_math
5667 && out_mode == DFmode && out_n == 2
5668 && in_mode == DFmode && in_n == 2)
5669 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5670 if (VECTOR_UNIT_VSX_P (V4SFmode)
5671 && !flag_trapping_math
5672 && out_mode == SFmode && out_n == 4
5673 && in_mode == SFmode && in_n == 4)
5674 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5675 break;
5676 default:
5677 break;
5680 /* Generate calls to libmass if appropriate. */
5681 if (rs6000_veclib_handler)
5682 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5684 return NULL_TREE;
5687 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5689 static tree
5690 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5691 tree type_in)
5693 machine_mode in_mode, out_mode;
5694 int in_n, out_n;
5696 if (TARGET_DEBUG_BUILTIN)
5697 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5698 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5699 GET_MODE_NAME (TYPE_MODE (type_out)),
5700 GET_MODE_NAME (TYPE_MODE (type_in)));
5702 if (TREE_CODE (type_out) != VECTOR_TYPE
5703 || TREE_CODE (type_in) != VECTOR_TYPE
5704 || !TARGET_VECTORIZE_BUILTINS)
5705 return NULL_TREE;
5707 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5708 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5709 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5710 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5712 enum rs6000_builtins fn
5713 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5714 switch (fn)
5716 case RS6000_BUILTIN_RSQRTF:
5717 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5718 && out_mode == SFmode && out_n == 4
5719 && in_mode == SFmode && in_n == 4)
5720 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5721 break;
5722 case RS6000_BUILTIN_RSQRT:
5723 if (VECTOR_UNIT_VSX_P (V2DFmode)
5724 && out_mode == DFmode && out_n == 2
5725 && in_mode == DFmode && in_n == 2)
5726 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5727 break;
5728 case RS6000_BUILTIN_RECIPF:
5729 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5730 && out_mode == SFmode && out_n == 4
5731 && in_mode == SFmode && in_n == 4)
5732 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5733 break;
5734 case RS6000_BUILTIN_RECIP:
5735 if (VECTOR_UNIT_VSX_P (V2DFmode)
5736 && out_mode == DFmode && out_n == 2
5737 && in_mode == DFmode && in_n == 2)
5738 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5739 break;
5740 default:
5741 break;
5743 return NULL_TREE;
5746 /* Default CPU string for rs6000*_file_start functions. */
5747 static const char *rs6000_default_cpu;
5749 /* Do anything needed at the start of the asm file. */
5751 static void
5752 rs6000_file_start (void)
5754 char buffer[80];
5755 const char *start = buffer;
5756 FILE *file = asm_out_file;
5758 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5760 default_file_start ();
5762 if (flag_verbose_asm)
5764 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5766 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5768 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5769 start = "";
5772 if (global_options_set.x_rs6000_cpu_index)
5774 fprintf (file, "%s -mcpu=%s", start,
5775 processor_target_table[rs6000_cpu_index].name);
5776 start = "";
5779 if (global_options_set.x_rs6000_tune_index)
5781 fprintf (file, "%s -mtune=%s", start,
5782 processor_target_table[rs6000_tune_index].name);
5783 start = "";
5786 if (PPC405_ERRATUM77)
5788 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5789 start = "";
5792 #ifdef USING_ELFOS_H
5793 switch (rs6000_sdata)
5795 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5796 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5797 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5798 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5801 if (rs6000_sdata && g_switch_value)
5803 fprintf (file, "%s -G %d", start,
5804 g_switch_value);
5805 start = "";
5807 #endif
5809 if (*start == '\0')
5810 putc ('\n', file);
5813 #ifdef USING_ELFOS_H
5814 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5815 && !global_options_set.x_rs6000_cpu_index)
5817 fputs ("\t.machine ", asm_out_file);
5818 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5819 fputs ("power9\n", asm_out_file);
5820 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5821 fputs ("power8\n", asm_out_file);
5822 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5823 fputs ("power7\n", asm_out_file);
5824 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5825 fputs ("power6\n", asm_out_file);
5826 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5827 fputs ("power5\n", asm_out_file);
5828 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5829 fputs ("power4\n", asm_out_file);
5830 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5831 fputs ("ppc64\n", asm_out_file);
5832 else
5833 fputs ("ppc\n", asm_out_file);
5835 #endif
5837 if (DEFAULT_ABI == ABI_ELFv2)
5838 fprintf (file, "\t.abiversion 2\n");
5842 /* Return nonzero if this function is known to have a null epilogue. */
5844 int
5845 direct_return (void)
5847 if (reload_completed)
5849 rs6000_stack_t *info = rs6000_stack_info ();
5851 if (info->first_gp_reg_save == 32
5852 && info->first_fp_reg_save == 64
5853 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5854 && ! info->lr_save_p
5855 && ! info->cr_save_p
5856 && info->vrsave_size == 0
5857 && ! info->push_p)
5858 return 1;
5861 return 0;
5864 /* Return the number of instructions it takes to form a constant in an
5865 integer register. */
5867 int
5868 num_insns_constant_wide (HOST_WIDE_INT value)
5870 /* signed constant loadable with addi */
5871 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5872 return 1;
5874 /* constant loadable with addis */
5875 else if ((value & 0xffff) == 0
5876 && (value >> 31 == -1 || value >> 31 == 0))
5877 return 1;
5879 else if (TARGET_POWERPC64)
5881 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5882 HOST_WIDE_INT high = value >> 31;
5884 if (high == 0 || high == -1)
5885 return 2;
5887 high >>= 1;
5889 if (low == 0)
5890 return num_insns_constant_wide (high) + 1;
5891 else if (high == 0)
5892 return num_insns_constant_wide (low) + 1;
5893 else
5894 return (num_insns_constant_wide (high)
5895 + num_insns_constant_wide (low) + 1);
5898 else
5899 return 2;
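/* Standalone sketch (not GCC code) of the 64-bit split above, assuming the
   single-insn addi/addis cases have already been ruled out: for
   0x123456789abcdef0, LOW becomes the sign-extended 0xffffffff9abcdef0 and
   HIGH becomes 0x12345678; each half then costs 2 insns, plus one insn to
   combine them, for 5 in total.  The helper name is hypothetical.  */

#include <stdint.h>

static void
demo_split_constant (int64_t value, int64_t *high, int64_t *low)
{
  *low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
  *high = value >> 32;	/* upper 32 bits, i.e. after the final high >>= 1 */
}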
5902 int
5903 num_insns_constant (rtx op, machine_mode mode)
5905 HOST_WIDE_INT low, high;
5907 switch (GET_CODE (op))
5909 case CONST_INT:
5910 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5911 && rs6000_is_valid_and_mask (op, mode))
5912 return 2;
5913 else
5914 return num_insns_constant_wide (INTVAL (op));
5916 case CONST_WIDE_INT:
5918 int i;
5919 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5920 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5921 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5922 return ins;
5925 case CONST_DOUBLE:
5926 if (mode == SFmode || mode == SDmode)
5928 long l;
5930 if (DECIMAL_FLOAT_MODE_P (mode))
5931 REAL_VALUE_TO_TARGET_DECIMAL32
5932 (*CONST_DOUBLE_REAL_VALUE (op), l);
5933 else
5934 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5935 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5938 long l[2];
5939 if (DECIMAL_FLOAT_MODE_P (mode))
5940 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
5941 else
5942 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5943 high = l[WORDS_BIG_ENDIAN == 0];
5944 low = l[WORDS_BIG_ENDIAN != 0];
5946 if (TARGET_32BIT)
5947 return (num_insns_constant_wide (low)
5948 + num_insns_constant_wide (high));
5949 else
5951 if ((high == 0 && low >= 0)
5952 || (high == -1 && low < 0))
5953 return num_insns_constant_wide (low);
5955 else if (rs6000_is_valid_and_mask (op, mode))
5956 return 2;
5958 else if (low == 0)
5959 return num_insns_constant_wide (high) + 1;
5961 else
5962 return (num_insns_constant_wide (high)
5963 + num_insns_constant_wide (low) + 1);
5966 default:
5967 gcc_unreachable ();
5971 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5972 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5973 corresponding element of the vector, but for V4SFmode and V2SFmode,
5974 the corresponding "float" is interpreted as an SImode integer. */
5976 HOST_WIDE_INT
5977 const_vector_elt_as_int (rtx op, unsigned int elt)
5979 rtx tmp;
5981 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5982 gcc_assert (GET_MODE (op) != V2DImode
5983 && GET_MODE (op) != V2DFmode);
5985 tmp = CONST_VECTOR_ELT (op, elt);
5986 if (GET_MODE (op) == V4SFmode
5987 || GET_MODE (op) == V2SFmode)
5988 tmp = gen_lowpart (SImode, tmp);
5989 return INTVAL (tmp);
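/* Standalone sketch (not GCC code) of what gen_lowpart (SImode, ...) does
   for the float case above: reinterpret a 32-bit float's bit pattern as a
   32-bit integer.  */

#include <stdint.h>
#include <string.h>

static int32_t
demo_float_bits_as_int (float f)
{
  int32_t i;
  memcpy (&i, &f, sizeof i);	/* well-defined bit reinterpretation */
  return i;
}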
5992 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5993 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5994 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5995 all items are set to the same value and contain COPIES replicas of the
5996 vsplt's operand; if STEP > 1, one in every STEP elements is set to the
5997 vsplt's operand and the others are set to the value of the operand's msb. */
5999 static bool
6000 vspltis_constant (rtx op, unsigned step, unsigned copies)
6002 machine_mode mode = GET_MODE (op);
6003 machine_mode inner = GET_MODE_INNER (mode);
6005 unsigned i;
6006 unsigned nunits;
6007 unsigned bitsize;
6008 unsigned mask;
6010 HOST_WIDE_INT val;
6011 HOST_WIDE_INT splat_val;
6012 HOST_WIDE_INT msb_val;
6014 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6015 return false;
6017 nunits = GET_MODE_NUNITS (mode);
6018 bitsize = GET_MODE_BITSIZE (inner);
6019 mask = GET_MODE_MASK (inner);
6021 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6022 splat_val = val;
6023 msb_val = val >= 0 ? 0 : -1;
6025 /* Construct the value to be splatted, if possible. If not, return 0. */
6026 for (i = 2; i <= copies; i *= 2)
6028 HOST_WIDE_INT small_val;
6029 bitsize /= 2;
6030 small_val = splat_val >> bitsize;
6031 mask >>= bitsize;
6032 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
6033 return false;
6034 splat_val = small_val;
6037 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6038 if (EASY_VECTOR_15 (splat_val))
6041 /* Also check if we can splat, and then add the result to itself. Do so if
6042 the value is positive, or if the splat instruction is using OP's mode;
6043 for splat_val < 0, the splat and the add should use the same mode. */
6044 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6045 && (splat_val >= 0 || (step == 1 && copies == 1)))
6048 /* Also check if we are loading up the most significant bit, which can be
6049 done by loading up -1 and shifting the value left by -1. */
6050 else if (EASY_VECTOR_MSB (splat_val, inner))
6053 else
6054 return false;
6056 /* Check if VAL is present in every STEP-th element, and the
6057 other elements are filled with its most significant bit. */
6058 for (i = 1; i < nunits; ++i)
6060 HOST_WIDE_INT desired_val;
6061 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6062 if ((i & (step - 1)) == 0)
6063 desired_val = val;
6064 else
6065 desired_val = msb_val;
6067 if (desired_val != const_vector_elt_as_int (op, elt))
6068 return false;
6071 return true;
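/* Standalone sketch (not GCC code) of the COPIES loop above: repeatedly
   fold the candidate value in half and check that both halves agree.  For
   example, 0x00070007 with copies == 2 and a 32-bit element folds to the
   vspltish operand 0x0007.  The helper name is hypothetical.  */

#include <stdint.h>

static int
demo_fold_splat_operand (int32_t val, unsigned copies, unsigned bitsize,
			 int32_t *splat)
{
  uint32_t mask = (bitsize >= 32) ? 0xffffffffu : ((1u << bitsize) - 1);
  int32_t splat_val = val;
  unsigned i;

  for (i = 2; i <= copies; i *= 2)
    {
      int32_t small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != (int32_t) (((uint32_t) small_val << bitsize)
				  | ((uint32_t) small_val & mask)))
	return 0;		/* halves disagree: not a replication */
      splat_val = small_val;
    }

  *splat = splat_val;
  return 1;
}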
6074 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6075 instruction, filling in the bottom elements with 0 or -1.
6077 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6078 for the number of zeroes to shift in, or negative for the number of 0xff
6079 bytes to shift in.
6081 OP is a CONST_VECTOR. */
6083 static int
6084 vspltis_shifted (rtx op)
6086 machine_mode mode = GET_MODE (op);
6087 machine_mode inner = GET_MODE_INNER (mode);
6089 unsigned i, j;
6090 unsigned nunits;
6091 unsigned mask;
6093 HOST_WIDE_INT val;
6095 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6096 return false;
6098 /* We need to create pseudo registers to do the shift, so don't recognize
6099 shift vector constants after reload. */
6100 if (!can_create_pseudo_p ())
6101 return false;
6103 nunits = GET_MODE_NUNITS (mode);
6104 mask = GET_MODE_MASK (inner);
6106 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6108 /* Check if the value can really be the operand of a vspltis[bhw]. */
6109 if (EASY_VECTOR_15 (val))
6112 /* Also check if we are loading up the most significant bit which can be done
6113 by loading up -1 and shifting the value left by -1. */
6114 else if (EASY_VECTOR_MSB (val, inner))
6117 else
6118 return 0;
6120 /* Check if VAL is present in every STEP-th element until we find elements
6121 that are 0 or all 1 bits. */
6122 for (i = 1; i < nunits; ++i)
6124 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6125 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6127 /* If the value isn't the splat value, check for the remaining elements
6128 being 0/-1. */
6129 if (val != elt_val)
6131 if (elt_val == 0)
6133 for (j = i+1; j < nunits; ++j)
6135 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6136 if (const_vector_elt_as_int (op, elt2) != 0)
6137 return 0;
6140 return (nunits - i) * GET_MODE_SIZE (inner);
6143 else if ((elt_val & mask) == mask)
6145 for (j = i+1; j < nunits; ++j)
6147 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6148 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6149 return 0;
6152 return -((nunits - i) * GET_MODE_SIZE (inner));
6155 else
6156 return 0;
6160 /* If all elements are equal, we don't need to do VSLDOI. */
6161 return 0;
6165 /* Return true if OP is of the given MODE and can be synthesized
6166 with a vspltisb, vspltish or vspltisw. */
6168 bool
6169 easy_altivec_constant (rtx op, machine_mode mode)
6171 unsigned step, copies;
6173 if (mode == VOIDmode)
6174 mode = GET_MODE (op);
6175 else if (mode != GET_MODE (op))
6176 return false;
6178 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6179 constants. */
6180 if (mode == V2DFmode)
6181 return zero_constant (op, mode);
6183 else if (mode == V2DImode)
6185 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6186 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6187 return false;
6189 if (zero_constant (op, mode))
6190 return true;
6192 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6193 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6194 return true;
6196 return false;
6199 /* V1TImode is a special container for TImode. Ignore for now. */
6200 else if (mode == V1TImode)
6201 return false;
6203 /* Start with a vspltisw. */
6204 step = GET_MODE_NUNITS (mode) / 4;
6205 copies = 1;
6207 if (vspltis_constant (op, step, copies))
6208 return true;
6210 /* Then try with a vspltish. */
6211 if (step == 1)
6212 copies <<= 1;
6213 else
6214 step >>= 1;
6216 if (vspltis_constant (op, step, copies))
6217 return true;
6219 /* And finally a vspltisb. */
6220 if (step == 1)
6221 copies <<= 1;
6222 else
6223 step >>= 1;
6225 if (vspltis_constant (op, step, copies))
6226 return true;
6228 if (vspltis_shifted (op) != 0)
6229 return true;
6231 return false;
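/* Illustrative walk (not GCC code) through the step/copies progression used
   above.  For V16QImode (nunits == 16) the attempts are vspltisw (step 4),
   vspltish (step 2), then vspltisb (step 1); once step reaches 1, later
   attempts double copies instead.  The helper name is hypothetical.  */

#include <stdio.h>

static void
demo_step_copies (unsigned nunits)
{
  static const char *const insn[] = { "vspltisw", "vspltish", "vspltisb" };
  unsigned step = nunits / 4, copies = 1;
  int attempt;

  for (attempt = 0; attempt < 3; attempt++)
    {
      printf ("%s: step %u, copies %u\n", insn[attempt], step, copies);
      if (step == 1)
	copies <<= 1;	/* element already minimal: widen the replication */
      else
	step >>= 1;	/* otherwise halve the stride between elements */
    }
}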
6234 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6235 result is OP. Abort if it is not possible. */
6237 rtx
6238 gen_easy_altivec_constant (rtx op)
6240 machine_mode mode = GET_MODE (op);
6241 int nunits = GET_MODE_NUNITS (mode);
6242 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6243 unsigned step = nunits / 4;
6244 unsigned copies = 1;
6246 /* Start with a vspltisw. */
6247 if (vspltis_constant (op, step, copies))
6248 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6250 /* Then try with a vspltish. */
6251 if (step == 1)
6252 copies <<= 1;
6253 else
6254 step >>= 1;
6256 if (vspltis_constant (op, step, copies))
6257 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6259 /* And finally a vspltisb. */
6260 if (step == 1)
6261 copies <<= 1;
6262 else
6263 step >>= 1;
6265 if (vspltis_constant (op, step, copies))
6266 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6268 gcc_unreachable ();
6271 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6272 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6274 Return the number of instructions needed (1 or 2) through NUM_INSNS_PTR,
6275 and the constant that is being split through CONSTANT_PTR. */
6279 bool
6280 xxspltib_constant_p (rtx op,
6281 machine_mode mode,
6282 int *num_insns_ptr,
6283 int *constant_ptr)
6285 size_t nunits = GET_MODE_NUNITS (mode);
6286 size_t i;
6287 HOST_WIDE_INT value;
6288 rtx element;
6290 /* Set the returned values to out of bound values. */
6291 *num_insns_ptr = -1;
6292 *constant_ptr = 256;
6294 if (!TARGET_P9_VECTOR)
6295 return false;
6297 if (mode == VOIDmode)
6298 mode = GET_MODE (op);
6300 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6301 return false;
6303 /* Handle (vec_duplicate <constant>). */
6304 if (GET_CODE (op) == VEC_DUPLICATE)
6306 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6307 && mode != V2DImode)
6308 return false;
6310 element = XEXP (op, 0);
6311 if (!CONST_INT_P (element))
6312 return false;
6314 value = INTVAL (element);
6315 if (!IN_RANGE (value, -128, 127))
6316 return false;
6319 /* Handle (const_vector [...]). */
6320 else if (GET_CODE (op) == CONST_VECTOR)
6322 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6323 && mode != V2DImode)
6324 return false;
6326 element = CONST_VECTOR_ELT (op, 0);
6327 if (!CONST_INT_P (element))
6328 return false;
6330 value = INTVAL (element);
6331 if (!IN_RANGE (value, -128, 127))
6332 return false;
6334 for (i = 1; i < nunits; i++)
6336 element = CONST_VECTOR_ELT (op, i);
6337 if (!CONST_INT_P (element))
6338 return false;
6340 if (value != INTVAL (element))
6341 return false;
6345 /* Handle integer constants being loaded into the upper part of the VSX
6346 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6347 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6348 else if (CONST_INT_P (op))
6350 if (!SCALAR_INT_MODE_P (mode))
6351 return false;
6353 value = INTVAL (op);
6354 if (!IN_RANGE (value, -128, 127))
6355 return false;
6357 if (!IN_RANGE (value, -1, 0))
6359 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6360 return false;
6362 if (EASY_VECTOR_15 (value))
6363 return false;
6367 else
6368 return false;
6370 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6371 sign extend. Special case 0/-1 to allow getting any VSX register instead
6372 of an Altivec register. */
6373 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6374 && EASY_VECTOR_15 (value))
6375 return false;
6377 /* Return # of instructions and the constant byte for XXSPLTIB. */
6378 if (mode == V16QImode)
6379 *num_insns_ptr = 1;
6381 else if (IN_RANGE (value, -1, 0))
6382 *num_insns_ptr = 1;
6384 else
6385 *num_insns_ptr = 2;
6387 *constant_ptr = (int) value;
6388 return true;
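/* Minimal sketch (not GCC code) of the core XXSPLTIB test above: the
   instruction splats a single signed byte immediate, so every element of
   the candidate must be the same value in [-128, 127].  The helper name is
   hypothetical.  */

static int
demo_xxspltib_ok (const long *elts, int n)
{
  int i;

  if (elts[0] < -128 || elts[0] > 127)
    return 0;			/* must fit the signed byte immediate */
  for (i = 1; i < n; i++)
    if (elts[i] != elts[0])
      return 0;			/* all elements must splat the same byte */
  return 1;
}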
6391 const char *
6392 output_vec_const_move (rtx *operands)
6394 int cst, cst2, shift;
6395 machine_mode mode;
6396 rtx dest, vec;
6398 dest = operands[0];
6399 vec = operands[1];
6400 mode = GET_MODE (dest);
6402 if (TARGET_VSX)
6404 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6405 int xxspltib_value = 256;
6406 int num_insns = -1;
6408 if (zero_constant (vec, mode))
6410 if (TARGET_P9_VECTOR)
6411 return "xxspltib %x0,0";
6413 else if (dest_vmx_p)
6414 return "vspltisw %0,0";
6416 else
6417 return "xxlxor %x0,%x0,%x0";
6420 if (all_ones_constant (vec, mode))
6422 if (TARGET_P9_VECTOR)
6423 return "xxspltib %x0,255";
6425 else if (dest_vmx_p)
6426 return "vspltisw %0,-1";
6428 else if (TARGET_P8_VECTOR)
6429 return "xxlorc %x0,%x0,%x0";
6431 else
6432 gcc_unreachable ();
6435 if (TARGET_P9_VECTOR
6436 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6438 if (num_insns == 1)
6440 operands[2] = GEN_INT (xxspltib_value & 0xff);
6441 return "xxspltib %x0,%2";
6444 return "#";
6448 if (TARGET_ALTIVEC)
6450 rtx splat_vec;
6452 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6453 if (zero_constant (vec, mode))
6454 return "vspltisw %0,0";
6456 if (all_ones_constant (vec, mode))
6457 return "vspltisw %0,-1";
6459 /* Do we need to construct a value using VSLDOI? */
6460 shift = vspltis_shifted (vec);
6461 if (shift != 0)
6462 return "#";
6464 splat_vec = gen_easy_altivec_constant (vec);
6465 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6466 operands[1] = XEXP (splat_vec, 0);
6467 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6468 return "#";
6470 switch (GET_MODE (splat_vec))
6472 case V4SImode:
6473 return "vspltisw %0,%1";
6475 case V8HImode:
6476 return "vspltish %0,%1";
6478 case V16QImode:
6479 return "vspltisb %0,%1";
6481 default:
6482 gcc_unreachable ();
6486 gcc_assert (TARGET_SPE);
6488 /* Vector constant 0 is handled as a splitter of V2SI, and in the
6489 pattern of V1DI, V4HI, and V2SF.
6491 FIXME: We should probably return # and add post reload
6492 splitters for these, but this way is so easy ;-). */
6493 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
6494 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
6495 operands[1] = CONST_VECTOR_ELT (vec, 0);
6496 operands[2] = CONST_VECTOR_ELT (vec, 1);
6497 if (cst == cst2)
6498 return "li %0,%1\n\tevmergelo %0,%0,%0";
6499 else if (WORDS_BIG_ENDIAN)
6500 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
6501 else
6502 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
6505 /* Initialize TARGET of vector PAIRED to VALS. */
6507 void
6508 paired_expand_vector_init (rtx target, rtx vals)
6510 machine_mode mode = GET_MODE (target);
6511 int n_elts = GET_MODE_NUNITS (mode);
6512 int n_var = 0;
6513 rtx x, new_rtx, tmp, constant_op, op1, op2;
6514 int i;
6516 for (i = 0; i < n_elts; ++i)
6518 x = XVECEXP (vals, 0, i);
6519 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6520 ++n_var;
6522 if (n_var == 0)
6524 /* Load from constant pool. */
6525 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6526 return;
6529 if (n_var == 2)
6531 /* The vector is initialized only with non-constants. */
6532 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6533 XVECEXP (vals, 0, 1));
6535 emit_move_insn (target, new_rtx);
6536 return;
6539 /* One field is non-constant and the other one is a constant. Load the
6540 constant from the constant pool and use ps_merge instruction to
6541 construct the whole vector. */
6542 op1 = XVECEXP (vals, 0, 0);
6543 op2 = XVECEXP (vals, 0, 1);
6545 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6547 tmp = gen_reg_rtx (GET_MODE (constant_op));
6548 emit_move_insn (tmp, constant_op);
6550 if (CONSTANT_P (op1))
6551 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6552 else
6553 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6555 emit_move_insn (target, new_rtx);
6558 void
6559 paired_expand_vector_move (rtx operands[])
6561 rtx op0 = operands[0], op1 = operands[1];
6563 emit_move_insn (op0, op1);
6566 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6567 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6568 operands for the relation operation COND. This is a recursive
6569 function. */
6571 static void
6572 paired_emit_vector_compare (enum rtx_code rcode,
6573 rtx dest, rtx op0, rtx op1,
6574 rtx cc_op0, rtx cc_op1)
6576 rtx tmp = gen_reg_rtx (V2SFmode);
6577 rtx tmp1, max, min;
6579 gcc_assert (TARGET_PAIRED_FLOAT);
6580 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6582 switch (rcode)
6584 case LT:
6585 case LTU:
6586 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6587 return;
6588 case GE:
6589 case GEU:
6590 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6591 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6592 return;
6593 case LE:
6594 case LEU:
6595 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6596 return;
6597 case GT:
6598 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6599 return;
6600 case EQ:
6601 tmp1 = gen_reg_rtx (V2SFmode);
6602 max = gen_reg_rtx (V2SFmode);
6603 min = gen_reg_rtx (V2SFmode);
6604 gen_reg_rtx (V2SFmode);
6606 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6607 emit_insn (gen_selv2sf4
6608 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6609 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6610 emit_insn (gen_selv2sf4
6611 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6612 emit_insn (gen_subv2sf3 (tmp1, min, max));
6613 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6614 return;
6615 case NE:
6616 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6617 return;
6618 case UNLE:
6619 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6620 return;
6621 case UNLT:
6622 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6623 return;
6624 case UNGE:
6625 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6626 return;
6627 case UNGT:
6628 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6629 return;
6630 default:
6631 gcc_unreachable ();
6634 return;
6637 /* Emit vector conditional expression.
6638 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6639 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6641 int
6642 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6643 rtx cond, rtx cc_op0, rtx cc_op1)
6645 enum rtx_code rcode = GET_CODE (cond);
6647 if (!TARGET_PAIRED_FLOAT)
6648 return 0;
6650 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6652 return 1;
6655 /* Initialize vector TARGET to VALS. */
6657 void
6658 rs6000_expand_vector_init (rtx target, rtx vals)
6660 machine_mode mode = GET_MODE (target);
6661 machine_mode inner_mode = GET_MODE_INNER (mode);
6662 int n_elts = GET_MODE_NUNITS (mode);
6663 int n_var = 0, one_var = -1;
6664 bool all_same = true, all_const_zero = true;
6665 rtx x, mem;
6666 int i;
6668 for (i = 0; i < n_elts; ++i)
6670 x = XVECEXP (vals, 0, i);
6671 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6672 ++n_var, one_var = i;
6673 else if (x != CONST0_RTX (inner_mode))
6674 all_const_zero = false;
6676 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6677 all_same = false;
6680 if (n_var == 0)
6682 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6683 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6684 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6686 /* Zero register. */
6687 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (mode, target, target)));
6688 return;
6690 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6692 /* Splat immediate. */
6693 emit_insn (gen_rtx_SET (target, const_vec));
6694 return;
6696 else
6698 /* Load from constant pool. */
6699 emit_move_insn (target, const_vec);
6700 return;
6704 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6705 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6707 rtx op0 = XVECEXP (vals, 0, 0);
6708 rtx op1 = XVECEXP (vals, 0, 1);
6709 if (all_same)
6711 if (!MEM_P (op0) && !REG_P (op0))
6712 op0 = force_reg (inner_mode, op0);
6713 if (mode == V2DFmode)
6714 emit_insn (gen_vsx_splat_v2df (target, op0));
6715 else
6716 emit_insn (gen_vsx_splat_v2di (target, op0));
6718 else
6720 op0 = force_reg (inner_mode, op0);
6721 op1 = force_reg (inner_mode, op1);
6722 if (mode == V2DFmode)
6723 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
6724 else
6725 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
6727 return;
6730 /* Word values on ISA 3.0 can use mtvsrws, lxvwsx, or vspltisw. V4SF is
6731 complicated since scalars are stored as doubles in the registers. */
6732 if (TARGET_P9_VECTOR && mode == V4SImode && all_same
6733 && VECTOR_MEM_VSX_P (mode))
6735 emit_insn (gen_vsx_splat_v4si (target, XVECEXP (vals, 0, 0)));
6736 return;
6739 /* With single-precision floating point on VSX, we know that internally
6740 single precision is actually represented as a double. Either make two
6741 V2DF vectors and convert those to single precision, or do one
6742 conversion and splat the result to the other elements. */
6743 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
6745 if (all_same)
6747 rtx op0 = XVECEXP (vals, 0, 0);
6749 if (TARGET_P9_VECTOR)
6750 emit_insn (gen_vsx_splat_v4sf (target, op0));
6752 else
6754 rtx freg = gen_reg_rtx (V4SFmode);
6755 rtx sreg = force_reg (SFmode, op0);
6756 rtx cvt = (TARGET_XSCVDPSPN
6757 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6758 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6760 emit_insn (cvt);
6761 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6762 const0_rtx));
6765 else
6767 rtx dbl_even = gen_reg_rtx (V2DFmode);
6768 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6769 rtx flt_even = gen_reg_rtx (V4SFmode);
6770 rtx flt_odd = gen_reg_rtx (V4SFmode);
6771 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6772 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6773 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6774 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6776 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6777 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6778 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6779 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6780 rs6000_expand_extract_even (target, flt_even, flt_odd);
6782 return;
6785 /* Store value to stack temp. Load vector element. Splat. However, splat
6786 of 64-bit items is not supported on Altivec. */
6787 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6789 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6790 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6791 XVECEXP (vals, 0, 0));
6792 x = gen_rtx_UNSPEC (VOIDmode,
6793 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6794 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6795 gen_rtvec (2,
6796 gen_rtx_SET (target, mem),
6797 x)));
6798 x = gen_rtx_VEC_SELECT (inner_mode, target,
6799 gen_rtx_PARALLEL (VOIDmode,
6800 gen_rtvec (1, const0_rtx)));
6801 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6802 return;
6805 /* One field is non-constant. Load constant then overwrite
6806 varying field. */
6807 if (n_var == 1)
6809 rtx copy = copy_rtx (vals);
6811 /* Load constant part of vector, substitute neighboring value for
6812 varying element. */
6813 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6814 rs6000_expand_vector_init (target, copy);
6816 /* Insert variable. */
6817 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6818 return;
6821 /* Construct the vector in memory one field at a time
6822 and load the whole vector. */
6823 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6824 for (i = 0; i < n_elts; i++)
6825 emit_move_insn (adjust_address_nv (mem, inner_mode,
6826 i * GET_MODE_SIZE (inner_mode)),
6827 XVECEXP (vals, 0, i));
6828 emit_move_insn (target, mem);
6831 /* Set field ELT of TARGET to VAL. */
6833 void
6834 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6836 machine_mode mode = GET_MODE (target);
6837 machine_mode inner_mode = GET_MODE_INNER (mode);
6838 rtx reg = gen_reg_rtx (mode);
6839 rtx mask, mem, x;
6840 int width = GET_MODE_SIZE (inner_mode);
6841 int i;
6843 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6845 rtx (*set_func) (rtx, rtx, rtx, rtx)
6846 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
6847 emit_insn (set_func (target, target, val, GEN_INT (elt)));
6848 return;
6851 /* Simplify setting single element vectors like V1TImode. */
6852 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6854 emit_move_insn (target, gen_lowpart (mode, val));
6855 return;
6858 /* Load single variable value. */
6859 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6860 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6861 x = gen_rtx_UNSPEC (VOIDmode,
6862 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6863 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6864 gen_rtvec (2,
6865 gen_rtx_SET (reg, mem),
6866 x)));
6868 /* Linear sequence. */
6869 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6870 for (i = 0; i < 16; ++i)
6871 XVECEXP (mask, 0, i) = GEN_INT (i);
6873 /* Set permute mask to insert element into target. */
6874 for (i = 0; i < width; ++i)
6875 XVECEXP (mask, 0, elt*width + i)
6876 = GEN_INT (i + 0x10);
6877 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6879 if (BYTES_BIG_ENDIAN)
6880 x = gen_rtx_UNSPEC (mode,
6881 gen_rtvec (3, target, reg,
6882 force_reg (V16QImode, x)),
6883 UNSPEC_VPERM);
6884 else
6886 if (TARGET_P9_VECTOR)
6887 x = gen_rtx_UNSPEC (mode,
6888 gen_rtvec (3, target, reg,
6889 force_reg (V16QImode, x)),
6890 UNSPEC_VPERMR);
6891 else
6893 /* Invert selector. We prefer to generate VNAND on P8 so
6894 that future fusion opportunities can kick in, but must
6895 generate VNOR elsewhere. */
6896 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6897 rtx iorx = (TARGET_P8_VECTOR
6898 ? gen_rtx_IOR (V16QImode, notx, notx)
6899 : gen_rtx_AND (V16QImode, notx, notx));
6900 rtx tmp = gen_reg_rtx (V16QImode);
6901 emit_insn (gen_rtx_SET (tmp, iorx));
6903 /* Permute with operands reversed and adjusted selector. */
6904 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6905 UNSPEC_VPERM);
6909 emit_insn (gen_rtx_SET (target, x));
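/* Illustrative sketch (not GCC code) of the vperm selector built above for
   the big-endian path (the little-endian path then inverts or reverses it):
   an identity byte map 0..15, with the inserted element's bytes redirected
   to the second input (0x10 + i).  For a 4-byte element at ELT == 2 the
   selector is 0 1 2 3 4 5 6 7 16 17 18 19 12 13 14 15.  The helper name is
   hypothetical.  */

static void
demo_insert_selector (unsigned char sel[16], int elt, int width)
{
  int i;

  for (i = 0; i < 16; i++)
    sel[i] = i;				/* copy target bytes unchanged */
  for (i = 0; i < width; i++)
    sel[elt * width + i] = 0x10 + i;	/* take the new value's bytes */
}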
6912 /* Extract field ELT from VEC into TARGET. */
6914 void
6915 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6917 machine_mode mode = GET_MODE (vec);
6918 machine_mode inner_mode = GET_MODE_INNER (mode);
6919 rtx mem;
6921 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6923 switch (mode)
6925 default:
6926 break;
6927 case V1TImode:
6928 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6929 emit_move_insn (target, gen_lowpart (TImode, vec));
6930 break;
6931 case V2DFmode:
6932 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6933 return;
6934 case V2DImode:
6935 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6936 return;
6937 case V4SFmode:
6938 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6939 return;
6940 case V16QImode:
6941 if (TARGET_VEXTRACTUB)
6943 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6944 return;
6946 else
6947 break;
6948 case V8HImode:
6949 if (TARGET_VEXTRACTUB)
6951 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6952 return;
6954 else
6955 break;
6956 case V4SImode:
6957 if (TARGET_VEXTRACTUB)
6959 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6960 return;
6962 else
6963 break;
6967 gcc_assert (CONST_INT_P (elt));
6969 /* Allocate mode-sized buffer. */
6970 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6972 emit_move_insn (mem, vec);
6974 /* Add offset to field within buffer matching vector element. */
6975 mem = adjust_address_nv (mem, inner_mode,
6976 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6978 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6981 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
6983 bool
6984 invalid_e500_subreg (rtx op, machine_mode mode)
6986 if (TARGET_E500_DOUBLE)
6988 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
6989 subreg:TI and reg:TF. Decimal float modes are like integer
6990 modes (only low part of each register used) for this
6991 purpose. */
6992 if (GET_CODE (op) == SUBREG
6993 && (mode == SImode || mode == DImode || mode == TImode
6994 || mode == DDmode || mode == TDmode || mode == PTImode)
6995 && REG_P (SUBREG_REG (op))
6996 && (GET_MODE (SUBREG_REG (op)) == DFmode
6997 || GET_MODE (SUBREG_REG (op)) == TFmode
6998 || GET_MODE (SUBREG_REG (op)) == IFmode
6999 || GET_MODE (SUBREG_REG (op)) == KFmode))
7000 return true;
7002 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
7003 reg:TI. */
7004 if (GET_CODE (op) == SUBREG
7005 && (mode == DFmode || mode == TFmode || mode == IFmode
7006 || mode == KFmode)
7007 && REG_P (SUBREG_REG (op))
7008 && (GET_MODE (SUBREG_REG (op)) == DImode
7009 || GET_MODE (SUBREG_REG (op)) == TImode
7010 || GET_MODE (SUBREG_REG (op)) == PTImode
7011 || GET_MODE (SUBREG_REG (op)) == DDmode
7012 || GET_MODE (SUBREG_REG (op)) == TDmode))
7013 return true;
7016 if (TARGET_SPE
7017 && GET_CODE (op) == SUBREG
7018 && mode == SImode
7019 && REG_P (SUBREG_REG (op))
7020 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
7021 return true;
7023 return false;
7026 /* Return the alignment of TYPE. Existing alignment is ALIGN. HOW
7027 selects whether the alignment is ABI-mandated, optional, or
7028 both ABI-mandated and optional alignment. */
7030 unsigned int
7031 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7033 if (how != align_opt)
7035 if (TREE_CODE (type) == VECTOR_TYPE)
7037 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
7038 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
7040 if (align < 64)
7041 align = 64;
7043 else if (align < 128)
7044 align = 128;
7046 else if (TARGET_E500_DOUBLE
7047 && TREE_CODE (type) == REAL_TYPE
7048 && TYPE_MODE (type) == DFmode)
7050 if (align < 64)
7051 align = 64;
7055 if (how != align_abi)
7057 if (TREE_CODE (type) == ARRAY_TYPE
7058 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7060 if (align < BITS_PER_WORD)
7061 align = BITS_PER_WORD;
7065 return align;
7068 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7070 bool
7071 rs6000_special_adjust_field_align_p (tree field, unsigned int computed)
7073 if (TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7075 if (computed != 128)
7077 static bool warned;
7078 if (!warned && warn_psabi)
7080 warned = true;
7081 inform (input_location,
7082 "the layout of aggregates containing vectors with"
7083 " %d-byte alignment has changed in GCC 5",
7084 computed / BITS_PER_UNIT);
7087 /* In current GCC there is no special case. */
7088 return false;
7091 return false;
7094 /* AIX increases natural record alignment to doubleword if the first
7095 field is an FP double while the FP fields remain word aligned. */
7097 unsigned int
7098 rs6000_special_round_type_align (tree type, unsigned int computed,
7099 unsigned int specified)
7101 unsigned int align = MAX (computed, specified);
7102 tree field = TYPE_FIELDS (type);
7104 /* Skip all non field decls */
7105 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7106 field = DECL_CHAIN (field);
7108 if (field != NULL && field != type)
7110 type = TREE_TYPE (field);
7111 while (TREE_CODE (type) == ARRAY_TYPE)
7112 type = TREE_TYPE (type);
7114 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7115 align = MAX (align, 64);
7118 return align;
7121 /* Darwin increases record alignment to the natural alignment of
7122 the first field. */
7124 unsigned int
7125 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7126 unsigned int specified)
7128 unsigned int align = MAX (computed, specified);
7130 if (TYPE_PACKED (type))
7131 return align;
7133 /* Find the first field, looking down into aggregates. */
7134 do {
7135 tree field = TYPE_FIELDS (type);
7136 /* Skip all non field decls */
7137 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7138 field = DECL_CHAIN (field);
7139 if (! field)
7140 break;
7141 /* A packed field does not contribute any extra alignment. */
7142 if (DECL_PACKED (field))
7143 return align;
7144 type = TREE_TYPE (field);
7145 while (TREE_CODE (type) == ARRAY_TYPE)
7146 type = TREE_TYPE (type);
7147 } while (AGGREGATE_TYPE_P (type));
7149 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7150 align = MAX (align, TYPE_ALIGN (type));
7152 return align;
7155 /* Return 1 for an operand in small memory on V.4/eabi. */
7157 int
7158 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7159 machine_mode mode ATTRIBUTE_UNUSED)
7161 #if TARGET_ELF
7162 rtx sym_ref;
7164 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7165 return 0;
7167 if (DEFAULT_ABI != ABI_V4)
7168 return 0;
7170 /* Vector and float memory instructions have a limited offset on the
7171 SPE, so using a vector or float variable directly as an operand is
7172 not useful. */
7173 if (TARGET_SPE
7174 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
7175 return 0;
7177 if (GET_CODE (op) == SYMBOL_REF)
7178 sym_ref = op;
7180 else if (GET_CODE (op) != CONST
7181 || GET_CODE (XEXP (op, 0)) != PLUS
7182 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7183 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7184 return 0;
7186 else
7188 rtx sum = XEXP (op, 0);
7189 HOST_WIDE_INT summand;
7191 /* We have to be careful here, because it is the referenced address
7192 that must be within 32k of _SDA_BASE_, not just the symbol.  */
7193 summand = INTVAL (XEXP (sum, 1));
7194 if (summand < 0 || summand > g_switch_value)
7195 return 0;
7197 sym_ref = XEXP (sum, 0);
7200 return SYMBOL_REF_SMALL_P (sym_ref);
7201 #else
7202 return 0;
7203 #endif
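/* An illustrative case, with invented values: under -G 8
   (g_switch_value = 8), a reference sym+4 is accepted only because the
   summand 4 lies in [0, g_switch_value]; it is the referenced address,
   not just the symbol, that must stay within reach of _SDA_BASE_.  */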
7206 /* Return true if either operand is a general purpose register. */
7208 bool
7209 gpr_or_gpr_p (rtx op0, rtx op1)
7211 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7212 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7215 /* Return true if this is a move direct operation between GPR registers and
7216 floating point/VSX registers. */
7218 bool
7219 direct_move_p (rtx op0, rtx op1)
7221 int regno0, regno1;
7223 if (!REG_P (op0) || !REG_P (op1))
7224 return false;
7226 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7227 return false;
7229 regno0 = REGNO (op0);
7230 regno1 = REGNO (op1);
7231 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7232 return false;
7234 if (INT_REGNO_P (regno0))
7235 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7237 else if (INT_REGNO_P (regno1))
7239 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7240 return true;
7242 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7243 return true;
7246 return false;
7249 /* Return true if the OFFSET is valid for the quad address instructions that
7250 use d-form (register + offset) addressing. */
7252 static inline bool
7253 quad_address_offset_p (HOST_WIDE_INT offset)
7255 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
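/* A few illustrative values, not taken from real code: offsets 0, 16
   and -32768 pass, being 16-byte multiples inside the signed 16-bit
   range; 8 fails the low-nibble test and 32768 fails the range test.  */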
7258 /* Return true if ADDR is an acceptable address for a quad memory
7259 operation of mode MODE (either LQ/STQ for general purpose registers,
7260 or LXV/STXV for vector registers under ISA 3.0).  STRICT is true if
7261 the base register must pass the strict (hard register) form of the
7262 register checks.  */
7264 bool
7265 quad_address_p (rtx addr, machine_mode mode, bool strict)
7267 rtx op0, op1;
7269 if (GET_MODE_SIZE (mode) != 16)
7270 return false;
7272 if (legitimate_indirect_address_p (addr, strict))
7273 return true;
7275 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
7276 return false;
7278 if (GET_CODE (addr) != PLUS)
7279 return false;
7281 op0 = XEXP (addr, 0);
7282 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7283 return false;
7285 op1 = XEXP (addr, 1);
7286 if (!CONST_INT_P (op1))
7287 return false;
7289 return quad_address_offset_p (INTVAL (op1));
7292 /* Return true if this is a load or store quad operation. This function does
7293 not handle the atomic quad memory instructions. */
7295 bool
7296 quad_load_store_p (rtx op0, rtx op1)
7298 bool ret;
7300 if (!TARGET_QUAD_MEMORY)
7301 ret = false;
7303 else if (REG_P (op0) && MEM_P (op1))
7304 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7305 && quad_memory_operand (op1, GET_MODE (op1))
7306 && !reg_overlap_mentioned_p (op0, op1));
7308 else if (MEM_P (op0) && REG_P (op1))
7309 ret = (quad_memory_operand (op0, GET_MODE (op0))
7310 && quad_int_reg_operand (op1, GET_MODE (op1)));
7312 else
7313 ret = false;
7315 if (TARGET_DEBUG_ADDR)
7317 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7318 ret ? "true" : "false");
7319 debug_rtx (gen_rtx_SET (op0, op1));
7322 return ret;
7325 /* Given an address, return a constant offset term if one exists. */
7327 static rtx
7328 address_offset (rtx op)
7330 if (GET_CODE (op) == PRE_INC
7331 || GET_CODE (op) == PRE_DEC)
7332 op = XEXP (op, 0);
7333 else if (GET_CODE (op) == PRE_MODIFY
7334 || GET_CODE (op) == LO_SUM)
7335 op = XEXP (op, 1);
7337 if (GET_CODE (op) == CONST)
7338 op = XEXP (op, 0);
7340 if (GET_CODE (op) == PLUS)
7341 op = XEXP (op, 1);
7343 if (CONST_INT_P (op))
7344 return op;
7346 return NULL_RTX;
7349 /* Return true if the MEM operand is a memory operand suitable for use
7350 with a (full width, possibly multiple) gpr load/store. On
7351 powerpc64 this means the offset must be divisible by 4.
7352 Implements 'Y' constraint.
7354 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7355 a constraint function we know the operand has satisfied a suitable
7356 memory predicate. Also accept some odd rtl generated by reload
7357 (see rs6000_legitimize_reload_address for various forms). It is
7358 important that reload rtl be accepted by appropriate constraints
7359 but not by the operand predicate.
7361 Offsetting a lo_sum should not be allowed, except where we know by
7362 alignment that a 32k boundary is not crossed, but see the ???
7363 comment in rs6000_legitimize_reload_address. Note that by
7364 "offsetting" here we mean a further offset to access parts of the
7365 MEM. It's fine to have a lo_sum where the inner address is offset
7366 from a sym, since the same sym+offset will appear in the high part
7367 of the address calculation. */
7369 bool
7370 mem_operand_gpr (rtx op, machine_mode mode)
7372 unsigned HOST_WIDE_INT offset;
7373 int extra;
7374 rtx addr = XEXP (op, 0);
7376 op = address_offset (addr);
7377 if (op == NULL_RTX)
7378 return true;
7380 offset = INTVAL (op);
7381 if (TARGET_POWERPC64 && (offset & 3) != 0)
7382 return false;
7384 if (mode_supports_vsx_dform_quad (mode)
7385 && !quad_address_offset_p (offset))
7386 return false;
7388 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7389 if (extra < 0)
7390 extra = 0;
7392 if (GET_CODE (addr) == LO_SUM)
7393 /* For lo_sum addresses, we must allow any offset except one that
7394 causes a wrap, so test only the low 16 bits. */
7395 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7397 return offset + 0x8000 < 0x10000u - extra;
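/* A worked example with an invented offset: for a DImode lo_sum address
   carrying offset 0x9ff8 on powerpc64, extra is 0 and the fold above
   gives ((0x9ff8 & 0xffff) ^ 0x8000) - 0x8000 = -0x6008; the unsigned
   test then sees -0x6008 + 0x8000 = 0x1ff8 < 0x10000, so the address
   is accepted.  */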
7400 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7402 static bool
7403 reg_offset_addressing_ok_p (machine_mode mode)
7405 switch (mode)
7407 case V16QImode:
7408 case V8HImode:
7409 case V4SFmode:
7410 case V4SImode:
7411 case V2DFmode:
7412 case V2DImode:
7413 case V1TImode:
7414 case TImode:
7415 case TFmode:
7416 case KFmode:
7417 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7418 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7419 a vector mode, if we want to use the VSX registers to move it around,
7420 we need to restrict ourselves to reg+reg addressing. Similarly for
7421 IEEE 128-bit floating point that is passed in a single vector
7422 register. */
7423 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7424 return mode_supports_vsx_dform_quad (mode);
7425 break;
7427 case V4HImode:
7428 case V2SImode:
7429 case V1DImode:
7430 case V2SFmode:
7431 /* Paired vector modes. Only reg+reg addressing is valid. */
7432 if (TARGET_PAIRED_FLOAT)
7433 return false;
7434 break;
7436 case SDmode:
7437 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7438 addressing for the LFIWZX and STFIWX instructions. */
7439 if (TARGET_NO_SDMODE_STACK)
7440 return false;
7441 break;
7443 default:
7444 break;
7447 return true;
7450 static bool
7451 virtual_stack_registers_memory_p (rtx op)
7453 int regnum;
7455 if (GET_CODE (op) == REG)
7456 regnum = REGNO (op);
7458 else if (GET_CODE (op) == PLUS
7459 && GET_CODE (XEXP (op, 0)) == REG
7460 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7461 regnum = REGNO (XEXP (op, 0));
7463 else
7464 return false;
7466 return (regnum >= FIRST_VIRTUAL_REGISTER
7467 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7470 /* Return true if a MODE-sized memory access to OP plus OFFSET
7471 is known not to straddle a 32k boundary.  This function is used
7472 to determine whether -mcmodel=medium code can use TOC pointer
7473 relative addressing for OP. This means the alignment of the TOC
7474 pointer must also be taken into account, and unfortunately that is
7475 only 8 bytes. */
7477 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7478 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7479 #endif
7481 static bool
7482 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7483 machine_mode mode)
7485 tree decl;
7486 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7488 if (GET_CODE (op) != SYMBOL_REF)
7489 return false;
7491 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7492 SYMBOL_REF. */
7493 if (mode_supports_vsx_dform_quad (mode))
7494 return false;
7496 dsize = GET_MODE_SIZE (mode);
7497 decl = SYMBOL_REF_DECL (op);
7498 if (!decl)
7500 if (dsize == 0)
7501 return false;
7503 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7504 replacing memory addresses with an anchor plus offset. We
7505 could find the decl by rummaging around in the block->objects
7506 VEC for the given offset but that seems like too much work. */
7507 dalign = BITS_PER_UNIT;
7508 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7509 && SYMBOL_REF_ANCHOR_P (op)
7510 && SYMBOL_REF_BLOCK (op) != NULL)
7512 struct object_block *block = SYMBOL_REF_BLOCK (op);
7514 dalign = block->alignment;
7515 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7517 else if (CONSTANT_POOL_ADDRESS_P (op))
7519 /* It would be nice to have get_pool_align ().  */
7520 machine_mode cmode = get_pool_mode (op);
7522 dalign = GET_MODE_ALIGNMENT (cmode);
7525 else if (DECL_P (decl))
7527 dalign = DECL_ALIGN (decl);
7529 if (dsize == 0)
7531 /* Allow BLKmode when the entire object is known to not
7532 cross a 32k boundary. */
7533 if (!DECL_SIZE_UNIT (decl))
7534 return false;
7536 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7537 return false;
7539 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7540 if (dsize > 32768)
7541 return false;
7543 dalign /= BITS_PER_UNIT;
7544 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7545 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7546 return dalign >= dsize;
7549 else
7550 gcc_unreachable ();
7552 /* Find how many bits of the alignment we know for this access. */
7553 dalign /= BITS_PER_UNIT;
7554 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7555 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7556 mask = dalign - 1;
7557 lsb = offset & -offset;
7558 mask &= lsb - 1;
7559 dalign = mask + 1;
7561 return dalign >= dsize;
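/* An invented illustration of the bit trick above: with dalign = 8 and
   offset = 20, lsb = 20 & -20 = 4, so mask = 7 & 3 = 3 and the known
   alignment drops to 4 bytes; a 4-byte access at that offset passes,
   an 8-byte access does not.  */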
7564 static bool
7565 constant_pool_expr_p (rtx op)
7567 rtx base, offset;
7569 split_const (op, &base, &offset);
7570 return (GET_CODE (base) == SYMBOL_REF
7571 && CONSTANT_POOL_ADDRESS_P (base)
7572 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7575 static const_rtx tocrel_base, tocrel_offset;
7577 /* Return true if OP is a toc pointer relative address (the output
7578 of create_TOC_reference). If STRICT, do not match high part or
7579 non-split -mcmodel=large/medium toc pointer relative addresses. */
7581 bool
7582 toc_relative_expr_p (const_rtx op, bool strict)
7584 if (!TARGET_TOC)
7585 return false;
7587 if (TARGET_CMODEL != CMODEL_SMALL)
7589 /* Only match the low part. */
7590 if (GET_CODE (op) == LO_SUM
7591 && REG_P (XEXP (op, 0))
7592 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
7593 op = XEXP (op, 1);
7594 else if (strict)
7595 return false;
7598 tocrel_base = op;
7599 tocrel_offset = const0_rtx;
7600 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7602 tocrel_base = XEXP (op, 0);
7603 tocrel_offset = XEXP (op, 1);
7606 return (GET_CODE (tocrel_base) == UNSPEC
7607 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
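/* As a sketch, the shape matched here is
   (plus (unspec [(symbol_ref) (reg 2)] UNSPEC_TOCREL) (const_int 16)),
   leaving tocrel_base pointing at the unspec and tocrel_offset at the
   const_int.  */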
7610 /* Return true if X is a constant pool address, and also for cmodel=medium
7611 if X is a toc-relative address known to be offsettable within MODE. */
7613 bool
7614 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7615 bool strict)
7617 return (toc_relative_expr_p (x, strict)
7618 && (TARGET_CMODEL != CMODEL_MEDIUM
7619 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7620 || mode == QImode
7621 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7622 INTVAL (tocrel_offset), mode)));
7625 static bool
7626 legitimate_small_data_p (machine_mode mode, rtx x)
7628 return (DEFAULT_ABI == ABI_V4
7629 && !flag_pic && !TARGET_TOC
7630 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7631 && small_data_operand (x, mode));
7634 /* SPE offset addressing is limited to 5 bits' worth of doublewords.  */
7635 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
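/* In other words, the offset must be a multiple of 8 in [0, 248]; as
   illustrative values, 0, 8 and 248 satisfy the mask test while 4 and
   256 do not.  */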
7637 bool
7638 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7639 bool strict, bool worst_case)
7641 unsigned HOST_WIDE_INT offset;
7642 unsigned int extra;
7644 if (GET_CODE (x) != PLUS)
7645 return false;
7646 if (!REG_P (XEXP (x, 0)))
7647 return false;
7648 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7649 return false;
7650 if (mode_supports_vsx_dform_quad (mode))
7651 return quad_address_p (x, mode, strict);
7652 if (!reg_offset_addressing_ok_p (mode))
7653 return virtual_stack_registers_memory_p (x);
7654 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7655 return true;
7656 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
7657 return false;
7659 offset = INTVAL (XEXP (x, 1));
7660 extra = 0;
7661 switch (mode)
7663 case V4HImode:
7664 case V2SImode:
7665 case V1DImode:
7666 case V2SFmode:
7667 /* SPE vector modes. */
7668 return SPE_CONST_OFFSET_OK (offset);
7670 case DFmode:
7671 case DDmode:
7672 case DImode:
7673 /* On e500v2, we may have:
7675 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
7677 which gets addressed with evldd instructions.  */
7678 if (TARGET_E500_DOUBLE)
7679 return SPE_CONST_OFFSET_OK (offset);
7681 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7682 addressing. */
7683 if (VECTOR_MEM_VSX_P (mode))
7684 return false;
7686 if (!worst_case)
7687 break;
7688 if (!TARGET_POWERPC64)
7689 extra = 4;
7690 else if (offset & 3)
7691 return false;
7692 break;
7694 case TFmode:
7695 case IFmode:
7696 case KFmode:
7697 if (TARGET_E500_DOUBLE)
7698 return (SPE_CONST_OFFSET_OK (offset)
7699 && SPE_CONST_OFFSET_OK (offset + 8));
7700 /* fall through */
7702 case TDmode:
7703 case TImode:
7704 case PTImode:
7705 extra = 8;
7706 if (!worst_case)
7707 break;
7708 if (!TARGET_POWERPC64)
7709 extra = 12;
7710 else if (offset & 3)
7711 return false;
7712 break;
7714 default:
7715 break;
7718 offset += 0x8000;
7719 return offset < 0x10000 - extra;
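/* Illustration of the worst-case slack, with invented numbers: a
   32-bit TImode access reads words at offset, offset+4, offset+8 and
   offset+12, so extra = 12 ensures that offset+12 still fits in the
   signed 16-bit displacement.  */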
7722 bool
7723 legitimate_indexed_address_p (rtx x, int strict)
7725 rtx op0, op1;
7727 if (GET_CODE (x) != PLUS)
7728 return false;
7730 op0 = XEXP (x, 0);
7731 op1 = XEXP (x, 1);
7733 /* Recognize the rtl generated by reload which we know will later be
7734 replaced with proper base and index regs. */
7735 if (!strict
7736 && reload_in_progress
7737 && (REG_P (op0) || GET_CODE (op0) == PLUS)
7738 && REG_P (op1))
7739 return true;
7741 return (REG_P (op0) && REG_P (op1)
7742 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
7743 && INT_REG_OK_FOR_INDEX_P (op1, strict))
7744 || (INT_REG_OK_FOR_BASE_P (op1, strict)
7745 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
7748 bool
7749 avoiding_indexed_address_p (machine_mode mode)
7751 /* Avoid indexed addressing for modes that have non-indexed
7752 load/store instruction forms. */
7753 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
7756 bool
7757 legitimate_indirect_address_p (rtx x, int strict)
7759 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
7762 bool
7763 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
7765 if (!TARGET_MACHO || !flag_pic
7766 || mode != SImode || GET_CODE (x) != MEM)
7767 return false;
7768 x = XEXP (x, 0);
7770 if (GET_CODE (x) != LO_SUM)
7771 return false;
7772 if (GET_CODE (XEXP (x, 0)) != REG)
7773 return false;
7774 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
7775 return false;
7776 x = XEXP (x, 1);
7778 return CONSTANT_P (x);
7781 static bool
7782 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
7784 if (GET_CODE (x) != LO_SUM)
7785 return false;
7786 if (GET_CODE (XEXP (x, 0)) != REG)
7787 return false;
7788 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7789 return false;
7790 /* Quad word addresses are restricted; we can't use LO_SUM.  */
7791 if (mode_supports_vsx_dform_quad (mode))
7792 return false;
7793 /* Restrict addressing for DI because of our SUBREG hackery. */
7794 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
7795 return false;
7796 x = XEXP (x, 1);
7798 if (TARGET_ELF || TARGET_MACHO)
7800 bool large_toc_ok;
7802 if (DEFAULT_ABI == ABI_V4 && flag_pic)
7803 return false;
7804 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
7805 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
7806 recognizes some LO_SUM addresses as valid although this
7807 function says the opposite.  In most cases, LRA through different
7808 transformations can generate correct code for address reloads.
7809 Only some LO_SUM cases are beyond it.  So we need to add code
7810 analogous to that in rs6000_legitimize_reload_address for LO_SUM
7811 here, saying that some addresses are still valid.  */
7812 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
7813 && small_toc_ref (x, VOIDmode));
7814 if (TARGET_TOC && ! large_toc_ok)
7815 return false;
7816 if (GET_MODE_NUNITS (mode) != 1)
7817 return false;
7818 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
7819 && !(/* ??? Assume floating point reg based on mode? */
7820 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
7821 && (mode == DFmode || mode == DDmode)))
7822 return false;
7824 return CONSTANT_P (x) || large_toc_ok;
7827 return false;
7831 /* Try machine-dependent ways of modifying an illegitimate address
7832 to be legitimate. If we find one, return the new, valid address.
7833 This is used from only one place: `memory_address' in explow.c.
7835 OLDX is the address as it was before break_out_memory_refs was
7836 called. In some cases it is useful to look at this to decide what
7837 needs to be done.
7839 It is always safe for this function to do nothing. It exists to
7840 recognize opportunities to optimize the output.
7842 On RS/6000, first check for the sum of a register with a constant
7843 integer that is out of range. If so, generate code to add the
7844 constant with the low-order 16 bits masked to the register and force
7845 this result into another register (this can be done with `cau').
7846 Then generate an address of REG+(CONST&0xffff), allowing for the
7847 possibility of bit 16 being a one.
7849 Then check for the sum of a register and something not constant, try to
7850 load the other things into a register and return the sum. */
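/* A worked example with an invented constant: for x = reg + 0x12345,
   low_int = ((0x2345 ^ 0x8000) - 0x8000) = 0x2345 and high_int =
   0x10000, so an addis of 1 is emitted and 0x2345 remains as the
   memory displacement.  */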
7852 static rtx
7853 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
7854 machine_mode mode)
7856 unsigned int extra;
7858 if (!reg_offset_addressing_ok_p (mode)
7859 || mode_supports_vsx_dform_quad (mode))
7861 if (virtual_stack_registers_memory_p (x))
7862 return x;
7864 /* In theory we should not be seeing addresses of the form reg+0,
7865 but just in case it is generated, optimize it away. */
7866 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
7867 return force_reg (Pmode, XEXP (x, 0));
7869 /* For TImode with load/store quad, restrict addresses to just a single
7870 pointer, so it works with both GPRs and VSX registers. */
7871 /* Make sure both operands are registers. */
7872 else if (GET_CODE (x) == PLUS
7873 && (mode != TImode || !TARGET_QUAD_MEMORY))
7874 return gen_rtx_PLUS (Pmode,
7875 force_reg (Pmode, XEXP (x, 0)),
7876 force_reg (Pmode, XEXP (x, 1)));
7877 else
7878 return force_reg (Pmode, x);
7880 if (GET_CODE (x) == SYMBOL_REF)
7882 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
7883 if (model != 0)
7884 return rs6000_legitimize_tls_address (x, model);
7887 extra = 0;
7888 switch (mode)
7890 case TFmode:
7891 case TDmode:
7892 case TImode:
7893 case PTImode:
7894 case IFmode:
7895 case KFmode:
7896 /* As in legitimate_offset_address_p we do not assume
7897 worst-case. The mode here is just a hint as to the registers
7898 used. A TImode is usually in gprs, but may actually be in
7899 fprs. Leave worst-case scenario for reload to handle via
7900 insn constraints. PTImode is only GPRs. */
7901 extra = 8;
7902 break;
7903 default:
7904 break;
7907 if (GET_CODE (x) == PLUS
7908 && GET_CODE (XEXP (x, 0)) == REG
7909 && GET_CODE (XEXP (x, 1)) == CONST_INT
7910 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
7911 >= 0x10000 - extra)
7912 && !(SPE_VECTOR_MODE (mode)
7913 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
7915 HOST_WIDE_INT high_int, low_int;
7916 rtx sum;
7917 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
7918 if (low_int >= 0x8000 - extra)
7919 low_int = 0;
7920 high_int = INTVAL (XEXP (x, 1)) - low_int;
7921 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
7922 GEN_INT (high_int)), 0);
7923 return plus_constant (Pmode, sum, low_int);
7925 else if (GET_CODE (x) == PLUS
7926 && GET_CODE (XEXP (x, 0)) == REG
7927 && GET_CODE (XEXP (x, 1)) != CONST_INT
7928 && GET_MODE_NUNITS (mode) == 1
7929 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
7930 || (/* ??? Assume floating point reg based on mode? */
7931 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7932 && (mode == DFmode || mode == DDmode)))
7933 && !avoiding_indexed_address_p (mode))
7935 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
7936 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
7938 else if (SPE_VECTOR_MODE (mode)
7939 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
7941 if (mode == DImode)
7942 return x;
7943 /* We accept [reg + reg] and [reg + OFFSET]. */
7945 if (GET_CODE (x) == PLUS)
7947 rtx op1 = XEXP (x, 0);
7948 rtx op2 = XEXP (x, 1);
7949 rtx y;
7951 op1 = force_reg (Pmode, op1);
7953 if (GET_CODE (op2) != REG
7954 && (GET_CODE (op2) != CONST_INT
7955 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
7956 || (GET_MODE_SIZE (mode) > 8
7957 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
7958 op2 = force_reg (Pmode, op2);
7960 /* We can't always do [reg + reg] for these, because [reg +
7961 reg + offset] is not a legitimate addressing mode. */
7962 y = gen_rtx_PLUS (Pmode, op1, op2);
7964 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
7965 return force_reg (Pmode, y);
7966 else
7967 return y;
7970 return force_reg (Pmode, x);
7972 else if ((TARGET_ELF
7973 #if TARGET_MACHO
7974 || !MACHO_DYNAMIC_NO_PIC_P
7975 #endif
7977 && TARGET_32BIT
7978 && TARGET_NO_TOC
7979 && ! flag_pic
7980 && GET_CODE (x) != CONST_INT
7981 && GET_CODE (x) != CONST_WIDE_INT
7982 && GET_CODE (x) != CONST_DOUBLE
7983 && CONSTANT_P (x)
7984 && GET_MODE_NUNITS (mode) == 1
7985 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
7986 || (/* ??? Assume floating point reg based on mode? */
7987 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7988 && (mode == DFmode || mode == DDmode))))
7990 rtx reg = gen_reg_rtx (Pmode);
7991 if (TARGET_ELF)
7992 emit_insn (gen_elf_high (reg, x));
7993 else
7994 emit_insn (gen_macho_high (reg, x));
7995 return gen_rtx_LO_SUM (Pmode, reg, x);
7997 else if (TARGET_TOC
7998 && GET_CODE (x) == SYMBOL_REF
7999 && constant_pool_expr_p (x)
8000 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8001 return create_TOC_reference (x, NULL_RTX);
8002 else
8003 return x;
8006 /* Debug version of rs6000_legitimize_address. */
8007 static rtx
8008 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8010 rtx ret;
8011 rtx_insn *insns;
8013 start_sequence ();
8014 ret = rs6000_legitimize_address (x, oldx, mode);
8015 insns = get_insns ();
8016 end_sequence ();
8018 if (ret != x)
8020 fprintf (stderr,
8021 "\nrs6000_legitimize_address: mode %s, old code %s, "
8022 "new code %s, modified\n",
8023 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8024 GET_RTX_NAME (GET_CODE (ret)));
8026 fprintf (stderr, "Original address:\n");
8027 debug_rtx (x);
8029 fprintf (stderr, "oldx:\n");
8030 debug_rtx (oldx);
8032 fprintf (stderr, "New address:\n");
8033 debug_rtx (ret);
8035 if (insns)
8037 fprintf (stderr, "Insns added:\n");
8038 debug_rtx_list (insns, 20);
8041 else
8043 fprintf (stderr,
8044 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8045 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8047 debug_rtx (x);
8050 if (insns)
8051 emit_insn (insns);
8053 return ret;
8056 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8057 We need to emit DTP-relative relocations. */
8059 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8060 static void
8061 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8063 switch (size)
8065 case 4:
8066 fputs ("\t.long\t", file);
8067 break;
8068 case 8:
8069 fputs (DOUBLE_INT_ASM_OP, file);
8070 break;
8071 default:
8072 gcc_unreachable ();
8074 output_addr_const (file, x);
8075 if (TARGET_ELF)
8076 fputs ("@dtprel+0x8000", file);
8077 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8079 switch (SYMBOL_REF_TLS_MODEL (x))
8081 case 0:
8082 break;
8083 case TLS_MODEL_LOCAL_EXEC:
8084 fputs ("@le", file);
8085 break;
8086 case TLS_MODEL_INITIAL_EXEC:
8087 fputs ("@ie", file);
8088 break;
8089 case TLS_MODEL_GLOBAL_DYNAMIC:
8090 case TLS_MODEL_LOCAL_DYNAMIC:
8091 fputs ("@m", file);
8092 break;
8093 default:
8094 gcc_unreachable ();
8099 /* Return true if X is a symbol that refers to real (rather than emulated)
8100 TLS. */
8102 static bool
8103 rs6000_real_tls_symbol_ref_p (rtx x)
8105 return (GET_CODE (x) == SYMBOL_REF
8106 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8109 /* In the name of slightly smaller debug output, and to cater to
8110 general assembler lossage, recognize various UNSPEC sequences
8111 and turn them back into a direct symbol reference. */
8113 static rtx
8114 rs6000_delegitimize_address (rtx orig_x)
8116 rtx x, y, offset;
8118 orig_x = delegitimize_mem_from_attrs (orig_x);
8119 x = orig_x;
8120 if (MEM_P (x))
8121 x = XEXP (x, 0);
8123 y = x;
8124 if (TARGET_CMODEL != CMODEL_SMALL
8125 && GET_CODE (y) == LO_SUM)
8126 y = XEXP (y, 1);
8128 offset = NULL_RTX;
8129 if (GET_CODE (y) == PLUS
8130 && GET_MODE (y) == Pmode
8131 && CONST_INT_P (XEXP (y, 1)))
8133 offset = XEXP (y, 1);
8134 y = XEXP (y, 0);
8137 if (GET_CODE (y) == UNSPEC
8138 && XINT (y, 1) == UNSPEC_TOCREL)
8140 y = XVECEXP (y, 0, 0);
8142 #ifdef HAVE_AS_TLS
8143 /* Do not associate thread-local symbols with the original
8144 constant pool symbol. */
8145 if (TARGET_XCOFF
8146 && GET_CODE (y) == SYMBOL_REF
8147 && CONSTANT_POOL_ADDRESS_P (y)
8148 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8149 return orig_x;
8150 #endif
8152 if (offset != NULL_RTX)
8153 y = gen_rtx_PLUS (Pmode, y, offset);
8154 if (!MEM_P (orig_x))
8155 return y;
8156 else
8157 return replace_equiv_address_nv (orig_x, y);
8160 if (TARGET_MACHO
8161 && GET_CODE (orig_x) == LO_SUM
8162 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8164 y = XEXP (XEXP (orig_x, 1), 0);
8165 if (GET_CODE (y) == UNSPEC
8166 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8167 return XVECEXP (y, 0, 0);
8170 return orig_x;
8173 /* Return true if X shouldn't be emitted into the debug info.
8174 The linker doesn't like .toc section references from
8175 .debug_* sections, so reject .toc section symbols. */
8177 static bool
8178 rs6000_const_not_ok_for_debug_p (rtx x)
8180 if (GET_CODE (x) == SYMBOL_REF
8181 && CONSTANT_POOL_ADDRESS_P (x))
8183 rtx c = get_pool_constant (x);
8184 machine_mode cmode = get_pool_mode (x);
8185 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8186 return true;
8189 return false;
8192 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8194 static GTY(()) rtx rs6000_tls_symbol;
8195 static rtx
8196 rs6000_tls_get_addr (void)
8198 if (!rs6000_tls_symbol)
8199 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8201 return rs6000_tls_symbol;
8204 /* Construct the SYMBOL_REF for TLS GOT references. */
8206 static GTY(()) rtx rs6000_got_symbol;
8207 static rtx
8208 rs6000_got_sym (void)
8210 if (!rs6000_got_symbol)
8212 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8213 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8214 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8217 return rs6000_got_symbol;
8220 /* AIX Thread-Local Address support. */
8222 static rtx
8223 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8225 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8226 const char *name;
8227 char *tlsname;
8229 name = XSTR (addr, 0);
8230 /* Append the TLS CSECT qualifier, unless the symbol is already
8231 qualified or will be placed in the TLS private data section.  */
8232 if (name[strlen (name) - 1] != ']'
8233 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8234 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8236 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8237 strcpy (tlsname, name);
8238 strcat (tlsname,
8239 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8240 tlsaddr = copy_rtx (addr);
8241 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8243 else
8244 tlsaddr = addr;
8246 /* Place addr into TOC constant pool. */
8247 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8249 /* Output the TOC entry and create the MEM referencing the value. */
8250 if (constant_pool_expr_p (XEXP (sym, 0))
8251 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8253 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8254 mem = gen_const_mem (Pmode, tocref);
8255 set_mem_alias_set (mem, get_TOC_alias_set ());
8257 else
8258 return sym;
8260 /* Use global-dynamic for local-dynamic. */
8261 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8262 || model == TLS_MODEL_LOCAL_DYNAMIC)
8264 /* Create new TOC reference for @m symbol. */
8265 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8266 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8267 strcpy (tlsname, "*LCM");
8268 strcat (tlsname, name + 3);
8269 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8270 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8271 tocref = create_TOC_reference (modaddr, NULL_RTX);
8272 rtx modmem = gen_const_mem (Pmode, tocref);
8273 set_mem_alias_set (modmem, get_TOC_alias_set ());
8275 rtx modreg = gen_reg_rtx (Pmode);
8276 emit_insn (gen_rtx_SET (modreg, modmem));
8278 tmpreg = gen_reg_rtx (Pmode);
8279 emit_insn (gen_rtx_SET (tmpreg, mem));
8281 dest = gen_reg_rtx (Pmode);
8282 if (TARGET_32BIT)
8283 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8284 else
8285 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8286 return dest;
8288 /* Obtain the TLS pointer: a call on 32-bit, or GPR 13 on 64-bit.  */
8289 else if (TARGET_32BIT)
8291 tlsreg = gen_reg_rtx (SImode);
8292 emit_insn (gen_tls_get_tpointer (tlsreg));
8294 else
8295 tlsreg = gen_rtx_REG (DImode, 13);
8297 /* Load the TOC value into temporary register. */
8298 tmpreg = gen_reg_rtx (Pmode);
8299 emit_insn (gen_rtx_SET (tmpreg, mem));
8300 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8301 gen_rtx_MINUS (Pmode, addr, tlsreg));
8303 /* Add TOC symbol value to TLS pointer. */
8304 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8306 return dest;
8309 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8310 this (thread-local) address. */
8312 static rtx
8313 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8315 rtx dest, insn;
8317 if (TARGET_XCOFF)
8318 return rs6000_legitimize_tls_address_aix (addr, model);
8320 dest = gen_reg_rtx (Pmode);
8321 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8323 rtx tlsreg;
8325 if (TARGET_64BIT)
8327 tlsreg = gen_rtx_REG (Pmode, 13);
8328 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8330 else
8332 tlsreg = gen_rtx_REG (Pmode, 2);
8333 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8335 emit_insn (insn);
8337 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8339 rtx tlsreg, tmp;
8341 tmp = gen_reg_rtx (Pmode);
8342 if (TARGET_64BIT)
8344 tlsreg = gen_rtx_REG (Pmode, 13);
8345 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8347 else
8349 tlsreg = gen_rtx_REG (Pmode, 2);
8350 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8352 emit_insn (insn);
8353 if (TARGET_64BIT)
8354 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8355 else
8356 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8357 emit_insn (insn);
8359 else
8361 rtx r3, got, tga, tmp1, tmp2, call_insn;
8363 /* We currently use relocations like @got@tlsgd for tls, which
8364 means the linker will handle allocation of tls entries, placing
8365 them in the .got section. So use a pointer to the .got section,
8366 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8367 or to secondary GOT sections used by 32-bit -fPIC. */
8368 if (TARGET_64BIT)
8369 got = gen_rtx_REG (Pmode, 2);
8370 else
8372 if (flag_pic == 1)
8373 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8374 else
8376 rtx gsym = rs6000_got_sym ();
8377 got = gen_reg_rtx (Pmode);
8378 if (flag_pic == 0)
8379 rs6000_emit_move (got, gsym, Pmode);
8380 else
8382 rtx mem, lab, last;
8384 tmp1 = gen_reg_rtx (Pmode);
8385 tmp2 = gen_reg_rtx (Pmode);
8386 mem = gen_const_mem (Pmode, tmp1);
8387 lab = gen_label_rtx ();
8388 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8389 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8390 if (TARGET_LINK_STACK)
8391 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8392 emit_move_insn (tmp2, mem);
8393 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8394 set_unique_reg_note (last, REG_EQUAL, gsym);
8399 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8401 tga = rs6000_tls_get_addr ();
8402 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8403 1, const0_rtx, Pmode);
8405 r3 = gen_rtx_REG (Pmode, 3);
8406 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8408 if (TARGET_64BIT)
8409 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
8410 else
8411 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
8413 else if (DEFAULT_ABI == ABI_V4)
8414 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
8415 else
8416 gcc_unreachable ();
8417 call_insn = last_call_insn ();
8418 PATTERN (call_insn) = insn;
8419 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8420 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8421 pic_offset_table_rtx);
8423 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8425 tga = rs6000_tls_get_addr ();
8426 tmp1 = gen_reg_rtx (Pmode);
8427 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8428 1, const0_rtx, Pmode);
8430 r3 = gen_rtx_REG (Pmode, 3);
8431 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8433 if (TARGET_64BIT)
8434 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
8435 else
8436 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
8438 else if (DEFAULT_ABI == ABI_V4)
8439 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
8440 else
8441 gcc_unreachable ();
8442 call_insn = last_call_insn ();
8443 PATTERN (call_insn) = insn;
8444 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8445 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8446 pic_offset_table_rtx);
8448 if (rs6000_tls_size == 16)
8450 if (TARGET_64BIT)
8451 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8452 else
8453 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8455 else if (rs6000_tls_size == 32)
8457 tmp2 = gen_reg_rtx (Pmode);
8458 if (TARGET_64BIT)
8459 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8460 else
8461 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8462 emit_insn (insn);
8463 if (TARGET_64BIT)
8464 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8465 else
8466 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8468 else
8470 tmp2 = gen_reg_rtx (Pmode);
8471 if (TARGET_64BIT)
8472 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8473 else
8474 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8475 emit_insn (insn);
8476 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8478 emit_insn (insn);
8480 else
8482 /* IE, or 64-bit offset LE. */
8483 tmp2 = gen_reg_rtx (Pmode);
8484 if (TARGET_64BIT)
8485 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8486 else
8487 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8488 emit_insn (insn);
8489 if (TARGET_64BIT)
8490 insn = gen_tls_tls_64 (dest, tmp2, addr);
8491 else
8492 insn = gen_tls_tls_32 (dest, tmp2, addr);
8493 emit_insn (insn);
8497 return dest;
8500 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8502 static bool
8503 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8505 if (GET_CODE (x) == HIGH
8506 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8507 return true;
8509 /* A TLS symbol in the TOC cannot contain a sum. */
8510 if (GET_CODE (x) == CONST
8511 && GET_CODE (XEXP (x, 0)) == PLUS
8512 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8513 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8514 return true;
8516 /* Do not place an ELF TLS symbol in the constant pool. */
8517 return TARGET_ELF && tls_referenced_p (x);
8520 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8521 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8522 can be addressed relative to the toc pointer. */
8524 static bool
8525 use_toc_relative_ref (rtx sym, machine_mode mode)
8527 return ((constant_pool_expr_p (sym)
8528 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8529 get_pool_mode (sym)))
8530 || (TARGET_CMODEL == CMODEL_MEDIUM
8531 && SYMBOL_REF_LOCAL_P (sym)
8532 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8535 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8536 replace the input X, or the original X if no replacement is called for.
8537 The output parameter *WIN is 1 if the calling macro should goto WIN,
8538 0 if it should not.
8540 For RS/6000, we wish to handle large displacements off a base
8541 register by splitting the addend across an addis and the mem insn.
8542 This cuts the number of extra insns needed from 3 to 1.
8544 On Darwin, we use this to generate code for floating point constants.
8545 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8546 The Darwin code is inside #if TARGET_MACHO because only then are the
8547 machopic_* functions defined. */
8548 static rtx
8549 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8550 int opnum, int type,
8551 int ind_levels ATTRIBUTE_UNUSED, int *win)
8553 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8554 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
8556 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8557 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8558 if (reg_offset_p
8559 && opnum == 1
8560 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8561 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8562 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8563 && TARGET_P9_VECTOR)
8564 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8565 && TARGET_P9_VECTOR)))
8566 reg_offset_p = false;
8568 /* We must recognize output that we have already generated ourselves. */
8569 if (GET_CODE (x) == PLUS
8570 && GET_CODE (XEXP (x, 0)) == PLUS
8571 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8572 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8573 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8575 if (TARGET_DEBUG_ADDR)
8577 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8578 debug_rtx (x);
8580 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8581 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8582 opnum, (enum reload_type) type);
8583 *win = 1;
8584 return x;
8587 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8588 if (GET_CODE (x) == LO_SUM
8589 && GET_CODE (XEXP (x, 0)) == HIGH)
8591 if (TARGET_DEBUG_ADDR)
8593 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8594 debug_rtx (x);
8596 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8597 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8598 opnum, (enum reload_type) type);
8599 *win = 1;
8600 return x;
8603 #if TARGET_MACHO
8604 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8605 && GET_CODE (x) == LO_SUM
8606 && GET_CODE (XEXP (x, 0)) == PLUS
8607 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8608 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8609 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8610 && machopic_operand_p (XEXP (x, 1)))
8612 /* Result of previous invocation of this function on Darwin
8613 floating point constant. */
8614 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8615 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8616 opnum, (enum reload_type) type);
8617 *win = 1;
8618 return x;
8620 #endif
8622 if (TARGET_CMODEL != CMODEL_SMALL
8623 && reg_offset_p
8624 && !quad_offset_p
8625 && small_toc_ref (x, VOIDmode))
8627 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8628 x = gen_rtx_LO_SUM (Pmode, hi, x);
8629 if (TARGET_DEBUG_ADDR)
8631 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8632 debug_rtx (x);
8634 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8635 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8636 opnum, (enum reload_type) type);
8637 *win = 1;
8638 return x;
8641 if (GET_CODE (x) == PLUS
8642 && REG_P (XEXP (x, 0))
8643 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
8644 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8645 && CONST_INT_P (XEXP (x, 1))
8646 && reg_offset_p
8647 && !SPE_VECTOR_MODE (mode)
8648 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
8649 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8651 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
8652 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
8653 HOST_WIDE_INT high
8654 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
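/* For instance, with invented values: val = 0x12345 splits into
   low = 0x2345 and high = 0x10000, which recombine exactly, while
   val = 0x7fffffff gives low = -1 and high = -0x80000000, whose
   sum does not recombine and is rejected below.  */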
8656 /* Check for 32-bit overflow or quad addresses with one of the
8657 four least significant bits set. */
8658 if (high + low != val
8659 || (quad_offset_p && (low & 0xf)))
8661 *win = 0;
8662 return x;
8665 /* Reload the high part into a base reg; leave the low part
8666 in the mem directly. */
8668 x = gen_rtx_PLUS (GET_MODE (x),
8669 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8670 GEN_INT (high)),
8671 GEN_INT (low));
8673 if (TARGET_DEBUG_ADDR)
8675 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
8676 debug_rtx (x);
8678 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8679 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8680 opnum, (enum reload_type) type);
8681 *win = 1;
8682 return x;
8685 if (GET_CODE (x) == SYMBOL_REF
8686 && reg_offset_p
8687 && !quad_offset_p
8688 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
8689 && !SPE_VECTOR_MODE (mode)
8690 #if TARGET_MACHO
8691 && DEFAULT_ABI == ABI_DARWIN
8692 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
8693 && machopic_symbol_defined_p (x)
8694 #else
8695 && DEFAULT_ABI == ABI_V4
8696 && !flag_pic
8697 #endif
8698 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
8699 The same goes for DImode without 64-bit gprs and DFmode and DDmode
8700 without fprs.
8701 ??? Assume floating point reg based on mode? This assumption is
8702 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
8703 where reload ends up doing a DFmode load of a constant from
8704 mem using two gprs. Unfortunately, at this point reload
8705 hasn't yet selected regs so poking around in reload data
8706 won't help and even if we could figure out the regs reliably,
8707 we'd still want to allow this transformation when the mem is
8708 naturally aligned. Since we say the address is good here, we
8709 can't disable offsets from LO_SUMs in mem_operand_gpr.
8710 FIXME: Allow offset from lo_sum for other modes too, when
8711 mem is sufficiently aligned.
8713 Also disallow this if the type can go in VMX/Altivec registers, since
8714 those registers do not have d-form (reg+offset) address modes. */
8715 && !reg_addr[mode].scalar_in_vmx_p
8716 && mode != TFmode
8717 && mode != TDmode
8718 && mode != IFmode
8719 && mode != KFmode
8720 && (mode != TImode || !TARGET_VSX_TIMODE)
8721 && mode != PTImode
8722 && (mode != DImode || TARGET_POWERPC64)
8723 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
8724 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
8726 #if TARGET_MACHO
8727 if (flag_pic)
8729 rtx offset = machopic_gen_offset (x);
8730 x = gen_rtx_LO_SUM (GET_MODE (x),
8731 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
8732 gen_rtx_HIGH (Pmode, offset)), offset);
8734 else
8735 #endif
8736 x = gen_rtx_LO_SUM (GET_MODE (x),
8737 gen_rtx_HIGH (Pmode, x), x);
8739 if (TARGET_DEBUG_ADDR)
8741 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
8742 debug_rtx (x);
8744 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8745 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8746 opnum, (enum reload_type) type);
8747 *win = 1;
8748 return x;
8751 /* Reload an offset address wrapped by an AND that represents the
8752 masking of the lower bits. Strip the outer AND and let reload
8753 convert the offset address into an indirect address. For VSX,
8754 force reload to create the address with an AND in a separate
8755 register, because we can't guarantee an altivec register will
8756 be used. */
8757 if (VECTOR_MEM_ALTIVEC_P (mode)
8758 && GET_CODE (x) == AND
8759 && GET_CODE (XEXP (x, 0)) == PLUS
8760 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8761 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8762 && GET_CODE (XEXP (x, 1)) == CONST_INT
8763 && INTVAL (XEXP (x, 1)) == -16)
8765 x = XEXP (x, 0);
8766 *win = 1;
8767 return x;
8770 if (TARGET_TOC
8771 && reg_offset_p
8772 && !quad_offset_p
8773 && GET_CODE (x) == SYMBOL_REF
8774 && use_toc_relative_ref (x, mode))
8776 x = create_TOC_reference (x, NULL_RTX);
8777 if (TARGET_CMODEL != CMODEL_SMALL)
8779 if (TARGET_DEBUG_ADDR)
8781 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
8782 debug_rtx (x);
8784 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8785 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8786 opnum, (enum reload_type) type);
8788 *win = 1;
8789 return x;
8791 *win = 0;
8792 return x;
8795 /* Debug version of rs6000_legitimize_reload_address. */
8796 static rtx
8797 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
8798 int opnum, int type,
8799 int ind_levels, int *win)
8801 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
8802 ind_levels, win);
8803 fprintf (stderr,
8804 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
8805 "type = %d, ind_levels = %d, win = %d, original addr:\n",
8806 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
8807 debug_rtx (x);
8809 if (x == ret)
8810 fprintf (stderr, "Same address returned\n");
8811 else if (!ret)
8812 fprintf (stderr, "NULL returned\n");
8813 else
8815 fprintf (stderr, "New address:\n");
8816 debug_rtx (ret);
8819 return ret;
8822 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
8823 that is a valid memory address for an instruction.
8824 The MODE argument is the machine mode for the MEM expression
8825 that wants to use this address.
8827 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
8828 refers to a constant pool entry of an address (or the sum of it
8829 plus a constant), a short (16-bit signed) constant plus a register,
8830 the sum of two registers, or a register indirect, possibly with an
8831 auto-increment. For DFmode, DDmode and DImode with a constant plus
8832 register, we must ensure that both words are addressable or PowerPC64
8833 with offset word aligned.
8835 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
8836 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
8837 because adjacent memory cells are accessed by adding word-sized offsets
8838 during assembly output. */
8839 static bool
8840 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
8842 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8843 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
8845 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
8846 if (VECTOR_MEM_ALTIVEC_P (mode)
8847 && GET_CODE (x) == AND
8848 && GET_CODE (XEXP (x, 1)) == CONST_INT
8849 && INTVAL (XEXP (x, 1)) == -16)
8850 x = XEXP (x, 0);
8852 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
8853 return 0;
8854 if (legitimate_indirect_address_p (x, reg_ok_strict))
8855 return 1;
8856 if (TARGET_UPDATE
8857 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
8858 && mode_supports_pre_incdec_p (mode)
8859 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
8860 return 1;
8861 /* Handle restricted vector d-form offsets in ISA 3.0. */
8862 if (quad_offset_p)
8864 if (quad_address_p (x, mode, reg_ok_strict))
8865 return 1;
8867 else if (virtual_stack_registers_memory_p (x))
8868 return 1;
8870 else if (reg_offset_p)
8872 if (legitimate_small_data_p (mode, x))
8873 return 1;
8874 if (legitimate_constant_pool_address_p (x, mode,
8875 reg_ok_strict || lra_in_progress))
8876 return 1;
8877 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
8878 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
8879 return 1;
8882 /* For TImode, if we have load/store quad and TImode in VSX registers, only
8883 allow register indirect addresses. This will allow the values to go in
8884 either GPRs or VSX registers without reloading. The vector types would
8885 tend to go into VSX registers, so we allow REG+REG for them, while TImode
8886 is somewhat split, in that some uses are GPR based and some VSX based.  */
8887 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
8888 return 0;
8889 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
8890 if (! reg_ok_strict
8891 && reg_offset_p
8892 && GET_CODE (x) == PLUS
8893 && GET_CODE (XEXP (x, 0)) == REG
8894 && (XEXP (x, 0) == virtual_stack_vars_rtx
8895 || XEXP (x, 0) == arg_pointer_rtx)
8896 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8897 return 1;
8898 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
8899 return 1;
8900 if (!FLOAT128_2REG_P (mode)
8901 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
8902 || TARGET_POWERPC64
8903 || (mode != DFmode && mode != DDmode)
8904 || (TARGET_E500_DOUBLE && mode != DDmode))
8905 && (TARGET_POWERPC64 || mode != DImode)
8906 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
8907 && mode != PTImode
8908 && !avoiding_indexed_address_p (mode)
8909 && legitimate_indexed_address_p (x, reg_ok_strict))
8910 return 1;
8911 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
8912 && mode_supports_pre_modify_p (mode)
8913 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
8914 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
8915 reg_ok_strict, false)
8916 || (!avoiding_indexed_address_p (mode)
8917 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
8918 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
8919 return 1;
8920 if (reg_offset_p && !quad_offset_p
8921 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
8922 return 1;
8923 return 0;
8926 /* Debug version of rs6000_legitimate_address_p. */
8927 static bool
8928 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
8929 bool reg_ok_strict)
8931 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
8932 fprintf (stderr,
8933 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
8934 "strict = %d, reload = %s, code = %s\n",
8935 ret ? "true" : "false",
8936 GET_MODE_NAME (mode),
8937 reg_ok_strict,
8938 (reload_completed
8939 ? "after"
8940 : (reload_in_progress ? "progress" : "before")),
8941 GET_RTX_NAME (GET_CODE (x)));
8942 debug_rtx (x);
8944 return ret;
8947 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
8949 static bool
8950 rs6000_mode_dependent_address_p (const_rtx addr,
8951 addr_space_t as ATTRIBUTE_UNUSED)
8953 return rs6000_mode_dependent_address_ptr (addr);
8956 /* Go to LABEL if ADDR (a legitimate address expression)
8957 has an effect that depends on the machine mode it is used for.
8959 On the RS/6000 this is true of all integral offsets (since AltiVec
8960 and VSX modes don't allow them) or is a pre-increment or decrement.
8962 ??? Except that due to conceptual problems in offsettable_address_p
8963 we can't really report the problems of integral offsets. So leave
8964 this assuming that the adjustable offset must be valid for the
8965 sub-words of a TFmode operand, which is what we had before. */
8967 static bool
8968 rs6000_mode_dependent_address (const_rtx addr)
8970 switch (GET_CODE (addr))
8972 case PLUS:
8973 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
8974 is considered a legitimate address before reload, so there
8975 are no offset restrictions in that case. Note that this
8976 condition is safe in strict mode because any address involving
8977 virtual_stack_vars_rtx or arg_pointer_rtx would already have
8978 been rejected as illegitimate. */
8979 if (XEXP (addr, 0) != virtual_stack_vars_rtx
8980 && XEXP (addr, 0) != arg_pointer_rtx
8981 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
8983 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
8984 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
8986 break;
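/* E.g., with an invented offset: on 32-bit, val = 0x7ff8 is flagged
   as mode-dependent because 0x7ff8 + 0x8000 = 0xfff8 >= 0xfff4;
   the last word of a TFmode access at offset+12 would step past
   the signed 16-bit limit.  */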
8988 case LO_SUM:
8989 /* Anything in the constant pool is sufficiently aligned that
8990 all bytes have the same high part address. */
8991 return !legitimate_constant_pool_address_p (addr, QImode, false);
8993 /* Auto-increment cases are now treated generically in recog.c. */
8994 case PRE_MODIFY:
8995 return TARGET_UPDATE;
8997 /* AND is only allowed in Altivec loads. */
8998 case AND:
8999 return true;
9001 default:
9002 break;
9005 return false;
9008 /* Debug version of rs6000_mode_dependent_address. */
9009 static bool
9010 rs6000_debug_mode_dependent_address (const_rtx addr)
9012 bool ret = rs6000_mode_dependent_address (addr);
9014 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9015 ret ? "true" : "false");
9016 debug_rtx (addr);
9018 return ret;
9021 /* Implement FIND_BASE_TERM. */
9023 rtx
9024 rs6000_find_base_term (rtx op)
9026 rtx base;
9028 base = op;
9029 if (GET_CODE (base) == CONST)
9030 base = XEXP (base, 0);
9031 if (GET_CODE (base) == PLUS)
9032 base = XEXP (base, 0);
9033 if (GET_CODE (base) == UNSPEC)
9034 switch (XINT (base, 1))
9036 case UNSPEC_TOCREL:
9037 case UNSPEC_MACHOPIC_OFFSET:
9038 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9039 for aliasing purposes. */
9040 return XVECEXP (base, 0, 0);
9043 return op;
9046 /* More elaborate version of recog's offsettable_memref_p predicate
9047 that works around the ??? note of rs6000_mode_dependent_address.
9048 In particular it accepts
9050 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9052 in 32-bit mode, which the recog predicate rejects.  */
9054 static bool
9055 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9057 bool worst_case;
9059 if (!MEM_P (op))
9060 return false;
9062 /* First mimic offsettable_memref_p. */
9063 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9064 return true;
9066 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9067 the latter predicate knows nothing about the mode of the memory
9068 reference and, therefore, assumes that it is the largest supported
9069 mode (TFmode). As a consequence, legitimate offsettable memory
9070 references are rejected. rs6000_legitimate_offset_address_p contains
9071 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9072 at least with a little bit of help here given that we know the
9073 actual registers used. */
9074 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9075 || GET_MODE_SIZE (reg_mode) == 4);
9076 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9077 true, worst_case);
9080 /* Determine the reassociation width to be used in reassociate_bb.
9081 This takes into account how many parallel operations we
9082 can actually do of a given type, and also the latency.
9084 int add/sub 6/cycle
9085 mul 2/cycle
9086 vect add/sub/mul 2/cycle
9087 fp add/sub/mul 2/cycle
9088 dfp 1/cycle
9091 static int
9092 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9093 enum machine_mode mode)
9095 switch (rs6000_cpu)
9097 case PROCESSOR_POWER8:
9098 case PROCESSOR_POWER9:
9099 if (DECIMAL_FLOAT_MODE_P (mode))
9100 return 1;
9101 if (VECTOR_MODE_P (mode))
9102 return 4;
9103 if (INTEGRAL_MODE_P (mode))
9104 return opc == MULT_EXPR ? 4 : 6;
9105 if (FLOAT_MODE_P (mode))
9106 return 4;
9107 break;
9108 default:
9109 break;
9111 return 1;
9114 /* Change register usage conditional on target flags. */
9115 static void
9116 rs6000_conditional_register_usage (void)
9118 int i;
9120 if (TARGET_DEBUG_TARGET)
9121 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9123 /* Set MQ register fixed (already call_used) so that it will not be
9124 allocated. */
9125 fixed_regs[64] = 1;
9127 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9128 if (TARGET_64BIT)
9129 fixed_regs[13] = call_used_regs[13]
9130 = call_really_used_regs[13] = 1;
9132 /* Conditionally disable FPRs. */
9133 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
9134 for (i = 32; i < 64; i++)
9135 fixed_regs[i] = call_used_regs[i]
9136 = call_really_used_regs[i] = 1;
9138 /* The TOC register is not killed across calls in a way that is
9139 visible to the compiler. */
9140 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9141 call_really_used_regs[2] = 0;
9143 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9144 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9146 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9147 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9148 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9149 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9151 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9152 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9153 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9154 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9156 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9157 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9158 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9160 if (TARGET_SPE)
9162 global_regs[SPEFSCR_REGNO] = 1;
9163 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
9164 registers in prologues and epilogues. We no longer use r14
9165 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
9166 pool for link-compatibility with older versions of GCC. Once
9167 "old" code has died out, we can return r14 to the allocation
9168 pool. */
9169 fixed_regs[14]
9170 = call_used_regs[14]
9171 = call_really_used_regs[14] = 1;
9174 if (!TARGET_ALTIVEC && !TARGET_VSX)
9176 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9177 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9178 call_really_used_regs[VRSAVE_REGNO] = 1;
9181 if (TARGET_ALTIVEC || TARGET_VSX)
9182 global_regs[VSCR_REGNO] = 1;
9184 if (TARGET_ALTIVEC_ABI)
9186 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9187 call_used_regs[i] = call_really_used_regs[i] = 1;
9189 /* AIX reserves VR20:31 in non-extended ABI mode. */
9190 if (TARGET_XCOFF)
9191 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9192 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9197 /* Output insns to set DEST equal to the constant SOURCE as a series of
9198 lis, ori and shl instructions and return TRUE. */
9200 bool
9201 rs6000_emit_set_const (rtx dest, rtx source)
9203 machine_mode mode = GET_MODE (dest);
9204 rtx temp, set;
9205 rtx_insn *insn;
9206 HOST_WIDE_INT c;
9208 gcc_checking_assert (CONST_INT_P (source));
9209 c = INTVAL (source);
9210 switch (mode)
9212 case QImode:
9213 case HImode:
9214 emit_insn (gen_rtx_SET (dest, source));
9215 return true;
9217 case SImode:
9218 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9220 emit_insn (gen_rtx_SET (copy_rtx (temp),
9221 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9222 emit_insn (gen_rtx_SET (dest,
9223 gen_rtx_IOR (SImode, copy_rtx (temp),
9224 GEN_INT (c & 0xffff))));
9225 break;
9227 case DImode:
9228 if (!TARGET_POWERPC64)
9230 rtx hi, lo;
9232 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9233 DImode);
9234 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9235 DImode);
9236 emit_move_insn (hi, GEN_INT (c >> 32));
9237 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9238 emit_move_insn (lo, GEN_INT (c));
9240 else
9241 rs6000_emit_set_long_const (dest, c);
9242 break;
9244 default:
9245 gcc_unreachable ();
9248 insn = get_last_insn ();
9249 set = single_set (insn);
9250 if (! CONSTANT_P (SET_SRC (set)))
9251 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9253 return true;
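/* Worked example (annotation, not in the original source): for SImode
   c = 0x12345678 the SImode arm above emits

     (set (reg temp) (const_int 0x12340000))                   ; lis
     (set (reg dest) (ior:SI (reg temp) (const_int 0x5678)))   ; ori

   i.e. the classic lis/ori pair, with a REG_EQUAL note recording the
   full constant on the final insn.  */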
9256 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9257 Output insns to set DEST equal to the constant C as a series of
9258 lis, ori and shl instructions. */
9260 static void
9261 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9263 rtx temp;
9264 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9266 ud1 = c & 0xffff;
9267 c = c >> 16;
9268 ud2 = c & 0xffff;
9269 c = c >> 16;
9270 ud3 = c & 0xffff;
9271 c = c >> 16;
9272 ud4 = c & 0xffff;
9274 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9275 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9276 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9278 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9279 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9281 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9283 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9284 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9285 if (ud1 != 0)
9286 emit_move_insn (dest,
9287 gen_rtx_IOR (DImode, copy_rtx (temp),
9288 GEN_INT (ud1)));
9290 else if (ud3 == 0 && ud4 == 0)
9292 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9294 gcc_assert (ud2 & 0x8000);
9295 emit_move_insn (copy_rtx (temp),
9296 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9297 if (ud1 != 0)
9298 emit_move_insn (copy_rtx (temp),
9299 gen_rtx_IOR (DImode, copy_rtx (temp),
9300 GEN_INT (ud1)));
9301 emit_move_insn (dest,
9302 gen_rtx_ZERO_EXTEND (DImode,
9303 gen_lowpart (SImode,
9304 copy_rtx (temp))));
9306 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9307 || (ud4 == 0 && ! (ud3 & 0x8000)))
9309 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9311 emit_move_insn (copy_rtx (temp),
9312 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9313 if (ud2 != 0)
9314 emit_move_insn (copy_rtx (temp),
9315 gen_rtx_IOR (DImode, copy_rtx (temp),
9316 GEN_INT (ud2)));
9317 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9318 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9319 GEN_INT (16)));
9320 if (ud1 != 0)
9321 emit_move_insn (dest,
9322 gen_rtx_IOR (DImode, copy_rtx (temp),
9323 GEN_INT (ud1)));
9325 else
9327 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9329 emit_move_insn (copy_rtx (temp),
9330 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9331 if (ud3 != 0)
9332 emit_move_insn (copy_rtx (temp),
9333 gen_rtx_IOR (DImode, copy_rtx (temp),
9334 GEN_INT (ud3)));
9336 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9337 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9338 GEN_INT (32)));
9339 if (ud2 != 0)
9340 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9341 gen_rtx_IOR (DImode, copy_rtx (temp),
9342 GEN_INT (ud2 << 16)));
9343 if (ud1 != 0)
9344 emit_move_insn (dest,
9345 gen_rtx_IOR (DImode, copy_rtx (temp),
9346 GEN_INT (ud1)));
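/* Worked example (annotation, not in the original source): for
   c = 0x1234567890abcdef the general case above splits the constant into
   ud4..ud1 = 0x1234, 0x5678, 0x90ab, 0xcdef and the emitted RTL
   corresponds to

     lis  rT, 0x1234       ; rT = 0x12340000
     ori  rT, rT, 0x5678   ; rT = 0x12345678
     sldi rT, rT, 32       ; rT = 0x1234567800000000
     oris rT, rT, 0x90ab   ; rT |= 0x90ab0000
     ori  rD, rT, 0xcdef   ; rD = the full 64-bit constant  */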
9350 /* Helper for the following. Get rid of [r+r] memory refs
9351 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9353 static void
9354 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9356 if (reload_in_progress)
9357 return;
9359 if (GET_CODE (operands[0]) == MEM
9360 && GET_CODE (XEXP (operands[0], 0)) != REG
9361 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9362 GET_MODE (operands[0]), false))
9363 operands[0]
9364 = replace_equiv_address (operands[0],
9365 copy_addr_to_reg (XEXP (operands[0], 0)));
9367 if (GET_CODE (operands[1]) == MEM
9368 && GET_CODE (XEXP (operands[1], 0)) != REG
9369 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9370 GET_MODE (operands[1]), false))
9371 operands[1]
9372 = replace_equiv_address (operands[1],
9373 copy_addr_to_reg (XEXP (operands[1], 0)));
9376 /* Generate a vector of constants to permute MODE for a little-endian
9377 storage operation by swapping the two halves of a vector. */
9378 static rtvec
9379 rs6000_const_vec (machine_mode mode)
9381 int i, subparts;
9382 rtvec v;
9384 switch (mode)
9386 case V1TImode:
9387 subparts = 1;
9388 break;
9389 case V2DFmode:
9390 case V2DImode:
9391 subparts = 2;
9392 break;
9393 case V4SFmode:
9394 case V4SImode:
9395 subparts = 4;
9396 break;
9397 case V8HImode:
9398 subparts = 8;
9399 break;
9400 case V16QImode:
9401 subparts = 16;
9402 break;
9403 default:
9404 gcc_unreachable();
9407 v = rtvec_alloc (subparts);
9409 for (i = 0; i < subparts / 2; ++i)
9410 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9411 for (i = subparts / 2; i < subparts; ++i)
9412 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9414 return v;
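/* For instance (annotation, not in the original source), for V4SImode the
   vector built above is { 2, 3, 0, 1 } and for V16QImode it is
   { 8..15, 0..7 }: in each case the selector exchanges the two
   doubleword halves of the vector.  */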
9417 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
9418 for a VSX load or store operation. */
9419 rtx
9420 rs6000_gen_le_vsx_permute (rtx source, machine_mode mode)
9422 /* Use ROTATE instead of VEC_SELECT on IEEE 128-bit floating point, and
9423 128-bit integers if they are allowed in VSX registers. */
9424 if (FLOAT128_VECTOR_P (mode) || mode == TImode)
9425 return gen_rtx_ROTATE (mode, source, GEN_INT (64));
9426 else
9428 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9429 return gen_rtx_VEC_SELECT (mode, source, par);
9433 /* Emit a little-endian load from vector memory location SOURCE to VSX
9434 register DEST in mode MODE. The load is done with two permuting
9435 insns that represent an lxvd2x and xxpermdi. */
9436 void
9437 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9439 rtx tmp, permute_mem, permute_reg;
9441 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9442 V1TImode). */
9443 if (mode == TImode || mode == V1TImode)
9445 mode = V2DImode;
9446 dest = gen_lowpart (V2DImode, dest);
9447 source = adjust_address (source, V2DImode, 0);
9450 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9451 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
9452 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
9453 emit_insn (gen_rtx_SET (tmp, permute_mem));
9454 emit_insn (gen_rtx_SET (dest, permute_reg));
9457 /* Emit a little-endian store to vector memory location DEST from VSX
9458 register SOURCE in mode MODE. The store is done with two permuting
9459 insns that represent an xxpermdi and an stxvd2x. */
9460 void
9461 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9463 rtx tmp, permute_src, permute_tmp;
9465 /* This should never be called during or after reload, because it does
9466 not re-permute the source register. It is intended only for use
9467 during expand. */
9468 gcc_assert (!reload_in_progress && !lra_in_progress && !reload_completed);
9470 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9471 V1TImode). */
9472 if (mode == TImode || mode == V1TImode)
9474 mode = V2DImode;
9475 dest = adjust_address (dest, V2DImode, 0);
9476 source = gen_lowpart (V2DImode, source);
9479 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9480 permute_src = rs6000_gen_le_vsx_permute (source, mode);
9481 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
9482 emit_insn (gen_rtx_SET (tmp, permute_src));
9483 emit_insn (gen_rtx_SET (dest, permute_tmp));
9486 /* Emit a sequence representing a little-endian VSX load or store,
9487 moving data from SOURCE to DEST in mode MODE. This is done
9488 separately from rs6000_emit_move to ensure it is called only
9489 during expand. LE VSX loads and stores introduced later are
9490 handled with a split. The expand-time RTL generation allows
9491 us to optimize away redundant pairs of register-permutes. */
9492 void
9493 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9495 gcc_assert (!BYTES_BIG_ENDIAN
9496 && VECTOR_MEM_VSX_P (mode)
9497 && !TARGET_P9_VECTOR
9498 && !gpr_or_gpr_p (dest, source)
9499 && (MEM_P (source) ^ MEM_P (dest)));
9501 if (MEM_P (source))
9503 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9504 rs6000_emit_le_vsx_load (dest, source, mode);
9506 else
9508 if (!REG_P (source))
9509 source = force_reg (mode, source);
9510 rs6000_emit_le_vsx_store (dest, source, mode);
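/* Sketch (annotation, not in the original source): for a V2DF load the
   pair emitted by rs6000_emit_le_vsx_load above is

     (set (reg tmp)  (vec_select:V2DF (mem)     (parallel [1 0])))  ; lxvd2x
     (set (reg dest) (vec_select:V2DF (reg tmp) (parallel [1 0])))  ; xxpermdi

   Two such doubleword swaps compose to the identity, which is what lets
   a matching load/store pair drop its inner permutes at expand time.  */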
9514 /* Emit a move from SOURCE to DEST in mode MODE. */
9515 void
9516 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9518 rtx operands[2];
9519 operands[0] = dest;
9520 operands[1] = source;
9522 if (TARGET_DEBUG_ADDR)
9524 fprintf (stderr,
9525 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
9526 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9527 GET_MODE_NAME (mode),
9528 reload_in_progress,
9529 reload_completed,
9530 can_create_pseudo_p ());
9531 debug_rtx (dest);
9532 fprintf (stderr, "source:\n");
9533 debug_rtx (source);
9536 /* Sanity check: CONST_WIDE_INT should appear only for modes wider than a HOST_WIDE_INT. */
9537 if (CONST_WIDE_INT_P (operands[1])
9538 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9540 /* This should be fixed with the introduction of CONST_WIDE_INT. */
9541 gcc_unreachable ();
9544 /* Check if GCC is setting up a block move that will end up using FP
9545 registers as temporaries. We must make sure this is acceptable. */
9546 if (GET_CODE (operands[0]) == MEM
9547 && GET_CODE (operands[1]) == MEM
9548 && mode == DImode
9549 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
9550 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
9551 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
9552 ? 32 : MEM_ALIGN (operands[0])))
9553 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
9554 ? 32
9555 : MEM_ALIGN (operands[1]))))
9556 && ! MEM_VOLATILE_P (operands [0])
9557 && ! MEM_VOLATILE_P (operands [1]))
9559 emit_move_insn (adjust_address (operands[0], SImode, 0),
9560 adjust_address (operands[1], SImode, 0));
9561 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9562 adjust_address (copy_rtx (operands[1]), SImode, 4));
9563 return;
9566 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9567 && !gpc_reg_operand (operands[1], mode))
9568 operands[1] = force_reg (mode, operands[1]);
9570 /* Recognize the case where operand[1] is a reference to thread-local
9571 data and load its address to a register. */
9572 if (tls_referenced_p (operands[1]))
9574 enum tls_model model;
9575 rtx tmp = operands[1];
9576 rtx addend = NULL;
9578 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9580 addend = XEXP (XEXP (tmp, 0), 1);
9581 tmp = XEXP (XEXP (tmp, 0), 0);
9584 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
9585 model = SYMBOL_REF_TLS_MODEL (tmp);
9586 gcc_assert (model != 0);
9588 tmp = rs6000_legitimize_tls_address (tmp, model);
9589 if (addend)
9591 tmp = gen_rtx_PLUS (mode, tmp, addend);
9592 tmp = force_operand (tmp, operands[0]);
9594 operands[1] = tmp;
9597 /* Handle the case where reload calls us with an invalid address. */
9598 if (reload_in_progress && mode == Pmode
9599 && (! general_operand (operands[1], mode)
9600 || ! nonimmediate_operand (operands[0], mode)))
9601 goto emit_set;
9603 /* 128-bit constant floating-point values on Darwin should really be loaded
9604 as two parts. However, this premature splitting is a problem when DFmode
9605 values can go into Altivec registers. */
9606 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
9607 && GET_CODE (operands[1]) == CONST_DOUBLE)
9609 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9610 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9611 DFmode);
9612 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9613 GET_MODE_SIZE (DFmode)),
9614 simplify_gen_subreg (DFmode, operands[1], mode,
9615 GET_MODE_SIZE (DFmode)),
9616 DFmode);
9617 return;
9620 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
9621 cfun->machine->sdmode_stack_slot =
9622 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
9625 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9626 p1:SD) if p1 is not of floating point class and p0 is spilled, since
9627 we have no analogous movsd_store for this case. */
9628 if (lra_in_progress && mode == DDmode
9629 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9630 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9631 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
9632 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9634 enum reg_class cl;
9635 int regno = REGNO (SUBREG_REG (operands[1]));
9637 if (regno >= FIRST_PSEUDO_REGISTER)
9639 cl = reg_preferred_class (regno);
9640 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9642 if (regno >= 0 && ! FP_REGNO_P (regno))
9644 mode = SDmode;
9645 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9646 operands[1] = SUBREG_REG (operands[1]);
9649 if (lra_in_progress
9650 && mode == SDmode
9651 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9652 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9653 && (REG_P (operands[1])
9654 || (GET_CODE (operands[1]) == SUBREG
9655 && REG_P (SUBREG_REG (operands[1])))))
9657 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
9658 ? SUBREG_REG (operands[1]) : operands[1]);
9659 enum reg_class cl;
9661 if (regno >= FIRST_PSEUDO_REGISTER)
9663 cl = reg_preferred_class (regno);
9664 gcc_assert (cl != NO_REGS);
9665 regno = ira_class_hard_regs[cl][0];
9667 if (FP_REGNO_P (regno))
9669 if (GET_MODE (operands[0]) != DDmode)
9670 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
9671 emit_insn (gen_movsd_store (operands[0], operands[1]));
9673 else if (INT_REGNO_P (regno))
9674 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9675 else
9676 gcc_unreachable();
9677 return;
9679 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
9680 p1:DD)) if p0 is not of floating point class and p1 is spilled, since
9681 we have no analogous movsd_load for this case. */
9682 if (lra_in_progress && mode == DDmode
9683 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
9684 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
9685 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
9686 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9688 enum reg_class cl;
9689 int regno = REGNO (SUBREG_REG (operands[0]));
9691 if (regno >= FIRST_PSEUDO_REGISTER)
9693 cl = reg_preferred_class (regno);
9694 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
9696 if (regno >= 0 && ! FP_REGNO_P (regno))
9698 mode = SDmode;
9699 operands[0] = SUBREG_REG (operands[0]);
9700 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
9703 if (lra_in_progress
9704 && mode == SDmode
9705 && (REG_P (operands[0])
9706 || (GET_CODE (operands[0]) == SUBREG
9707 && REG_P (SUBREG_REG (operands[0]))))
9708 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
9709 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9711 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
9712 ? SUBREG_REG (operands[0]) : operands[0]);
9713 enum reg_class cl;
9715 if (regno >= FIRST_PSEUDO_REGISTER)
9717 cl = reg_preferred_class (regno);
9718 gcc_assert (cl != NO_REGS);
9719 regno = ira_class_hard_regs[cl][0];
9721 if (FP_REGNO_P (regno))
9723 if (GET_MODE (operands[1]) != DDmode)
9724 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
9725 emit_insn (gen_movsd_load (operands[0], operands[1]));
9727 else if (INT_REGNO_P (regno))
9728 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9729 else
9730 gcc_unreachable();
9731 return;
9734 if (reload_in_progress
9735 && mode == SDmode
9736 && cfun->machine->sdmode_stack_slot != NULL_RTX
9737 && MEM_P (operands[0])
9738 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
9739 && REG_P (operands[1]))
9741 if (FP_REGNO_P (REGNO (operands[1])))
9743 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
9744 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9745 emit_insn (gen_movsd_store (mem, operands[1]));
9747 else if (INT_REGNO_P (REGNO (operands[1])))
9749 rtx mem = operands[0];
9750 if (BYTES_BIG_ENDIAN)
9751 mem = adjust_address_nv (mem, mode, 4);
9752 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9753 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
9755 else
9756 gcc_unreachable();
9757 return;
9759 if (reload_in_progress
9760 && mode == SDmode
9761 && REG_P (operands[0])
9762 && MEM_P (operands[1])
9763 && cfun->machine->sdmode_stack_slot != NULL_RTX
9764 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
9766 if (FP_REGNO_P (REGNO (operands[0])))
9768 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
9769 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9770 emit_insn (gen_movsd_load (operands[0], mem));
9772 else if (INT_REGNO_P (REGNO (operands[0])))
9774 rtx mem = operands[1];
9775 if (BYTES_BIG_ENDIAN)
9776 mem = adjust_address_nv (mem, mode, 4);
9777 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9778 emit_insn (gen_movsd_hardfloat (operands[0], mem));
9780 else
9781 gcc_unreachable();
9782 return;
9785 /* FIXME: In the long term, this switch statement should go away
9786 and be replaced by a sequence of tests based on things like
9787 mode == Pmode. */
9788 switch (mode)
9790 case HImode:
9791 case QImode:
9792 if (CONSTANT_P (operands[1])
9793 && GET_CODE (operands[1]) != CONST_INT)
9794 operands[1] = force_const_mem (mode, operands[1]);
9795 break;
9797 case TFmode:
9798 case TDmode:
9799 case IFmode:
9800 case KFmode:
9801 if (FLOAT128_2REG_P (mode))
9802 rs6000_eliminate_indexed_memrefs (operands);
9803 /* fall through */
9805 case DFmode:
9806 case DDmode:
9807 case SFmode:
9808 case SDmode:
9809 if (CONSTANT_P (operands[1])
9810 && ! easy_fp_constant (operands[1], mode))
9811 operands[1] = force_const_mem (mode, operands[1]);
9812 break;
9814 case V16QImode:
9815 case V8HImode:
9816 case V4SFmode:
9817 case V4SImode:
9818 case V4HImode:
9819 case V2SFmode:
9820 case V2SImode:
9821 case V1DImode:
9822 case V2DFmode:
9823 case V2DImode:
9824 case V1TImode:
9825 if (CONSTANT_P (operands[1])
9826 && !easy_vector_constant (operands[1], mode))
9827 operands[1] = force_const_mem (mode, operands[1]);
9828 break;
9830 case SImode:
9831 case DImode:
9832 /* Use the default pattern for the address of ELF small data. */
9833 if (TARGET_ELF
9834 && mode == Pmode
9835 && DEFAULT_ABI == ABI_V4
9836 && (GET_CODE (operands[1]) == SYMBOL_REF
9837 || GET_CODE (operands[1]) == CONST)
9838 && small_data_operand (operands[1], mode))
9840 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9841 return;
9844 if (DEFAULT_ABI == ABI_V4
9845 && mode == Pmode && mode == SImode
9846 && flag_pic == 1 && got_operand (operands[1], mode))
9848 emit_insn (gen_movsi_got (operands[0], operands[1]));
9849 return;
9852 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
9853 && TARGET_NO_TOC
9854 && ! flag_pic
9855 && mode == Pmode
9856 && CONSTANT_P (operands[1])
9857 && GET_CODE (operands[1]) != HIGH
9858 && GET_CODE (operands[1]) != CONST_INT)
9860 rtx target = (!can_create_pseudo_p ()
9861 ? operands[0]
9862 : gen_reg_rtx (mode));
9864 /* If this is a function address on -mcall-aixdesc,
9865 convert it to the address of the descriptor. */
9866 if (DEFAULT_ABI == ABI_AIX
9867 && GET_CODE (operands[1]) == SYMBOL_REF
9868 && XSTR (operands[1], 0)[0] == '.')
9870 const char *name = XSTR (operands[1], 0);
9871 rtx new_ref;
9872 while (*name == '.')
9873 name++;
9874 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
9875 CONSTANT_POOL_ADDRESS_P (new_ref)
9876 = CONSTANT_POOL_ADDRESS_P (operands[1]);
9877 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
9878 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
9879 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
9880 operands[1] = new_ref;
9883 if (DEFAULT_ABI == ABI_DARWIN)
9885 #if TARGET_MACHO
9886 if (MACHO_DYNAMIC_NO_PIC_P)
9888 /* Take care of any required data indirection. */
9889 operands[1] = rs6000_machopic_legitimize_pic_address (
9890 operands[1], mode, operands[0]);
9891 if (operands[0] != operands[1])
9892 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9893 return;
9895 #endif
9896 emit_insn (gen_macho_high (target, operands[1]));
9897 emit_insn (gen_macho_low (operands[0], target, operands[1]));
9898 return;
9901 emit_insn (gen_elf_high (target, operands[1]));
9902 emit_insn (gen_elf_low (operands[0], target, operands[1]));
9903 return;
9906 /* If this is a SYMBOL_REF that refers to a constant pool entry,
9907 and we have put it in the TOC, we just need to make a TOC-relative
9908 reference to it. */
9909 if (TARGET_TOC
9910 && GET_CODE (operands[1]) == SYMBOL_REF
9911 && use_toc_relative_ref (operands[1], mode))
9912 operands[1] = create_TOC_reference (operands[1], operands[0]);
9913 else if (mode == Pmode
9914 && CONSTANT_P (operands[1])
9915 && GET_CODE (operands[1]) != HIGH
9916 && ((GET_CODE (operands[1]) != CONST_INT
9917 && ! easy_fp_constant (operands[1], mode))
9918 || (GET_CODE (operands[1]) == CONST_INT
9919 && (num_insns_constant (operands[1], mode)
9920 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
9921 || (GET_CODE (operands[0]) == REG
9922 && FP_REGNO_P (REGNO (operands[0]))))
9923 && !toc_relative_expr_p (operands[1], false)
9924 && (TARGET_CMODEL == CMODEL_SMALL
9925 || can_create_pseudo_p ()
9926 || (REG_P (operands[0])
9927 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
9930 #if TARGET_MACHO
9931 /* Darwin uses a special PIC legitimizer. */
9932 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
9934 operands[1] =
9935 rs6000_machopic_legitimize_pic_address (operands[1], mode,
9936 operands[0]);
9937 if (operands[0] != operands[1])
9938 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9939 return;
9941 #endif
9943 /* If we are to limit the number of things we put in the TOC and
9944 this is a symbol plus a constant we can add in one insn,
9945 just put the symbol in the TOC and add the constant. Don't do
9946 this if reload is in progress. */
9947 if (GET_CODE (operands[1]) == CONST
9948 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
9949 && GET_CODE (XEXP (operands[1], 0)) == PLUS
9950 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
9951 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
9952 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
9953 && ! side_effects_p (operands[0]))
9955 rtx sym =
9956 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
9957 rtx other = XEXP (XEXP (operands[1], 0), 1);
9959 sym = force_reg (mode, sym);
9960 emit_insn (gen_add3_insn (operands[0], sym, other));
9961 return;
9964 operands[1] = force_const_mem (mode, operands[1]);
9966 if (TARGET_TOC
9967 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
9968 && constant_pool_expr_p (XEXP (operands[1], 0))
9969 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
9970 get_pool_constant (XEXP (operands[1], 0)),
9971 get_pool_mode (XEXP (operands[1], 0))))
9973 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
9974 operands[0]);
9975 operands[1] = gen_const_mem (mode, tocref);
9976 set_mem_alias_set (operands[1], get_TOC_alias_set ());
9979 break;
9981 case TImode:
9982 if (!VECTOR_MEM_VSX_P (TImode))
9983 rs6000_eliminate_indexed_memrefs (operands);
9984 break;
9986 case PTImode:
9987 rs6000_eliminate_indexed_memrefs (operands);
9988 break;
9990 default:
9991 fatal_insn ("bad move", gen_rtx_SET (dest, source));
9994 /* Above, we may have called force_const_mem which may have returned
9995 an invalid address. If we can, fix this up; otherwise, reload will
9996 have to deal with it. */
9997 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
9998 operands[1] = validize_mem (operands[1]);
10000 emit_set:
10001 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10004 /* Return true if a structure, union or array containing FIELD should be
10005 accessed using `BLKMODE'.
10007 For the SPE, simd types are V2SI, and gcc can be tempted to put the
10008 entire thing in a DI and use subregs to access the internals.
10009 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
10010 back-end. Because a single GPR can hold a V2SI, but not a DI, the
10011 best thing to do is set structs to BLKmode and avoid Severe Tire
10012 Damage.
10014 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
10015 fit into 1, whereas DI still needs two. */
10017 static bool
10018 rs6000_member_type_forces_blk (const_tree field, machine_mode mode)
10020 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
10021 || (TARGET_E500_DOUBLE && mode == DFmode));
10024 /* Nonzero if we can use a floating-point register to pass this arg. */
10025 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10026 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10027 && (CUM)->fregno <= FP_ARG_MAX_REG \
10028 && TARGET_HARD_FLOAT && TARGET_FPRS)
10030 /* Nonzero if we can use an AltiVec register to pass this arg. */
10031 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10032 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10033 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10034 && TARGET_ALTIVEC_ABI \
10035 && (NAMED))
10037 /* Walk down the type tree of TYPE counting consecutive base elements.
10038 If *MODEP is VOIDmode, then set it to the first valid floating point
10039 or vector type. If a non-floating point or vector type is found, or
10040 if a floating point or vector type that doesn't match a non-VOIDmode
10041 *MODEP is found, then return -1, otherwise return the count in the
10042 sub-tree. */
10044 static int
10045 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10047 machine_mode mode;
10048 HOST_WIDE_INT size;
10050 switch (TREE_CODE (type))
10052 case REAL_TYPE:
10053 mode = TYPE_MODE (type);
10054 if (!SCALAR_FLOAT_MODE_P (mode))
10055 return -1;
10057 if (*modep == VOIDmode)
10058 *modep = mode;
10060 if (*modep == mode)
10061 return 1;
10063 break;
10065 case COMPLEX_TYPE:
10066 mode = TYPE_MODE (TREE_TYPE (type));
10067 if (!SCALAR_FLOAT_MODE_P (mode))
10068 return -1;
10070 if (*modep == VOIDmode)
10071 *modep = mode;
10073 if (*modep == mode)
10074 return 2;
10076 break;
10078 case VECTOR_TYPE:
10079 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10080 return -1;
10082 /* Use V4SImode as representative of all 128-bit vector types. */
10083 size = int_size_in_bytes (type);
10084 switch (size)
10086 case 16:
10087 mode = V4SImode;
10088 break;
10089 default:
10090 return -1;
10093 if (*modep == VOIDmode)
10094 *modep = mode;
10096 /* Vector modes are considered to be opaque: two vectors are
10097 equivalent for the purposes of being homogeneous aggregates
10098 if they are the same size. */
10099 if (*modep == mode)
10100 return 1;
10102 break;
10104 case ARRAY_TYPE:
10106 int count;
10107 tree index = TYPE_DOMAIN (type);
10109 /* Can't handle incomplete types nor sizes that are not
10110 fixed. */
10111 if (!COMPLETE_TYPE_P (type)
10112 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10113 return -1;
10115 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10116 if (count == -1
10117 || !index
10118 || !TYPE_MAX_VALUE (index)
10119 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10120 || !TYPE_MIN_VALUE (index)
10121 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10122 || count < 0)
10123 return -1;
10125 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10126 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10128 /* There must be no padding. */
10129 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10130 return -1;
10132 return count;
10135 case RECORD_TYPE:
10137 int count = 0;
10138 int sub_count;
10139 tree field;
10141 /* Can't handle incomplete types nor sizes that are not
10142 fixed. */
10143 if (!COMPLETE_TYPE_P (type)
10144 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10145 return -1;
10147 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10149 if (TREE_CODE (field) != FIELD_DECL)
10150 continue;
10152 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10153 if (sub_count < 0)
10154 return -1;
10155 count += sub_count;
10158 /* There must be no padding. */
10159 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10160 return -1;
10162 return count;
10165 case UNION_TYPE:
10166 case QUAL_UNION_TYPE:
10168 /* These aren't very interesting except in a degenerate case. */
10169 int count = 0;
10170 int sub_count;
10171 tree field;
10173 /* Can't handle incomplete types nor sizes that are not
10174 fixed. */
10175 if (!COMPLETE_TYPE_P (type)
10176 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10177 return -1;
10179 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10181 if (TREE_CODE (field) != FIELD_DECL)
10182 continue;
10184 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10185 if (sub_count < 0)
10186 return -1;
10187 count = count > sub_count ? count : sub_count;
10190 /* There must be no padding. */
10191 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10192 return -1;
10194 return count;
10197 default:
10198 break;
10201 return -1;
10204 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10205 float or vector aggregate that shall be passed in FP/vector registers
10206 according to the ELFv2 ABI, return the homogeneous element mode in
10207 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10209 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10211 static bool
10212 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10213 machine_mode *elt_mode,
10214 int *n_elts)
10216 /* Note that we do not accept complex types at the top level as
10217 homogeneous aggregates; these types are handled via the
10218 targetm.calls.split_complex_arg mechanism. Complex types
10219 can be elements of homogeneous aggregates, however. */
10220 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
10222 machine_mode field_mode = VOIDmode;
10223 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10225 if (field_count > 0)
10227 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
10228 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
10230 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10231 up to AGGR_ARG_NUM_REG registers. */
10232 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
10234 if (elt_mode)
10235 *elt_mode = field_mode;
10236 if (n_elts)
10237 *n_elts = field_count;
10238 return true;
10243 if (elt_mode)
10244 *elt_mode = mode;
10245 if (n_elts)
10246 *n_elts = 1;
10247 return false;
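/* Examples (annotation, not in the original source), under the ELFv2 ABI:
   struct { double a, b, c; } is a homogeneous aggregate with
   *ELT_MODE = DFmode and *N_ELTS = 3 (three FPRs, within the
   AGGR_ARG_NUM_REG limit of 8); struct { _Complex double z; } yields
   DFmode with two elements; a struct of nine doubles exceeds the limit
   and is not treated as homogeneous.  */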
10250 /* Return a nonzero value to say to return the function value in
10251 memory, just as large structures are always returned. TYPE will be
10252 the data type of the value, and FNTYPE will be the type of the
10253 function doing the returning, or @code{NULL} for libcalls.
10255 The AIX ABI for the RS/6000 specifies that all structures are
10256 returned in memory. The Darwin ABI does the same.
10258 For the Darwin 64 Bit ABI, a function result can be returned in
10259 registers or in memory, depending on the size of the return data
10260 type. If it is returned in registers, the value occupies the same
10261 registers as it would if it were the first and only function
10262 argument. Otherwise, the function places its result in memory at
10263 the location pointed to by GPR3.
10265 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10266 but a draft put them in memory, and GCC used to implement the draft
10267 instead of the final standard. Therefore, aix_struct_return
10268 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10269 compatibility can change DRAFT_V4_STRUCT_RET to override the
10270 default, and -m switches get the final word. See
10271 rs6000_option_override_internal for more details.
10273 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10274 long double support is enabled. These values are returned in memory.
10276 int_size_in_bytes returns -1 for variable size objects, which go in
10277 memory always. The cast to unsigned makes -1 > 8. */
10279 static bool
10280 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10282 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10283 if (TARGET_MACHO
10284 && rs6000_darwin64_abi
10285 && TREE_CODE (type) == RECORD_TYPE
10286 && int_size_in_bytes (type) > 0)
10288 CUMULATIVE_ARGS valcum;
10289 rtx valret;
10291 valcum.words = 0;
10292 valcum.fregno = FP_ARG_MIN_REG;
10293 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10294 /* Do a trial code generation as if this were going to be passed
10295 as an argument; if any part goes in memory, we return NULL. */
10296 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10297 if (valret)
10298 return false;
10299 /* Otherwise fall through to more conventional ABI rules. */
10302 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10303 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10304 NULL, NULL))
10305 return false;
10307 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10308 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10309 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10310 return false;
10312 if (AGGREGATE_TYPE_P (type)
10313 && (aix_struct_return
10314 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10315 return true;
10317 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10318 modes only exist for GCC vector types if -maltivec. */
10319 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10320 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10321 return false;
10323 /* Return synthetic vectors in memory. */
10324 if (TREE_CODE (type) == VECTOR_TYPE
10325 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10327 static bool warned_for_return_big_vectors = false;
10328 if (!warned_for_return_big_vectors)
10330 warning (0, "GCC vector returned by reference: "
10331 "non-standard ABI extension with no compatibility guarantee");
10332 warned_for_return_big_vectors = true;
10334 return true;
10337 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10338 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10339 return true;
10341 return false;
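/* Examples (annotation, not in the original source): under ELFv2,
   struct { double a, b; } comes back in FPRs as a homogeneous aggregate
   and struct { char c[16]; } in GPRs via the 16-byte rule, while
   struct { char c[24]; } is returned in memory.  Under the AIX rules
   (aix_struct_return) all three would go to memory.  */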
10344 /* Specify whether values returned in registers should be at the most
10345 significant end of a register. We want aggregates returned by
10346 value to match the way aggregates are passed to functions. */
10348 static bool
10349 rs6000_return_in_msb (const_tree valtype)
10351 return (DEFAULT_ABI == ABI_ELFv2
10352 && BYTES_BIG_ENDIAN
10353 && AGGREGATE_TYPE_P (valtype)
10354 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
10357 #ifdef HAVE_AS_GNU_ATTRIBUTE
10358 /* Return TRUE if a call to function FNDECL may be one that
10359 potentially affects the function calling ABI of the object file. */
10361 static bool
10362 call_ABI_of_interest (tree fndecl)
10364 if (symtab->state == EXPANSION)
10366 struct cgraph_node *c_node;
10368 /* Libcalls are always interesting. */
10369 if (fndecl == NULL_TREE)
10370 return true;
10372 /* Any call to an external function is interesting. */
10373 if (DECL_EXTERNAL (fndecl))
10374 return true;
10376 /* Interesting functions that we are emitting in this object file. */
10377 c_node = cgraph_node::get (fndecl);
10378 c_node = c_node->ultimate_alias_target ();
10379 return !c_node->only_called_directly_p ();
10381 return false;
10383 #endif
10385 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10386 for a call to a function whose data type is FNTYPE.
10387 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10389 For incoming args we set the number of arguments in the prototype large
10390 so we never return a PARALLEL. */
10392 void
10393 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10394 rtx libname ATTRIBUTE_UNUSED, int incoming,
10395 int libcall, int n_named_args,
10396 tree fndecl ATTRIBUTE_UNUSED,
10397 machine_mode return_mode ATTRIBUTE_UNUSED)
10399 static CUMULATIVE_ARGS zero_cumulative;
10401 *cum = zero_cumulative;
10402 cum->words = 0;
10403 cum->fregno = FP_ARG_MIN_REG;
10404 cum->vregno = ALTIVEC_ARG_MIN_REG;
10405 cum->prototype = (fntype && prototype_p (fntype));
10406 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10407 ? CALL_LIBCALL : CALL_NORMAL);
10408 cum->sysv_gregno = GP_ARG_MIN_REG;
10409 cum->stdarg = stdarg_p (fntype);
10410 cum->libcall = libcall;
10412 cum->nargs_prototype = 0;
10413 if (incoming || cum->prototype)
10414 cum->nargs_prototype = n_named_args;
10416 /* Check for a longcall attribute. */
10417 if ((!fntype && rs6000_default_long_calls)
10418 || (fntype
10419 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10420 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10421 cum->call_cookie |= CALL_LONG;
10423 if (TARGET_DEBUG_ARG)
10425 fprintf (stderr, "\ninit_cumulative_args:");
10426 if (fntype)
10428 tree ret_type = TREE_TYPE (fntype);
10429 fprintf (stderr, " ret code = %s,",
10430 get_tree_code_name (TREE_CODE (ret_type)));
10433 if (cum->call_cookie & CALL_LONG)
10434 fprintf (stderr, " longcall,");
10436 fprintf (stderr, " proto = %d, nargs = %d\n",
10437 cum->prototype, cum->nargs_prototype);
10440 #ifdef HAVE_AS_GNU_ATTRIBUTE
10441 if (DEFAULT_ABI == ABI_V4)
10443 cum->escapes = call_ABI_of_interest (fndecl);
10444 if (cum->escapes)
10446 tree return_type;
10448 if (fntype)
10450 return_type = TREE_TYPE (fntype);
10451 return_mode = TYPE_MODE (return_type);
10453 else
10454 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10456 if (return_type != NULL)
10458 if (TREE_CODE (return_type) == RECORD_TYPE
10459 && TYPE_TRANSPARENT_AGGR (return_type))
10461 return_type = TREE_TYPE (first_field (return_type));
10462 return_mode = TYPE_MODE (return_type);
10464 if (AGGREGATE_TYPE_P (return_type)
10465 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10466 <= 8))
10467 rs6000_returns_struct = true;
10469 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (return_mode))
10470 rs6000_passes_float = true;
10471 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
10472 || SPE_VECTOR_MODE (return_mode))
10473 rs6000_passes_vector = true;
10476 #endif
10478 if (fntype
10479 && !TARGET_ALTIVEC
10480 && TARGET_ALTIVEC_ABI
10481 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10483 error ("cannot return value in vector register because"
10484 " altivec instructions are disabled, use -maltivec"
10485 " to enable them");
10489 /* The mode the ABI uses for a word. This is not the same as word_mode
10490 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10492 static machine_mode
10493 rs6000_abi_word_mode (void)
10495 return TARGET_32BIT ? SImode : DImode;
10498 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10499 static char *
10500 rs6000_offload_options (void)
10502 if (TARGET_64BIT)
10503 return xstrdup ("-foffload-abi=lp64");
10504 else
10505 return xstrdup ("-foffload-abi=ilp32");
10508 /* On rs6000, function arguments are promoted, as are function return
10509 values. */
10511 static machine_mode
10512 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10513 machine_mode mode,
10514 int *punsignedp ATTRIBUTE_UNUSED,
10515 const_tree, int)
10517 PROMOTE_MODE (mode, *punsignedp, type);
10519 return mode;
10522 /* Return true if TYPE must be passed on the stack and not in registers. */
10524 static bool
10525 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10527 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10528 return must_pass_in_stack_var_size (mode, type);
10529 else
10530 return must_pass_in_stack_var_size_or_pad (mode, type);
10533 static inline bool
10534 is_complex_IBM_long_double (machine_mode mode)
10536 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
10539 /* Whether ABI_V4 passes MODE args to a function in floating point
10540 registers. */
10542 static bool
10543 abi_v4_pass_in_fpr (machine_mode mode)
10545 if (!TARGET_FPRS || !TARGET_HARD_FLOAT)
10546 return false;
10547 if (TARGET_SINGLE_FLOAT && mode == SFmode)
10548 return true;
10549 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
10550 return true;
10551 /* ABI_V4 passes complex IBM long double in 8 gprs.
10552 Stupid, but we can't change the ABI now. */
10553 if (is_complex_IBM_long_double (mode))
10554 return false;
10555 if (FLOAT128_2REG_P (mode))
10556 return true;
10557 if (DECIMAL_FLOAT_MODE_P (mode))
10558 return true;
10559 return false;
10562 /* If defined, a C expression which determines whether, and in which
10563 direction, to pad out an argument with extra space. The value
10564 should be of type `enum direction': either `upward' to pad above
10565 the argument, `downward' to pad below, or `none' to inhibit
10566 padding.
10568 For the AIX ABI structs are always stored left shifted in their
10569 argument slot. */
10571 enum direction
10572 function_arg_padding (machine_mode mode, const_tree type)
10574 #ifndef AGGREGATE_PADDING_FIXED
10575 #define AGGREGATE_PADDING_FIXED 0
10576 #endif
10577 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10578 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10579 #endif
10581 if (!AGGREGATE_PADDING_FIXED)
10583 /* GCC used to pass structures of the same size as integer types as
10584 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
10585 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10586 passed padded downward, except that -mstrict-align further
10587 muddied the water in that multi-component structures of 2 and 4
10588 bytes in size were passed padded upward.
10590 The following arranges for best compatibility with previous
10591 versions of gcc, but removes the -mstrict-align dependency. */
10592 if (BYTES_BIG_ENDIAN)
10594 HOST_WIDE_INT size = 0;
10596 if (mode == BLKmode)
10598 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10599 size = int_size_in_bytes (type);
10601 else
10602 size = GET_MODE_SIZE (mode);
10604 if (size == 1 || size == 2 || size == 4)
10605 return downward;
10607 return upward;
10610 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10612 if (type != 0 && AGGREGATE_TYPE_P (type))
10613 return upward;
10616 /* Fall back to the default. */
10617 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
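/* For instance (annotation, not in the original source), on a big-endian
   target the logic above pads a 2-byte struct downward (it is passed as
   if it were a short, matching historical GCC behavior) but pads a
   3-byte struct upward, i.e. left-justified in its slot as the AIX ABI
   specifies for aggregates.  */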
10620 /* If defined, a C expression that gives the alignment boundary, in bits,
10621 of an argument with the specified mode and type. If it is not defined,
10622 PARM_BOUNDARY is used for all arguments.
10624 V.4 wants long longs and doubles to be double word aligned. Just
10625 testing the mode size is a boneheaded way to do this as it means
10626 that other types such as complex int are also double word aligned.
10627 However, we're stuck with this because changing the ABI might break
10628 existing library interfaces.
10630 Doubleword align SPE vectors.
10631 Quadword align Altivec/VSX vectors.
10632 Quadword align large synthetic vector types. */
10634 static unsigned int
10635 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10637 machine_mode elt_mode;
10638 int n_elts;
10640 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10642 if (DEFAULT_ABI == ABI_V4
10643 && (GET_MODE_SIZE (mode) == 8
10644 || (TARGET_HARD_FLOAT
10645 && TARGET_FPRS
10646 && !is_complex_IBM_long_double (mode)
10647 && FLOAT128_2REG_P (mode))))
10648 return 64;
10649 else if (FLOAT128_VECTOR_P (mode))
10650 return 128;
10651 else if (SPE_VECTOR_MODE (mode)
10652 || (type && TREE_CODE (type) == VECTOR_TYPE
10653 && int_size_in_bytes (type) >= 8
10654 && int_size_in_bytes (type) < 16))
10655 return 64;
10656 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10657 || (type && TREE_CODE (type) == VECTOR_TYPE
10658 && int_size_in_bytes (type) >= 16))
10659 return 128;
10661 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10662 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10663 -mcompat-align-parm is used. */
10664 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10665 || DEFAULT_ABI == ABI_ELFv2)
10666 && type && TYPE_ALIGN (type) > 64)
10668 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10669 or homogeneous float/vector aggregates here. We already handled
10670 vector aggregates above, but still need to check for float here. */
10671 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10672 && !SCALAR_FLOAT_MODE_P (elt_mode));
10674 /* We used to check for BLKmode instead of the above aggregate type
10675 check. Warn when this results in any difference to the ABI. */
10676 if (aggregate_p != (mode == BLKmode))
10678 static bool warned;
10679 if (!warned && warn_psabi)
10681 warned = true;
10682 inform (input_location,
10683 "the ABI of passing aggregates with %d-byte alignment"
10684 " has changed in GCC 5",
10685 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10689 if (aggregate_p)
10690 return 128;
10693 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10694 implement the "aggregate type" check as a BLKmode check here; this
10695 means certain aggregate types are in fact not aligned. */
10696 if (TARGET_MACHO && rs6000_darwin64_abi
10697 && mode == BLKmode
10698 && type && TYPE_ALIGN (type) > 64)
10699 return 128;
10701 return PARM_BOUNDARY;
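/* Examples (annotation, not in the original source): under ABI_V4 a
   long long (GET_MODE_SIZE == 8) gets 64-bit alignment; any AltiVec or
   VSX vector gets 128; and under ELFv2 a struct declared with
   __attribute__((aligned(16))) is also quadword-aligned in the parameter
   area, per the GCC 5 ABI change noted above.  */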
10704 /* The offset in words to the start of the parameter save area. */
10706 static unsigned int
10707 rs6000_parm_offset (void)
10709 return (DEFAULT_ABI == ABI_V4 ? 2
10710 : DEFAULT_ABI == ABI_ELFv2 ? 4
10711 : 6);
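/* In byte terms (annotation, not in the original source): 2 words is the
   8-byte 32-bit SVR4 frame header; with 64-bit words, ELFv2 skips 32
   bytes and AIX/ELFv1 skips 48 bytes before the first parameter slot.  */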
10714 /* For a function parm of MODE and TYPE, return the starting word in
10715 the parameter area. NWORDS of the parameter area are already used. */
10717 static unsigned int
10718 rs6000_parm_start (machine_mode mode, const_tree type,
10719 unsigned int nwords)
10721 unsigned int align;
10723 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
10724 return nwords + (-(rs6000_parm_offset () + nwords) & align);
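/* Worked example (annotation, not in the original source): a vector
   argument under 64-bit ELFv2 with nwords == 3 already used:
   boundary 128 / PARM_BOUNDARY 64 gives align == 1, and
   3 + (-(4 + 3) & 1) == 4, so the vector starts at word 4; together
   with the 4-word area offset that is byte 64 from the stack pointer,
   which is 16-byte aligned as required.  */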
10727 /* Compute the size (in words) of a function argument. */
10729 static unsigned long
10730 rs6000_arg_size (machine_mode mode, const_tree type)
10732 unsigned long size;
10734 if (mode != BLKmode)
10735 size = GET_MODE_SIZE (mode);
10736 else
10737 size = int_size_in_bytes (type);
10739 if (TARGET_32BIT)
10740 return (size + 3) >> 2;
10741 else
10742 return (size + 7) >> 3;
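/* E.g. a 10-byte BLKmode struct occupies (10 + 3) >> 2 == 3 words on a
   32-bit target and (10 + 7) >> 3 == 2 doublewords on a 64-bit target
   (annotation, not in the original source).  */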
10745 /* Use this to flush pending int fields. */
10747 static void
10748 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
10749 HOST_WIDE_INT bitpos, int final)
10751 unsigned int startbit, endbit;
10752 int intregs, intoffset;
10753 machine_mode mode;
10755 /* Handle the situations where a float is taking up the first half
10756 of the GPR, and the other half is empty (typically due to
10757 alignment restrictions). We can detect this by an 8-byte-aligned
10758 int field, or by seeing that this is the final flush for this
10759 argument. Count the word and continue on. */
10760 if (cum->floats_in_gpr == 1
10761 && (cum->intoffset % 64 == 0
10762 || (cum->intoffset == -1 && final)))
10764 cum->words++;
10765 cum->floats_in_gpr = 0;
10768 if (cum->intoffset == -1)
10769 return;
10771 intoffset = cum->intoffset;
10772 cum->intoffset = -1;
10773 cum->floats_in_gpr = 0;
10775 if (intoffset % BITS_PER_WORD != 0)
10777 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
10778 MODE_INT, 0);
10779 if (mode == BLKmode)
10781 /* We couldn't find an appropriate mode, which happens,
10782 e.g., in packed structs when there are 3 bytes to load.
10783 Back intoffset back to the beginning of the word in this
10784 case. */
10785 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
10789 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
10790 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
10791 intregs = (endbit - startbit) / BITS_PER_WORD;
10792 cum->words += intregs;
10793 /* words should be unsigned. */
10794 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
10796 int pad = (endbit/BITS_PER_WORD) - cum->words;
10797 cum->words += pad;
10801 /* The darwin64 ABI calls for us to recurse down through structs,
10802 looking for elements passed in registers. Unfortunately, we have
10803 to track int register count here also because of misalignments
10804 in powerpc alignment mode. */
10806 static void
10807 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
10808 const_tree type,
10809 HOST_WIDE_INT startbitpos)
10811 tree f;
10813 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10814 if (TREE_CODE (f) == FIELD_DECL)
10816 HOST_WIDE_INT bitpos = startbitpos;
10817 tree ftype = TREE_TYPE (f);
10818 machine_mode mode;
10819 if (ftype == error_mark_node)
10820 continue;
10821 mode = TYPE_MODE (ftype);
10823 if (DECL_SIZE (f) != 0
10824 && tree_fits_uhwi_p (bit_position (f)))
10825 bitpos += int_bit_position (f);
10827 /* ??? FIXME: else assume zero offset. */
10829 if (TREE_CODE (ftype) == RECORD_TYPE)
10830 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
10831 else if (USE_FP_FOR_ARG_P (cum, mode))
10833 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
10834 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10835 cum->fregno += n_fpregs;
10836 /* Single-precision floats present a special problem for
10837 us, because they are smaller than an 8-byte GPR, and so
10838 the structure-packing rules combined with the standard
10839 varargs behavior mean that we want to pack float/float
10840 and float/int combinations into a single register's
10841 space. This is complicated by the arg advance flushing,
10842 which works on arbitrarily large groups of int-type
10843 fields. */
10844 if (mode == SFmode)
10846 if (cum->floats_in_gpr == 1)
10848 /* Two floats in a word; count the word and reset
10849 the float count. */
10850 cum->words++;
10851 cum->floats_in_gpr = 0;
10853 else if (bitpos % 64 == 0)
10855 /* A float at the beginning of an 8-byte word;
10856 count it and put off adjusting cum->words until
10857 we see if an arg advance flush is going to do it
10858 for us. */
10859 cum->floats_in_gpr++;
10861 else
10863 /* The float is at the end of a word, preceded
10864 by integer fields, so the arg advance flush
10865 just above has already set cum->words and
10866 everything is taken care of. */
10869 else
10870 cum->words += n_fpregs;
10872 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10874 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10875 cum->vregno++;
10876 cum->words += 2;
10878 else if (cum->intoffset == -1)
10879 cum->intoffset = bitpos;
10883 /* Check for an item that needs to be considered specially under the Darwin
10884 64-bit ABI. These are record types where the mode is BLK or the structure is
10885 8 bytes in size. */
10886 static int
10887 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
10889 return rs6000_darwin64_abi
10890 && ((mode == BLKmode
10891 && TREE_CODE (type) == RECORD_TYPE
10892 && int_size_in_bytes (type) > 0)
10893 || (type && TREE_CODE (type) == RECORD_TYPE
10894 && int_size_in_bytes (type) == 8)) ? 1 : 0;
10897 /* Update the data in CUM to advance over an argument
10898 of mode MODE and data type TYPE.
10899 (TYPE is null for libcalls where that information may not be available.)
10901 Note that for args passed by reference, function_arg will be called
10902 with MODE and TYPE set to that of the pointer to the arg, not the arg
10903 itself. */
10905 static void
10906 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
10907 const_tree type, bool named, int depth)
10909 machine_mode elt_mode;
10910 int n_elts;
10912 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10914 /* Only tick off an argument if we're not recursing. */
10915 if (depth == 0)
10916 cum->nargs_prototype--;
10918 #ifdef HAVE_AS_GNU_ATTRIBUTE
10919 if (DEFAULT_ABI == ABI_V4
10920 && cum->escapes)
10922 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode))
10923 rs6000_passes_float = true;
10924 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
10925 rs6000_passes_vector = true;
10926 else if (SPE_VECTOR_MODE (mode)
10927 && !cum->stdarg
10928 && cum->sysv_gregno <= GP_ARG_MAX_REG)
10929 rs6000_passes_vector = true;
10931 #endif
10933 if (TARGET_ALTIVEC_ABI
10934 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10935 || (type && TREE_CODE (type) == VECTOR_TYPE
10936 && int_size_in_bytes (type) == 16)))
10938 bool stack = false;
10940 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10942 cum->vregno += n_elts;
10944 if (!TARGET_ALTIVEC)
10945 error ("cannot pass argument in vector register because"
10946 " altivec instructions are disabled, use -maltivec"
10947 " to enable them");
10949 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
10950 even if it is going to be passed in a vector register.
10951 Darwin does the same for variable-argument functions. */
10952 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10953 && TARGET_64BIT)
10954 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
10955 stack = true;
10957 else
10958 stack = true;
10960 if (stack)
10962 int align;
10964 /* Vector parameters must be 16-byte aligned. In 32-bit
10965 mode this means we need to take into account the offset
10966 to the parameter save area. In 64-bit mode, they just
10967 have to start on an even word, since the parameter save
10968 area is 16-byte aligned. */
10969 if (TARGET_32BIT)
10970 align = -(rs6000_parm_offset () + cum->words) & 3;
10971 else
10972 align = cum->words & 1;
10973 cum->words += align + rs6000_arg_size (mode, type);
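/* Worked example (illustrative): in 32-bit V.4 mode, where
   rs6000_parm_offset () is 2 words, cum->words == 3 gives
   align = -(2 + 3) & 3 = 3, so three padding words bring the total
   to 8 words and the vector lands on a 16-byte boundary.  In 64-bit
   mode, cum->words == 3 gives align = 1, i.e. start on an even word.  */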
10975 if (TARGET_DEBUG_ARG)
10977 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
10978 cum->words, align);
10979 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
10980 cum->nargs_prototype, cum->prototype,
10981 GET_MODE_NAME (mode));
10985 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
10986 && !cum->stdarg
10987 && cum->sysv_gregno <= GP_ARG_MAX_REG)
10988 cum->sysv_gregno++;
10990 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10992 int size = int_size_in_bytes (type);
10993 /* Variable sized types have size == -1 and are
10994 treated as if consisting entirely of ints.
10995 Pad to 16 byte boundary if needed. */
10996 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
10997 && (cum->words % 2) != 0)
10998 cum->words++;
10999 /* For varargs, we can just go up by the size of the struct. */
11000 if (!named)
11001 cum->words += (size + 7) / 8;
11002 else
11004 /* It is tempting to say int register count just goes up by
11005 sizeof(type)/8, but this is wrong in a case such as
11006 { int; double; int; } [powerpc alignment]. We have to
11007 grovel through the fields for these too. */
11008 cum->intoffset = 0;
11009 cum->floats_in_gpr = 0;
11010 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11011 rs6000_darwin64_record_arg_advance_flush (cum,
11012 size * BITS_PER_UNIT, 1);
11014 if (TARGET_DEBUG_ARG)
11016 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11017 cum->words, TYPE_ALIGN (type), size);
11018 fprintf (stderr,
11019 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11020 cum->nargs_prototype, cum->prototype,
11021 GET_MODE_NAME (mode));
11024 else if (DEFAULT_ABI == ABI_V4)
11026 if (abi_v4_pass_in_fpr (mode))
11028 /* _Decimal128 must use an even/odd register pair. This assumes
11029 that the register number is odd when fregno is odd. */
11030 if (mode == TDmode && (cum->fregno % 2) == 1)
11031 cum->fregno++;
11033 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11034 <= FP_ARG_V4_MAX_REG)
11035 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11036 else
11038 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11039 if (mode == DFmode || FLOAT128_IBM_P (mode)
11040 || mode == DDmode || mode == TDmode)
11041 cum->words += cum->words & 1;
11042 cum->words += rs6000_arg_size (mode, type);
11045 else
11047 int n_words = rs6000_arg_size (mode, type);
11048 int gregno = cum->sysv_gregno;
11050 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
11051 (r7,r8) or (r9,r10). As does any other 2 word item such
11052 as complex int due to a historical mistake. */
11053 if (n_words == 2)
11054 gregno += (1 - gregno) & 1;
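/* (Illustrative) "(1 - gregno) & 1" adds one exactly when gregno is
   even, e.g. r4 becomes r5, so a two-word item always starts on the
   odd register of one of the pairs listed above.  */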
11056 /* Multi-reg args are not split between registers and stack. */
11057 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11059 /* Long long and SPE vectors are aligned on the stack.
11060 So are other 2 word items such as complex int due to
11061 a historical mistake. */
11062 if (n_words == 2)
11063 cum->words += cum->words & 1;
11064 cum->words += n_words;
11067 /* Note: we keep accumulating gregno even after we've started
11068 spilling to the stack; this is how expand_builtin_saveregs
11069 learns that spilling has started. */
11070 cum->sysv_gregno = gregno + n_words;
11073 if (TARGET_DEBUG_ARG)
11075 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11076 cum->words, cum->fregno);
11077 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11078 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11079 fprintf (stderr, "mode = %4s, named = %d\n",
11080 GET_MODE_NAME (mode), named);
11083 else
11085 int n_words = rs6000_arg_size (mode, type);
11086 int start_words = cum->words;
11087 int align_words = rs6000_parm_start (mode, type, start_words);
11089 cum->words = align_words + n_words;
11091 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
11093 /* _Decimal128 must be passed in an even/odd float register pair.
11094 This assumes that the register number is odd when fregno is
11095 odd. */
11096 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11097 cum->fregno++;
11098 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11101 if (TARGET_DEBUG_ARG)
11103 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11104 cum->words, cum->fregno);
11105 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11106 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11107 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11108 named, align_words - start_words, depth);
11113 static void
11114 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11115 const_tree type, bool named)
11117 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11121 static rtx
11122 spe_build_register_parallel (machine_mode mode, int gregno)
11124 rtx r1, r3, r5, r7;
11126 switch (mode)
11128 case DFmode:
11129 r1 = gen_rtx_REG (DImode, gregno);
11130 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11131 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
11133 case DCmode:
11134 case TFmode:
11135 r1 = gen_rtx_REG (DImode, gregno);
11136 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11137 r3 = gen_rtx_REG (DImode, gregno + 2);
11138 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
11139 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
11141 case TCmode:
11142 r1 = gen_rtx_REG (DImode, gregno);
11143 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11144 r3 = gen_rtx_REG (DImode, gregno + 2);
11145 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
11146 r5 = gen_rtx_REG (DImode, gregno + 4);
11147 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
11148 r7 = gen_rtx_REG (DImode, gregno + 6);
11149 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
11150 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
11152 default:
11153 gcc_unreachable ();
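/* For illustration (assumed RTL shape, not part of the build): a
   TFmode argument starting at r5 yields
     (parallel:TF [(expr_list (reg:DI r5) (const_int 0))
                   (expr_list (reg:DI r7) (const_int 8))])
   i.e. the two 8-byte halves at offsets 0 and 8 in GPR pairs.  */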
11157 /* Determine where to put a SIMD argument on the SPE. */
11158 static rtx
11159 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, machine_mode mode,
11160 const_tree type)
11162 int gregno = cum->sysv_gregno;
11164 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
11165 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
11166 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
11167 || mode == DCmode || mode == TCmode))
11169 int n_words = rs6000_arg_size (mode, type);
11171 /* Doubles go in an odd/even register pair (r5/r6, etc). */
11172 if (mode == DFmode)
11173 gregno += (1 - gregno) & 1;
11175 /* Multi-reg args are not split between registers and stack. */
11176 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11177 return NULL_RTX;
11179 return spe_build_register_parallel (mode, gregno);
11181 if (cum->stdarg)
11183 int n_words = rs6000_arg_size (mode, type);
11185 /* SPE vectors are put in odd registers. */
11186 if (n_words == 2 && (gregno & 1) == 0)
11187 gregno += 1;
11189 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
11191 rtx r1, r2;
11192 machine_mode m = SImode;
11194 r1 = gen_rtx_REG (m, gregno);
11195 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
11196 r2 = gen_rtx_REG (m, gregno + 1);
11197 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
11198 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
11200 else
11201 return NULL_RTX;
11203 else
11205 if (gregno <= GP_ARG_MAX_REG)
11206 return gen_rtx_REG (mode, gregno);
11207 else
11208 return NULL_RTX;
11212 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11213 structure between cum->intoffset and bitpos to integer registers. */
11215 static void
11216 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11217 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11219 machine_mode mode;
11220 unsigned int regno;
11221 unsigned int startbit, endbit;
11222 int this_regno, intregs, intoffset;
11223 rtx reg;
11225 if (cum->intoffset == -1)
11226 return;
11228 intoffset = cum->intoffset;
11229 cum->intoffset = -1;
11231 /* If this is the trailing part of a word, try to load only that
11232 much into the register. Otherwise load the whole register. Note
11233 that in the latter case we may pick up unwanted bits. It's not a
11234 problem at the moment, but we may wish to revisit this. */
11236 if (intoffset % BITS_PER_WORD != 0)
11238 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
11239 MODE_INT, 0);
11240 if (mode == BLKmode)
11242 /* We couldn't find an appropriate mode, which happens,
11243 e.g., in packed structs when there are 3 bytes to load.
11244 Move intoffset back to the beginning of the word in this
11245 case. */
11246 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11247 mode = word_mode;
11250 else
11251 mode = word_mode;
11253 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11254 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11255 intregs = (endbit - startbit) / BITS_PER_WORD;
11256 this_regno = cum->words + intoffset / BITS_PER_WORD;
11258 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11259 cum->use_stack = 1;
11261 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11262 if (intregs <= 0)
11263 return;
11265 intoffset /= BITS_PER_UNIT;
11268 regno = GP_ARG_MIN_REG + this_regno;
11269 reg = gen_rtx_REG (mode, regno);
11270 rvec[(*k)++] =
11271 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11273 this_regno += 1;
11274 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11275 mode = word_mode;
11276 intregs -= 1;
11278 while (intregs > 0);
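/* Note (illustrative): "(intoffset | (UNITS_PER_WORD-1)) + 1" in the
   loop above rounds intoffset up to the next word boundary; with
   UNITS_PER_WORD == 8, an intoffset of 5 becomes 8, so every
   iteration after the first loads a full word_mode register.  */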
11281 /* Recursive workhorse for the following. */
11283 static void
11284 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11285 HOST_WIDE_INT startbitpos, rtx rvec[],
11286 int *k)
11288 tree f;
11290 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11291 if (TREE_CODE (f) == FIELD_DECL)
11293 HOST_WIDE_INT bitpos = startbitpos;
11294 tree ftype = TREE_TYPE (f);
11295 machine_mode mode;
11296 if (ftype == error_mark_node)
11297 continue;
11298 mode = TYPE_MODE (ftype);
11300 if (DECL_SIZE (f) != 0
11301 && tree_fits_uhwi_p (bit_position (f)))
11302 bitpos += int_bit_position (f);
11304 /* ??? FIXME: else assume zero offset. */
11306 if (TREE_CODE (ftype) == RECORD_TYPE)
11307 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11308 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11310 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11311 #if 0
11312 switch (mode)
11314 case SCmode: mode = SFmode; break;
11315 case DCmode: mode = DFmode; break;
11316 case TCmode: mode = TFmode; break;
11317 default: break;
11319 #endif
11320 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11321 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11323 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11324 && (mode == TFmode || mode == TDmode));
11325 /* Long double or _Decimal128 split over regs and memory. */
11326 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11327 cum->use_stack = 1;
11329 rvec[(*k)++]
11330 = gen_rtx_EXPR_LIST (VOIDmode,
11331 gen_rtx_REG (mode, cum->fregno++),
11332 GEN_INT (bitpos / BITS_PER_UNIT));
11333 if (FLOAT128_2REG_P (mode))
11334 cum->fregno++;
11336 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11338 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11339 rvec[(*k)++]
11340 = gen_rtx_EXPR_LIST (VOIDmode,
11341 gen_rtx_REG (mode, cum->vregno++),
11342 GEN_INT (bitpos / BITS_PER_UNIT));
11344 else if (cum->intoffset == -1)
11345 cum->intoffset = bitpos;
11349 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11350 the register(s) to be used for each field and subfield of a struct
11351 being passed by value, along with the offset of where the
11352 register's value may be found in the block. FP fields go in FP
11353 registers, vector fields go in vector registers, and everything
11354 else goes in int registers, packed as in memory.
11356 This code is also used for function return values. RETVAL indicates
11357 whether this is the case.
11359 Much of this is taken from the SPARC V9 port, which has a similar
11360 calling convention. */
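/* For example (illustrative): a named darwin64 argument of type
   "struct { double d; int i; }" produces a PARALLEL whose first
   element places D in the next FPR at byte offset 0 and whose second
   loads the word containing I into a GPR at byte offset 8; if any
   part spills, element 0 becomes the magic (NULL_RTX, 0) pair
   described below.  */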
11362 static rtx
11363 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11364 bool named, bool retval)
11366 rtx rvec[FIRST_PSEUDO_REGISTER];
11367 int k = 1, kbase = 1;
11368 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11369 /* This is a copy; modifications are not visible to our caller. */
11370 CUMULATIVE_ARGS copy_cum = *orig_cum;
11371 CUMULATIVE_ARGS *cum = &copy_cum;
11373 /* Pad to 16 byte boundary if needed. */
11374 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11375 && (cum->words % 2) != 0)
11376 cum->words++;
11378 cum->intoffset = 0;
11379 cum->use_stack = 0;
11380 cum->named = named;
11382 /* Put entries into rvec[] for individual FP and vector fields, and
11383 for the chunks of memory that go in int regs. Note we start at
11384 element 1; 0 is reserved for an indication of using memory, and
11385 may or may not be filled in below. */
11386 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11387 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11389 /* If any part of the struct went on the stack put all of it there.
11390 This hack is because the generic code for
11391 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11392 parts of the struct are not at the beginning. */
11393 if (cum->use_stack)
11395 if (retval)
11396 return NULL_RTX; /* doesn't go in registers at all */
11397 kbase = 0;
11398 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11400 if (k > 1 || cum->use_stack)
11401 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11402 else
11403 return NULL_RTX;
11406 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11408 static rtx
11409 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11410 int align_words)
11412 int n_units;
11413 int i, k;
11414 rtx rvec[GP_ARG_NUM_REG + 1];
11416 if (align_words >= GP_ARG_NUM_REG)
11417 return NULL_RTX;
11419 n_units = rs6000_arg_size (mode, type);
11421 /* Optimize the simple case where the arg fits in one gpr, except in
11422 the case of BLKmode due to assign_parms assuming that registers are
11423 BITS_PER_WORD wide. */
11424 if (n_units == 0
11425 || (n_units == 1 && mode != BLKmode))
11426 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11428 k = 0;
11429 if (align_words + n_units > GP_ARG_NUM_REG)
11430 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11431 using a magic NULL_RTX component.
11432 This is not strictly correct. Only some of the arg belongs in
11433 memory, not all of it. However, the normal scheme using
11434 function_arg_partial_nregs can result in unusual subregs, eg.
11435 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11436 store the whole arg to memory is often more efficient than code
11437 to store pieces, and we know that space is available in the right
11438 place for the whole arg. */
11439 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11441 i = 0;
11444 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11445 rtx off = GEN_INT (i++ * 4);
11446 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11448 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11450 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
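/* Worked example (illustrative): with -m32 -mpowerpc64, a two-word
   DFmode argument at align_words == 7 has only r10 left, so the loop
   above emits one (reg:SI r10, offset 0) element preceded by the
   magic NULL_RTX element that says the second half lives in memory.  */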
11453 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11454 but must also be copied into the parameter save area starting at
11455 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11456 to the GPRs and/or memory. Return the number of elements used. */
11458 static int
11459 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11460 int align_words, rtx *rvec)
11462 int k = 0;
11464 if (align_words < GP_ARG_NUM_REG)
11466 int n_words = rs6000_arg_size (mode, type);
11468 if (align_words + n_words > GP_ARG_NUM_REG
11469 || mode == BLKmode
11470 || (TARGET_32BIT && TARGET_POWERPC64))
11472 /* If this is partially on the stack, then we only
11473 include the portion actually in registers here. */
11474 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11475 int i = 0;
11477 if (align_words + n_words > GP_ARG_NUM_REG)
11479 /* Not all of the arg fits in gprs. Say that it goes in memory
11480 too, using a magic NULL_RTX component. Also see comment in
11481 rs6000_mixed_function_arg for why the normal
11482 function_arg_partial_nregs scheme doesn't work in this case. */
11483 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11488 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11489 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11490 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11492 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11494 else
11496 /* The whole arg fits in gprs. */
11497 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11498 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11501 else
11503 /* It's entirely in memory. */
11504 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11507 return k;
11510 /* RVEC is a vector of K components of an argument of mode MODE.
11511 Construct the final function_arg return value from it. */
11513 static rtx
11514 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11516 gcc_assert (k >= 1);
11518 /* Avoid returning a PARALLEL in the trivial cases. */
11519 if (k == 1)
11521 if (XEXP (rvec[0], 0) == NULL_RTX)
11522 return NULL_RTX;
11524 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11525 return XEXP (rvec[0], 0);
11528 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11531 /* Determine where to put an argument to a function.
11532 Value is zero to push the argument on the stack,
11533 or a hard register in which to store the argument.
11535 MODE is the argument's machine mode.
11536 TYPE is the data type of the argument (as a tree).
11537 This is null for libcalls where that information may
11538 not be available.
11539 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11540 the preceding args and about the function being called. It is
11541 not modified in this routine.
11542 NAMED is nonzero if this argument is a named parameter
11543 (otherwise it is an extra parameter matching an ellipsis).
11545 On RS/6000 the first eight words of non-FP are normally in registers
11546 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11547 Under V.4, the first 8 FP args are in registers.
11549 If this is floating-point and no prototype is specified, we use
11550 both an FP and integer register (or possibly FP reg and stack). Library
11551 functions (when CALL_LIBCALL is set) always have the proper types for args,
11552 so we can pass the FP value just in one register. emit_library_function
11553 doesn't support PARALLEL anyway.
11555 Note that for args passed by reference, function_arg will be called
11556 with MODE and TYPE set to that of the pointer to the arg, not the arg
11557 itself. */
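/* Quick reference (illustrative, 64-bit ELFv2 with a prototype in
   scope): for "void f (double a, int b, vector int c)" the three
   calls return registers f1, r4 and v2 respectively, while
   rs6000_function_arg_advance moves the words/fregno/vregno
   counters in step.  */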
11559 static rtx
11560 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11561 const_tree type, bool named)
11563 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11564 enum rs6000_abi abi = DEFAULT_ABI;
11565 machine_mode elt_mode;
11566 int n_elts;
11568 /* Return a marker indicating whether the bit that V.4 uses to say
11569 FP args were passed in registers needs to be set or cleared in CR1.
11570 Assume that we don't need the marker for software floating point,
11571 or compiler generated library calls. */
11572 if (mode == VOIDmode)
11574 if (abi == ABI_V4
11575 && (cum->call_cookie & CALL_LIBCALL) == 0
11576 && (cum->stdarg
11577 || (cum->nargs_prototype < 0
11578 && (cum->prototype || TARGET_NO_PROTOTYPE))))
11580 /* For the SPE, we need to crxor CR6 always. */
11581 if (TARGET_SPE_ABI)
11582 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
11583 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
11584 return GEN_INT (cum->call_cookie
11585 | ((cum->fregno == FP_ARG_MIN_REG)
11586 ? CALL_V4_SET_FP_ARGS
11587 : CALL_V4_CLEAR_FP_ARGS));
11590 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11593 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11595 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11597 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11598 if (rslt != NULL_RTX)
11599 return rslt;
11600 /* Else fall through to usual handling. */
11603 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11605 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11606 rtx r, off;
11607 int i, k = 0;
11609 /* Do we also need to pass this argument in the parameter save area?
11610 Library support functions for IEEE 128-bit are assumed to not need the
11611 value passed both in GPRs and in vector registers. */
11612 if (TARGET_64BIT && !cum->prototype
11613 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11615 int align_words = ROUND_UP (cum->words, 2);
11616 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11619 /* Describe where this argument goes in the vector registers. */
11620 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11622 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11623 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11624 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11627 return rs6000_finish_function_arg (mode, rvec, k);
11629 else if (TARGET_ALTIVEC_ABI
11630 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11631 || (type && TREE_CODE (type) == VECTOR_TYPE
11632 && int_size_in_bytes (type) == 16)))
11634 if (named || abi == ABI_V4)
11635 return NULL_RTX;
11636 else
11638 /* Vector parameters to varargs functions under AIX or Darwin
11639 get passed in memory and possibly also in GPRs. */
11640 int align, align_words, n_words;
11641 machine_mode part_mode;
11643 /* Vector parameters must be 16-byte aligned. In 32-bit
11644 mode this means we need to take into account the offset
11645 to the parameter save area. In 64-bit mode, they just
11646 have to start on an even word, since the parameter save
11647 area is 16-byte aligned. */
11648 if (TARGET_32BIT)
11649 align = -(rs6000_parm_offset () + cum->words) & 3;
11650 else
11651 align = cum->words & 1;
11652 align_words = cum->words + align;
11654 /* Out of registers? Memory, then. */
11655 if (align_words >= GP_ARG_NUM_REG)
11656 return NULL_RTX;
11658 if (TARGET_32BIT && TARGET_POWERPC64)
11659 return rs6000_mixed_function_arg (mode, type, align_words);
11661 /* The vector value goes in GPRs. Only the part of the
11662 value in GPRs is reported here. */
11663 part_mode = mode;
11664 n_words = rs6000_arg_size (mode, type);
11665 if (align_words + n_words > GP_ARG_NUM_REG)
11666 /* Fortunately, there are only two possibilities: the value
11667 is either wholly in GPRs or half in GPRs and half not. */
11668 part_mode = DImode;
11670 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11673 else if (TARGET_SPE_ABI && TARGET_SPE
11674 && (SPE_VECTOR_MODE (mode)
11675 || (TARGET_E500_DOUBLE && (mode == DFmode
11676 || mode == DCmode
11677 || mode == TFmode
11678 || mode == TCmode))))
11679 return rs6000_spe_function_arg (cum, mode, type);
11681 else if (abi == ABI_V4)
11683 if (abi_v4_pass_in_fpr (mode))
11685 /* _Decimal128 must use an even/odd register pair. This assumes
11686 that the register number is odd when fregno is odd. */
11687 if (mode == TDmode && (cum->fregno % 2) == 1)
11688 cum->fregno++;
11690 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11691 <= FP_ARG_V4_MAX_REG)
11692 return gen_rtx_REG (mode, cum->fregno);
11693 else
11694 return NULL_RTX;
11696 else
11698 int n_words = rs6000_arg_size (mode, type);
11699 int gregno = cum->sysv_gregno;
11701 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
11702 (r7,r8) or (r9,r10). As does any other 2 word item such
11703 as complex int due to a historical mistake. */
11704 if (n_words == 2)
11705 gregno += (1 - gregno) & 1;
11707 /* Multi-reg args are not split between registers and stack. */
11708 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11709 return NULL_RTX;
11711 if (TARGET_32BIT && TARGET_POWERPC64)
11712 return rs6000_mixed_function_arg (mode, type,
11713 gregno - GP_ARG_MIN_REG);
11714 return gen_rtx_REG (mode, gregno);
11717 else
11719 int align_words = rs6000_parm_start (mode, type, cum->words);
11721 /* _Decimal128 must be passed in an even/odd float register pair.
11722 This assumes that the register number is odd when fregno is odd. */
11723 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11724 cum->fregno++;
11726 if (USE_FP_FOR_ARG_P (cum, elt_mode))
11728 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11729 rtx r, off;
11730 int i, k = 0;
11731 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11732 int fpr_words;
11734 /* Do we also need to pass this argument in the parameter
11735 save area? */
11736 if (type && (cum->nargs_prototype <= 0
11737 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11738 && TARGET_XL_COMPAT
11739 && align_words >= GP_ARG_NUM_REG)))
11740 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11742 /* Describe where this argument goes in the fprs. */
11743 for (i = 0; i < n_elts
11744 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11746 /* Check if the argument is split over registers and memory.
11747 This can only ever happen for long double or _Decimal128;
11748 complex types are handled via split_complex_arg. */
11749 machine_mode fmode = elt_mode;
11750 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11752 gcc_assert (FLOAT128_2REG_P (fmode));
11753 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11756 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11757 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11758 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11761 /* If there were not enough FPRs to hold the argument, the rest
11762 usually goes into memory. However, if the current position
11763 is still within the register parameter area, a portion may
11764 actually have to go into GPRs.
11766 Note that it may happen that the portion of the argument
11767 passed in the first "half" of the first GPR was already
11768 passed in the last FPR as well.
11770 For unnamed arguments, we already set up GPRs to cover the
11771 whole argument in rs6000_psave_function_arg, so there is
11772 nothing further to do at this point. */
11773 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11774 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11775 && cum->nargs_prototype > 0)
11777 static bool warned;
11779 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11780 int n_words = rs6000_arg_size (mode, type);
11782 align_words += fpr_words;
11783 n_words -= fpr_words;
11787 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11788 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11789 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11791 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11793 if (!warned && warn_psabi)
11795 warned = true;
11796 inform (input_location,
11797 "the ABI of passing homogeneous float aggregates"
11798 " has changed in GCC 5");
11802 return rs6000_finish_function_arg (mode, rvec, k);
11804 else if (align_words < GP_ARG_NUM_REG)
11806 if (TARGET_32BIT && TARGET_POWERPC64)
11807 return rs6000_mixed_function_arg (mode, type, align_words);
11809 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11811 else
11812 return NULL_RTX;
11816 /* For an arg passed partly in registers and partly in memory, this is
11817 the number of bytes passed in registers. For args passed entirely in
11818 registers or entirely in memory, zero. When an arg is described by a
11819 PARALLEL, perhaps using more than one register type, this function
11820 returns the number of bytes used by the first element of the PARALLEL. */
11822 static int
11823 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
11824 tree type, bool named)
11826 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11827 bool passed_in_gprs = true;
11828 int ret = 0;
11829 int align_words;
11830 machine_mode elt_mode;
11831 int n_elts;
11833 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11835 if (DEFAULT_ABI == ABI_V4)
11836 return 0;
11838 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11840 /* If we are passing this arg in the fixed parameter save area (gprs or
11841 memory) as well as VRs, we do not use the partial bytes mechanism;
11842 instead, rs6000_function_arg will return a PARALLEL including a memory
11843 element as necessary. Library support functions for IEEE 128-bit are
11844 assumed to not need the value passed both in GPRs and in vector
11845 registers. */
11846 if (TARGET_64BIT && !cum->prototype
11847 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11848 return 0;
11850 /* Otherwise, we pass in VRs only. Check for partial copies. */
11851 passed_in_gprs = false;
11852 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
11853 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
11856 /* In this complicated case we just disable the partial_nregs code. */
11857 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11858 return 0;
11860 align_words = rs6000_parm_start (mode, type, cum->words);
11862 if (USE_FP_FOR_ARG_P (cum, elt_mode))
11864 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11866 /* If we are passing this arg in the fixed parameter save area
11867 (gprs or memory) as well as FPRs, we do not use the partial
11868 bytes mechanism; instead, rs6000_function_arg will return a
11869 PARALLEL including a memory element as necessary. */
11870 if (type
11871 && (cum->nargs_prototype <= 0
11872 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11873 && TARGET_XL_COMPAT
11874 && align_words >= GP_ARG_NUM_REG)))
11875 return 0;
11877 /* Otherwise, we pass in FPRs only. Check for partial copies. */
11878 passed_in_gprs = false;
11879 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
11881 /* Compute number of bytes / words passed in FPRs. If there
11882 is still space available in the register parameter area
11883 *after* that amount, a part of the argument will be passed
11884 in GPRs. In that case, the total amount passed in any
11885 registers is equal to the amount that would have been passed
11886 in GPRs if everything were passed there, so we fall back to
11887 the GPR code below to compute the appropriate value. */
11888 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
11889 * MIN (8, GET_MODE_SIZE (elt_mode)));
11890 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
11892 if (align_words + fpr_words < GP_ARG_NUM_REG)
11893 passed_in_gprs = true;
11894 else
11895 ret = fpr;
11899 if (passed_in_gprs
11900 && align_words < GP_ARG_NUM_REG
11901 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
11902 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
11904 if (ret != 0 && TARGET_DEBUG_ARG)
11905 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
11907 return ret;
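/* Example (illustrative, 64-bit ELFv2): a 24-byte aggregate arriving
   at align_words == 7 has one doubleword in r10 and two in memory,
   so this returns (8 - 7) * 8 = 8 partial bytes.  */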
11910 /* A C expression that indicates when an argument must be passed by
11911 reference. If nonzero for an argument, a copy of that argument is
11912 made in memory and a pointer to the argument is passed instead of
11913 the argument itself. The pointer is passed in whatever way is
11914 appropriate for passing a pointer to that type.
11916 Under V.4, aggregates and long double are passed by reference.
11918 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
11919 reference unless the AltiVec vector extension ABI is in force.
11921 As an extension to all ABIs, variable sized types are passed by
11922 reference. */
11924 static bool
11925 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
11926 machine_mode mode, const_tree type,
11927 bool named ATTRIBUTE_UNUSED)
11929 if (!type)
11930 return 0;
11932 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11933 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11935 if (TARGET_DEBUG_ARG)
11936 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
11937 return 1;
11940 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
11942 if (TARGET_DEBUG_ARG)
11943 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
11944 return 1;
11947 if (int_size_in_bytes (type) < 0)
11949 if (TARGET_DEBUG_ARG)
11950 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
11951 return 1;
11954 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11955 modes only exist for GCC vector types if -maltivec. */
11956 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
11958 if (TARGET_DEBUG_ARG)
11959 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
11960 return 1;
11963 /* Pass synthetic vectors in memory. */
11964 if (TREE_CODE (type) == VECTOR_TYPE
11965 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11967 static bool warned_for_pass_big_vectors = false;
11968 if (TARGET_DEBUG_ARG)
11969 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
11970 if (!warned_for_pass_big_vectors)
11972 warning (0, "GCC vector passed by reference: "
11973 "non-standard ABI extension with no compatibility guarantee");
11974 warned_for_pass_big_vectors = true;
11976 return 1;
11979 return 0;
11982 /* Process a parameter of type TYPE after ARGS_SO_FAR parameters have
11983 already been processed. Return true if the parameter must be passed
11984 (fully or partially) on the stack. */
11986 static bool
11987 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
11989 machine_mode mode;
11990 int unsignedp;
11991 rtx entry_parm;
11993 /* Catch errors. */
11994 if (type == NULL || type == error_mark_node)
11995 return true;
11997 /* Handle types with no storage requirement. */
11998 if (TYPE_MODE (type) == VOIDmode)
11999 return false;
12001 /* Handle complex types. */
12002 if (TREE_CODE (type) == COMPLEX_TYPE)
12003 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12004 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
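/* Note: the element type is tested twice on purpose; each recursive
   call also advances ARGS_SO_FAR, once for the real part and once
   for the imaginary part.  */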
12006 /* Handle transparent aggregates. */
12007 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12008 && TYPE_TRANSPARENT_AGGR (type))
12009 type = TREE_TYPE (first_field (type));
12011 /* See if this arg was passed by invisible reference. */
12012 if (pass_by_reference (get_cumulative_args (args_so_far),
12013 TYPE_MODE (type), type, true))
12014 type = build_pointer_type (type);
12016 /* Find mode as it is passed by the ABI. */
12017 unsignedp = TYPE_UNSIGNED (type);
12018 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12020 /* If we must pass in stack, we need a stack. */
12021 if (rs6000_must_pass_in_stack (mode, type))
12022 return true;
12024 /* If there is no incoming register, we need a stack. */
12025 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12026 if (entry_parm == NULL)
12027 return true;
12029 /* Likewise if we need to pass both in registers and on the stack. */
12030 if (GET_CODE (entry_parm) == PARALLEL
12031 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12032 return true;
12034 /* Also true if we're partially in registers and partially not. */
12035 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12036 return true;
12038 /* Update info on where next arg arrives in registers. */
12039 rs6000_function_arg_advance (args_so_far, mode, type, true);
12040 return false;
12043 /* Return true if FUN has no prototype, has a variable argument
12044 list, or passes any parameter in memory. */
12046 static bool
12047 rs6000_function_parms_need_stack (tree fun, bool incoming)
12049 tree fntype, result;
12050 CUMULATIVE_ARGS args_so_far_v;
12051 cumulative_args_t args_so_far;
12053 if (!fun)
12054 /* Must be a libcall, all of which only use reg parms. */
12055 return false;
12057 fntype = fun;
12058 if (!TYPE_P (fun))
12059 fntype = TREE_TYPE (fun);
12061 /* Varargs functions need the parameter save area. */
12062 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12063 return true;
12065 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12066 args_so_far = pack_cumulative_args (&args_so_far_v);
12068 /* When incoming, we will have been passed the function decl.
12069 It is necessary to use the decl to handle K&R style functions,
12070 where TYPE_ARG_TYPES may not be available. */
12071 if (incoming)
12073 gcc_assert (DECL_P (fun));
12074 result = DECL_RESULT (fun);
12076 else
12077 result = TREE_TYPE (fntype);
12079 if (result && aggregate_value_p (result, fntype))
12081 if (!TYPE_P (result))
12082 result = TREE_TYPE (result);
12083 result = build_pointer_type (result);
12084 rs6000_parm_needs_stack (args_so_far, result);
12087 if (incoming)
12089 tree parm;
12091 for (parm = DECL_ARGUMENTS (fun);
12092 parm && parm != void_list_node;
12093 parm = TREE_CHAIN (parm))
12094 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12095 return true;
12097 else
12099 function_args_iterator args_iter;
12100 tree arg_type;
12102 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12103 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12104 return true;
12107 return false;
12110 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12111 usually a constant depending on the ABI. However, in the ELFv2 ABI
12112 the register parameter area is optional when calling a function that
12113 has a prototype in scope, has no variable argument list, and passes
12114 all parameters in registers. */
12117 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12119 int reg_parm_stack_space;
12121 switch (DEFAULT_ABI)
12123 default:
12124 reg_parm_stack_space = 0;
12125 break;
12127 case ABI_AIX:
12128 case ABI_DARWIN:
12129 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12130 break;
12132 case ABI_ELFv2:
12133 /* ??? Recomputing this every time is a bit expensive. Is there
12134 a place to cache this information? */
12135 if (rs6000_function_parms_need_stack (fun, incoming))
12136 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12137 else
12138 reg_parm_stack_space = 0;
12139 break;
12142 return reg_parm_stack_space;
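/* For instance (illustrative): 64-bit AIX always reserves 64 bytes
   (8 doublewords backing r3-r10), while under ELFv2 a call to a
   prototyped "int f (int, int)" needs no parameter save area and
   yields 0.  */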
12145 static void
12146 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12148 int i;
12149 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12151 if (nregs == 0)
12152 return;
12154 for (i = 0; i < nregs; i++)
12156 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12157 if (reload_completed)
12159 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12160 tem = NULL_RTX;
12161 else
12162 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12163 i * GET_MODE_SIZE (reg_mode));
12165 else
12166 tem = replace_equiv_address (tem, XEXP (tem, 0));
12168 gcc_assert (tem);
12170 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12174 /* Perform any actions needed for a function that is receiving a
12175 variable number of arguments.
12177 CUM is as above.
12179 MODE and TYPE are the mode and type of the current parameter.
12181 PRETEND_SIZE is a variable that should be set to the amount of stack
12182 that must be pushed by the prolog to pretend that our caller pushed
12183 it.
12185 Normally, this macro will push all remaining incoming registers on the
12186 stack and set PRETEND_SIZE to the length of the registers pushed. */
12188 static void
12189 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12190 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12191 int no_rtl)
12193 CUMULATIVE_ARGS next_cum;
12194 int reg_size = TARGET_32BIT ? 4 : 8;
12195 rtx save_area = NULL_RTX, mem;
12196 int first_reg_offset;
12197 alias_set_type set;
12199 /* Skip the last named argument. */
12200 next_cum = *get_cumulative_args (cum);
12201 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12203 if (DEFAULT_ABI == ABI_V4)
12205 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12207 if (! no_rtl)
12209 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12210 HOST_WIDE_INT offset = 0;
12212 /* Try to optimize the size of the varargs save area.
12213 The ABI requires that ap.reg_save_area is doubleword
12214 aligned, but we don't need to allocate space for all
12215 the bytes, only those to which we will actually save
12216 anything. */
12217 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12218 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12219 if (TARGET_HARD_FLOAT && TARGET_FPRS
12220 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12221 && cfun->va_list_fpr_size)
12223 if (gpr_reg_num)
12224 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12225 * UNITS_PER_FP_WORD;
12226 if (cfun->va_list_fpr_size
12227 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12228 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12229 else
12230 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12231 * UNITS_PER_FP_WORD;
12233 if (gpr_reg_num)
12235 offset = -((first_reg_offset * reg_size) & ~7);
12236 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12238 gpr_reg_num = cfun->va_list_gpr_size;
12239 if (reg_size == 4 && (first_reg_offset & 1))
12240 gpr_reg_num++;
12242 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12244 else if (fpr_size)
12245 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12246 * UNITS_PER_FP_WORD
12247 - (int) (GP_ARG_NUM_REG * reg_size);
12249 if (gpr_size + fpr_size)
12251 rtx reg_save_area
12252 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12253 gcc_assert (GET_CODE (reg_save_area) == MEM);
12254 reg_save_area = XEXP (reg_save_area, 0);
12255 if (GET_CODE (reg_save_area) == PLUS)
12257 gcc_assert (XEXP (reg_save_area, 0)
12258 == virtual_stack_vars_rtx);
12259 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12260 offset += INTVAL (XEXP (reg_save_area, 1));
12262 else
12263 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12266 cfun->machine->varargs_save_offset = offset;
12267 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12270 else
12272 first_reg_offset = next_cum.words;
12273 save_area = crtl->args.internal_arg_pointer;
12275 if (targetm.calls.must_pass_in_stack (mode, type))
12276 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12279 set = get_varargs_alias_set ();
12280 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12281 && cfun->va_list_gpr_size)
12283 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12285 if (va_list_gpr_counter_field)
12286 /* V4 va_list_gpr_size counts number of registers needed. */
12287 n_gpr = cfun->va_list_gpr_size;
12288 else
12289 /* char * va_list instead counts number of bytes needed. */
12290 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12292 if (nregs > n_gpr)
12293 nregs = n_gpr;
12295 mem = gen_rtx_MEM (BLKmode,
12296 plus_constant (Pmode, save_area,
12297 first_reg_offset * reg_size));
12298 MEM_NOTRAP_P (mem) = 1;
12299 set_mem_alias_set (mem, set);
12300 set_mem_align (mem, BITS_PER_WORD);
12302 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12303 nregs);
12306 /* Save FP registers if needed. */
12307 if (DEFAULT_ABI == ABI_V4
12308 && TARGET_HARD_FLOAT && TARGET_FPRS
12309 && ! no_rtl
12310 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12311 && cfun->va_list_fpr_size)
12313 int fregno = next_cum.fregno, nregs;
12314 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12315 rtx lab = gen_label_rtx ();
12316 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12317 * UNITS_PER_FP_WORD);
12319 emit_jump_insn
12320 (gen_rtx_SET (pc_rtx,
12321 gen_rtx_IF_THEN_ELSE (VOIDmode,
12322 gen_rtx_NE (VOIDmode, cr1,
12323 const0_rtx),
12324 gen_rtx_LABEL_REF (VOIDmode, lab),
12325 pc_rtx)));
12327 for (nregs = 0;
12328 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12329 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12331 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12332 ? DFmode : SFmode,
12333 plus_constant (Pmode, save_area, off));
12334 MEM_NOTRAP_P (mem) = 1;
12335 set_mem_alias_set (mem, set);
12336 set_mem_align (mem, GET_MODE_ALIGNMENT (
12337 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12338 ? DFmode : SFmode));
12339 emit_move_insn (mem, gen_rtx_REG (
12340 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12341 ? DFmode : SFmode, fregno));
12344 emit_label (lab);
12348 /* Create the va_list data type. */
12350 static tree
12351 rs6000_build_builtin_va_list (void)
12353 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12355 /* For AIX, prefer 'char *' because that's what the system
12356 header files like. */
12357 if (DEFAULT_ABI != ABI_V4)
12358 return build_pointer_type (char_type_node);
12360 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12361 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12362 get_identifier ("__va_list_tag"), record);
12364 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12365 unsigned_char_type_node);
12366 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12367 unsigned_char_type_node);
12368 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12369 every user file. */
12370 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12371 get_identifier ("reserved"), short_unsigned_type_node);
12372 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12373 get_identifier ("overflow_arg_area"),
12374 ptr_type_node);
12375 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12376 get_identifier ("reg_save_area"),
12377 ptr_type_node);
12379 va_list_gpr_counter_field = f_gpr;
12380 va_list_fpr_counter_field = f_fpr;
12382 DECL_FIELD_CONTEXT (f_gpr) = record;
12383 DECL_FIELD_CONTEXT (f_fpr) = record;
12384 DECL_FIELD_CONTEXT (f_res) = record;
12385 DECL_FIELD_CONTEXT (f_ovf) = record;
12386 DECL_FIELD_CONTEXT (f_sav) = record;
12388 TYPE_STUB_DECL (record) = type_decl;
12389 TYPE_NAME (record) = type_decl;
12390 TYPE_FIELDS (record) = f_gpr;
12391 DECL_CHAIN (f_gpr) = f_fpr;
12392 DECL_CHAIN (f_fpr) = f_res;
12393 DECL_CHAIN (f_res) = f_ovf;
12394 DECL_CHAIN (f_ovf) = f_sav;
12396 layout_type (record);
12398 /* The correct type is an array type of one element. */
12399 return build_array_type (record, build_index_type (size_zero_node));
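/* The record built above corresponds to this C declaration
   (illustrative; the actual type is constructed as trees):

     typedef struct __va_list_tag {
       unsigned char gpr;         // next GPR index, counting from 0
       unsigned char fpr;         // next FPR index, counting from 0
       unsigned short reserved;   // named padding, keeps -Wpadded quiet
       void *overflow_arg_area;   // arguments past the saved registers
       void *reg_save_area;       // where r3-r10 / f1-f8 were spilled
     } __builtin_va_list[1];  */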
12402 /* Implement va_start. */
12404 static void
12405 rs6000_va_start (tree valist, rtx nextarg)
12407 HOST_WIDE_INT words, n_gpr, n_fpr;
12408 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12409 tree gpr, fpr, ovf, sav, t;
12411 /* Only SVR4 needs something special. */
12412 if (DEFAULT_ABI != ABI_V4)
12414 std_expand_builtin_va_start (valist, nextarg);
12415 return;
12418 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12419 f_fpr = DECL_CHAIN (f_gpr);
12420 f_res = DECL_CHAIN (f_fpr);
12421 f_ovf = DECL_CHAIN (f_res);
12422 f_sav = DECL_CHAIN (f_ovf);
12424 valist = build_simple_mem_ref (valist);
12425 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12426 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12427 f_fpr, NULL_TREE);
12428 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12429 f_ovf, NULL_TREE);
12430 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12431 f_sav, NULL_TREE);
12433 /* Count number of gp and fp argument registers used. */
12434 words = crtl->args.info.words;
12435 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12436 GP_ARG_NUM_REG);
12437 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12438 FP_ARG_NUM_REG);
12440 if (TARGET_DEBUG_ARG)
12441 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12442 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12443 words, n_gpr, n_fpr);
12445 if (cfun->va_list_gpr_size)
12447 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12448 build_int_cst (NULL_TREE, n_gpr));
12449 TREE_SIDE_EFFECTS (t) = 1;
12450 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12453 if (cfun->va_list_fpr_size)
12455 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12456 build_int_cst (NULL_TREE, n_fpr));
12457 TREE_SIDE_EFFECTS (t) = 1;
12458 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12460 #ifdef HAVE_AS_GNU_ATTRIBUTE
12461 if (call_ABI_of_interest (cfun->decl))
12462 rs6000_passes_float = true;
12463 #endif
12466 /* Find the overflow area. */
12467 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12468 if (words != 0)
12469 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12470 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12471 TREE_SIDE_EFFECTS (t) = 1;
12472 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12474 /* If there were no va_arg invocations, don't set up the register
12475 save area. */
12476 if (!cfun->va_list_gpr_size
12477 && !cfun->va_list_fpr_size
12478 && n_gpr < GP_ARG_NUM_REG
12479 && n_fpr < FP_ARG_V4_MAX_REG)
12480 return;
12482 /* Find the register save area. */
12483 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12484 if (cfun->machine->varargs_save_offset)
12485 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12486 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12487 TREE_SIDE_EFFECTS (t) = 1;
12488 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12491 /* Implement va_arg. */
12493 static tree
12494 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12495 gimple_seq *post_p)
12497 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12498 tree gpr, fpr, ovf, sav, reg, t, u;
12499 int size, rsize, n_reg, sav_ofs, sav_scale;
12500 tree lab_false, lab_over, addr;
12501 int align;
12502 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12503 int regalign = 0;
12504 gimple *stmt;
12506 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12508 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12509 return build_va_arg_indirect_ref (t);
12512 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12513 earlier version of gcc, with the property that it always applied alignment
12514 adjustments to the va-args (even for zero-sized types). The cheapest way
12515 to deal with this is to replicate the effect of the part of
12516 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12517 of relevance.
12518 We don't need to check for pass-by-reference because of the test above.
12519 We can return a simplified answer, since we know there's no offset to add. */
12521 if (((TARGET_MACHO
12522 && rs6000_darwin64_abi)
12523 || DEFAULT_ABI == ABI_ELFv2
12524 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12525 && integer_zerop (TYPE_SIZE (type)))
12527 unsigned HOST_WIDE_INT align, boundary;
12528 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12529 align = PARM_BOUNDARY / BITS_PER_UNIT;
12530 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12531 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12532 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12533 boundary /= BITS_PER_UNIT;
12534 if (boundary > align)
12536 tree t;
12537 /* This updates arg ptr by the amount that would be necessary
12538 to align the zero-sized (but not zero-alignment) item. */
12539 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12540 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12541 gimplify_and_add (t, pre_p);
12543 t = fold_convert (sizetype, valist_tmp);
12544 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12545 fold_convert (TREE_TYPE (valist),
12546 fold_build2 (BIT_AND_EXPR, sizetype, t,
12547 size_int (-boundary))));
12548 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12549 gimplify_and_add (t, pre_p);
12551 /* Since it is zero-sized there's no increment for the item itself. */
12552 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12553 return build_va_arg_indirect_ref (valist_tmp);
12556 if (DEFAULT_ABI != ABI_V4)
12558 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12560 tree elem_type = TREE_TYPE (type);
12561 machine_mode elem_mode = TYPE_MODE (elem_type);
12562 int elem_size = GET_MODE_SIZE (elem_mode);
12564 if (elem_size < UNITS_PER_WORD)
12566 tree real_part, imag_part;
12567 gimple_seq post = NULL;
12569 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12570 &post);
12571 /* Copy the value into a temporary, lest the formal temporary
12572 be reused out from under us. */
12573 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12574 gimple_seq_add_seq (pre_p, post);
12576 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12577 post_p);
12579 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12583 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12586 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12587 f_fpr = DECL_CHAIN (f_gpr);
12588 f_res = DECL_CHAIN (f_fpr);
12589 f_ovf = DECL_CHAIN (f_res);
12590 f_sav = DECL_CHAIN (f_ovf);
12592 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12593 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12594 f_fpr, NULL_TREE);
12595 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12596 f_ovf, NULL_TREE);
12597 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12598 f_sav, NULL_TREE);
12600 size = int_size_in_bytes (type);
12601 rsize = (size + 3) / 4;
12602 align = 1;
12604 machine_mode mode = TYPE_MODE (type);
12605 if (abi_v4_pass_in_fpr (mode))
12607 /* FP args go in FP registers, if present. */
12608 reg = fpr;
12609 n_reg = (size + 7) / 8;
12610 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
12611 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
12612 if (mode != SFmode && mode != SDmode)
12613 align = 8;
12615 else
12617 /* Otherwise into GP registers. */
12618 reg = gpr;
12619 n_reg = rsize;
12620 sav_ofs = 0;
12621 sav_scale = 4;
12622 if (n_reg == 2)
12623 align = 8;
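/* Example (illustrative): a V.4 "double" va_arg with hard floats
   selects reg = fpr, n_reg = 1, sav_ofs = 32 (skipping the 8 saved
   GPRs) and sav_scale = 8, so the value is read from
   reg_save_area + 32 + fpr * 8.  */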
12626 /* Pull the value out of the saved registers.... */
12628 lab_over = NULL;
12629 addr = create_tmp_var (ptr_type_node, "addr");
12631 /* AltiVec vectors never go in registers when -mabi=altivec. */
12632 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12633 align = 16;
12634 else
12636 lab_false = create_artificial_label (input_location);
12637 lab_over = create_artificial_label (input_location);
12639 /* Long long and SPE vectors are aligned in the registers.
12640 So is any other 2-GPR item, such as complex int, due to a
12641 historical mistake. */
12642 u = reg;
12643 if (n_reg == 2 && reg == gpr)
12645 regalign = 1;
12646 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12647 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12648 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12649 unshare_expr (reg), u);
12651 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12652 reg number is 0 for f1, so we want to make it odd. */
12653 else if (reg == fpr && mode == TDmode)
12655 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12656 build_int_cst (TREE_TYPE (reg), 1));
12657 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12660 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12661 t = build2 (GE_EXPR, boolean_type_node, u, t);
12662 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12663 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12664 gimplify_and_add (t, pre_p);
12666 t = sav;
12667 if (sav_ofs)
12668 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12670 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12671 build_int_cst (TREE_TYPE (reg), n_reg));
12672 u = fold_convert (sizetype, u);
12673 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12674 t = fold_build_pointer_plus (t, u);
12676 /* _Decimal32 varargs are located in the second word of the 64-bit
12677 FP register for 32-bit binaries. */
12678 if (TARGET_32BIT
12679 && TARGET_HARD_FLOAT && TARGET_FPRS
12680 && mode == SDmode)
12681 t = fold_build_pointer_plus_hwi (t, size);
12683 gimplify_assign (addr, t, pre_p);
12685 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12687 stmt = gimple_build_label (lab_false);
12688 gimple_seq_add_stmt (pre_p, stmt);
12690 if ((n_reg == 2 && !regalign) || n_reg > 2)
12692 /* Ensure that we don't find any more args in regs.
12693 Alignment has taken care of the special cases. */
12694 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12698 /* ... otherwise out of the overflow area. */
12700 /* Care for on-stack alignment if needed. */
12701 t = ovf;
12702 if (align != 1)
12704 t = fold_build_pointer_plus_hwi (t, align - 1);
12705 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12706 build_int_cst (TREE_TYPE (t), -align));
12708 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12710 gimplify_assign (unshare_expr (addr), t, pre_p);
12712 t = fold_build_pointer_plus_hwi (t, size);
12713 gimplify_assign (unshare_expr (ovf), t, pre_p);
12715 if (lab_over)
12717 stmt = gimple_build_label (lab_over);
12718 gimple_seq_add_stmt (pre_p, stmt);
12721 if (STRICT_ALIGNMENT
12722 && (TYPE_ALIGN (type)
12723 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12725 /* The value (of type complex double, for example) may not be
12726 aligned in memory in the saved registers, so copy via a
12727 temporary. (This is the same code as used for SPARC.) */
12728 tree tmp = create_tmp_var (type, "va_arg_tmp");
12729 tree dest_addr = build_fold_addr_expr (tmp);
12731 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12732 3, dest_addr, addr, size_int (rsize * 4));
12734 gimplify_and_add (copy, pre_p);
12735 addr = dest_addr;
12738 addr = fold_convert (ptrtype, addr);
12739 return build_va_arg_indirect_ref (addr);
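/* To illustrate the register/overflow split above: for
     double d = va_arg (ap, double);
   on 32-bit SVR4 with hard double-precision float, the gimplified
   code tests whether the saved-FPR counter has reached 8; if not it
   loads from the FP save area at sav + 32 + fpr*8 and post-increments
   fpr, and otherwise it rounds the overflow pointer up to an 8-byte
   boundary and reads the value from the overflow area.  */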
12742 /* Builtins. */
12744 static void
12745 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12747 tree t;
12748 unsigned classify = rs6000_builtin_info[(int)code].attr;
12749 const char *attr_string = "";
12751 gcc_assert (name != NULL);
12752 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
12754 if (rs6000_builtin_decls[(int)code])
12755 fatal_error (input_location,
12756 "internal error: builtin function %s already processed", name);
12758 rs6000_builtin_decls[(int)code] = t =
12759 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12761 /* Set any special attributes. */
12762 if ((classify & RS6000_BTC_CONST) != 0)
12764 /* const function, function only depends on the inputs. */
12765 TREE_READONLY (t) = 1;
12766 TREE_NOTHROW (t) = 1;
12767 attr_string = ", const";
12769 else if ((classify & RS6000_BTC_PURE) != 0)
12771 /* pure function, function can read global memory, but does not set any
12772 external state. */
12773 DECL_PURE_P (t) = 1;
12774 TREE_NOTHROW (t) = 1;
12775 attr_string = ", pure";
12777 else if ((classify & RS6000_BTC_FP) != 0)
12779 /* Function is a math function. If -frounding-math is in effect, treat
12780 the function as not reading global memory, but it can have arbitrary
12781 side effects. If it is off, assume the function is a const function.
12782 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12783 builtin-attribute.def that is used for the math functions. */
12784 TREE_NOTHROW (t) = 1;
12785 if (flag_rounding_math)
12787 DECL_PURE_P (t) = 1;
12788 DECL_IS_NOVOPS (t) = 1;
12789 attr_string = ", fp, pure";
12791 else
12793 TREE_READONLY (t) = 1;
12794 attr_string = ", fp, const";
12797 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12798 gcc_unreachable ();
12800 if (TARGET_DEBUG_BUILTIN)
12801 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12802 (int)code, name, attr_string);
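/* An illustrative call (the function-type nodes such as
   v4si_ftype_v4si_v4si are built further down in this file):
     def_builtin ("__builtin_altivec_vmaxsw", v4si_ftype_v4si_v4si,
                  ALTIVEC_BUILTIN_VMAXSW);
   records the decl in rs6000_builtin_decls; for a builtin classified
   RS6000_BTC_CONST, the decl is also marked TREE_READONLY and
   TREE_NOTHROW.  */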
12805 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12807 #undef RS6000_BUILTIN_0
12808 #undef RS6000_BUILTIN_1
12809 #undef RS6000_BUILTIN_2
12810 #undef RS6000_BUILTIN_3
12811 #undef RS6000_BUILTIN_A
12812 #undef RS6000_BUILTIN_D
12813 #undef RS6000_BUILTIN_E
12814 #undef RS6000_BUILTIN_H
12815 #undef RS6000_BUILTIN_P
12816 #undef RS6000_BUILTIN_Q
12817 #undef RS6000_BUILTIN_S
12818 #undef RS6000_BUILTIN_X
12820 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12821 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12822 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12823 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
12824 { MASK, ICODE, NAME, ENUM },
12826 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12827 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12828 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12829 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12830 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12831 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12832 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12833 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12835 static const struct builtin_description bdesc_3arg[] =
12837 #include "rs6000-builtin.def"
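/* The array above is filled in by the "X macro" technique: with the
   definitions just given, every RS6000_BUILTIN_3 entry in
   rs6000-builtin.def expands to an initializer of the form
     { MASK, ICODE, NAME, ENUM },
   while entries of every other arity expand to nothing, so only the
   ternary builtins land in bdesc_3arg.  The same undef/redefine dance
   is repeated below once per builtin class.  */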
12840 /* DST operations: void foo (void *, const int, const char). */
12842 #undef RS6000_BUILTIN_0
12843 #undef RS6000_BUILTIN_1
12844 #undef RS6000_BUILTIN_2
12845 #undef RS6000_BUILTIN_3
12846 #undef RS6000_BUILTIN_A
12847 #undef RS6000_BUILTIN_D
12848 #undef RS6000_BUILTIN_E
12849 #undef RS6000_BUILTIN_H
12850 #undef RS6000_BUILTIN_P
12851 #undef RS6000_BUILTIN_Q
12852 #undef RS6000_BUILTIN_S
12853 #undef RS6000_BUILTIN_X
12855 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12856 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12857 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12858 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12859 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12860 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
12861 { MASK, ICODE, NAME, ENUM },
12863 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12864 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12865 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12866 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12867 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12868 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12870 static const struct builtin_description bdesc_dst[] =
12872 #include "rs6000-builtin.def"
12875 /* Simple binary operations: VECc = foo (VECa, VECb). */
12877 #undef RS6000_BUILTIN_0
12878 #undef RS6000_BUILTIN_1
12879 #undef RS6000_BUILTIN_2
12880 #undef RS6000_BUILTIN_3
12881 #undef RS6000_BUILTIN_A
12882 #undef RS6000_BUILTIN_D
12883 #undef RS6000_BUILTIN_E
12884 #undef RS6000_BUILTIN_H
12885 #undef RS6000_BUILTIN_P
12886 #undef RS6000_BUILTIN_Q
12887 #undef RS6000_BUILTIN_S
12888 #undef RS6000_BUILTIN_X
12890 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12891 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12892 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
12893 { MASK, ICODE, NAME, ENUM },
12895 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12896 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12897 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12898 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12899 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12900 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12901 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12902 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12903 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12905 static const struct builtin_description bdesc_2arg[] =
12907 #include "rs6000-builtin.def"
12910 #undef RS6000_BUILTIN_0
12911 #undef RS6000_BUILTIN_1
12912 #undef RS6000_BUILTIN_2
12913 #undef RS6000_BUILTIN_3
12914 #undef RS6000_BUILTIN_A
12915 #undef RS6000_BUILTIN_D
12916 #undef RS6000_BUILTIN_E
12917 #undef RS6000_BUILTIN_H
12918 #undef RS6000_BUILTIN_P
12919 #undef RS6000_BUILTIN_Q
12920 #undef RS6000_BUILTIN_S
12921 #undef RS6000_BUILTIN_X
12923 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12924 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12925 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12926 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12927 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12928 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12929 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12930 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12931 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
12932 { MASK, ICODE, NAME, ENUM },
12934 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12935 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12936 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12938 /* AltiVec predicates. */
12940 static const struct builtin_description bdesc_altivec_preds[] =
12942 #include "rs6000-builtin.def"
12945 /* SPE predicates. */
12946 #undef RS6000_BUILTIN_0
12947 #undef RS6000_BUILTIN_1
12948 #undef RS6000_BUILTIN_2
12949 #undef RS6000_BUILTIN_3
12950 #undef RS6000_BUILTIN_A
12951 #undef RS6000_BUILTIN_D
12952 #undef RS6000_BUILTIN_E
12953 #undef RS6000_BUILTIN_H
12954 #undef RS6000_BUILTIN_P
12955 #undef RS6000_BUILTIN_Q
12956 #undef RS6000_BUILTIN_S
12957 #undef RS6000_BUILTIN_X
12959 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12960 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12961 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12962 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12963 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12964 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12965 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12966 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12967 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12968 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12969 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
12970 { MASK, ICODE, NAME, ENUM },
12972 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12974 static const struct builtin_description bdesc_spe_predicates[] =
12976 #include "rs6000-builtin.def"
12979 /* SPE evsel predicates. */
12980 #undef RS6000_BUILTIN_0
12981 #undef RS6000_BUILTIN_1
12982 #undef RS6000_BUILTIN_2
12983 #undef RS6000_BUILTIN_3
12984 #undef RS6000_BUILTIN_A
12985 #undef RS6000_BUILTIN_D
12986 #undef RS6000_BUILTIN_E
12987 #undef RS6000_BUILTIN_H
12988 #undef RS6000_BUILTIN_P
12989 #undef RS6000_BUILTIN_Q
12990 #undef RS6000_BUILTIN_S
12991 #undef RS6000_BUILTIN_X
12993 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12994 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12995 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12996 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12997 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12998 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12999 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
13000 { MASK, ICODE, NAME, ENUM },
13002 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13003 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13004 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13005 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13006 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13008 static const struct builtin_description bdesc_spe_evsel[] =
13010 #include "rs6000-builtin.def"
13013 /* PAIRED predicates. */
13014 #undef RS6000_BUILTIN_0
13015 #undef RS6000_BUILTIN_1
13016 #undef RS6000_BUILTIN_2
13017 #undef RS6000_BUILTIN_3
13018 #undef RS6000_BUILTIN_A
13019 #undef RS6000_BUILTIN_D
13020 #undef RS6000_BUILTIN_E
13021 #undef RS6000_BUILTIN_H
13022 #undef RS6000_BUILTIN_P
13023 #undef RS6000_BUILTIN_Q
13024 #undef RS6000_BUILTIN_S
13025 #undef RS6000_BUILTIN_X
13027 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13028 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13029 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13030 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13031 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13032 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13033 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13034 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13035 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13036 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13037 { MASK, ICODE, NAME, ENUM },
13039 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13040 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13042 static const struct builtin_description bdesc_paired_preds[] =
13044 #include "rs6000-builtin.def"
13047 /* ABS* operations. */
13049 #undef RS6000_BUILTIN_0
13050 #undef RS6000_BUILTIN_1
13051 #undef RS6000_BUILTIN_2
13052 #undef RS6000_BUILTIN_3
13053 #undef RS6000_BUILTIN_A
13054 #undef RS6000_BUILTIN_D
13055 #undef RS6000_BUILTIN_E
13056 #undef RS6000_BUILTIN_H
13057 #undef RS6000_BUILTIN_P
13058 #undef RS6000_BUILTIN_Q
13059 #undef RS6000_BUILTIN_S
13060 #undef RS6000_BUILTIN_X
13062 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13063 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13064 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13065 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13066 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13067 { MASK, ICODE, NAME, ENUM },
13069 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13070 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13071 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13072 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13073 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13074 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13075 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13077 static const struct builtin_description bdesc_abs[] =
13079 #include "rs6000-builtin.def"
13082 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13083 foo (VECa). */
13085 #undef RS6000_BUILTIN_0
13086 #undef RS6000_BUILTIN_1
13087 #undef RS6000_BUILTIN_2
13088 #undef RS6000_BUILTIN_3
13089 #undef RS6000_BUILTIN_A
13090 #undef RS6000_BUILTIN_D
13091 #undef RS6000_BUILTIN_E
13092 #undef RS6000_BUILTIN_H
13093 #undef RS6000_BUILTIN_P
13094 #undef RS6000_BUILTIN_Q
13095 #undef RS6000_BUILTIN_S
13096 #undef RS6000_BUILTIN_X
13098 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13099 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13100 { MASK, ICODE, NAME, ENUM },
13102 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13103 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13104 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13105 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13106 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13107 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13108 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13109 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13110 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13111 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13113 static const struct builtin_description bdesc_1arg[] =
13115 #include "rs6000-builtin.def"
13118 /* Simple no-argument operations: result = __builtin_darn_32 () */
13120 #undef RS6000_BUILTIN_0
13121 #undef RS6000_BUILTIN_1
13122 #undef RS6000_BUILTIN_2
13123 #undef RS6000_BUILTIN_3
13124 #undef RS6000_BUILTIN_A
13125 #undef RS6000_BUILTIN_D
13126 #undef RS6000_BUILTIN_E
13127 #undef RS6000_BUILTIN_H
13128 #undef RS6000_BUILTIN_P
13129 #undef RS6000_BUILTIN_Q
13130 #undef RS6000_BUILTIN_S
13131 #undef RS6000_BUILTIN_X
13133 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13134 { MASK, ICODE, NAME, ENUM },
13136 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13137 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13138 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13139 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13140 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13141 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13142 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13143 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13144 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13145 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13146 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13148 static const struct builtin_description bdesc_0arg[] =
13150 #include "rs6000-builtin.def"
13153 /* HTM builtins. */
13154 #undef RS6000_BUILTIN_0
13155 #undef RS6000_BUILTIN_1
13156 #undef RS6000_BUILTIN_2
13157 #undef RS6000_BUILTIN_3
13158 #undef RS6000_BUILTIN_A
13159 #undef RS6000_BUILTIN_D
13160 #undef RS6000_BUILTIN_E
13161 #undef RS6000_BUILTIN_H
13162 #undef RS6000_BUILTIN_P
13163 #undef RS6000_BUILTIN_Q
13164 #undef RS6000_BUILTIN_S
13165 #undef RS6000_BUILTIN_X
13167 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13168 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13169 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13170 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13171 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13172 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13173 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13174 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13175 { MASK, ICODE, NAME, ENUM },
13177 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13178 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13179 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13180 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13182 static const struct builtin_description bdesc_htm[] =
13184 #include "rs6000-builtin.def"
13187 #undef RS6000_BUILTIN_0
13188 #undef RS6000_BUILTIN_1
13189 #undef RS6000_BUILTIN_2
13190 #undef RS6000_BUILTIN_3
13191 #undef RS6000_BUILTIN_A
13192 #undef RS6000_BUILTIN_D
13193 #undef RS6000_BUILTIN_E
13194 #undef RS6000_BUILTIN_H
13195 #undef RS6000_BUILTIN_P
13196 #undef RS6000_BUILTIN_Q
13197 #undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
13199 /* Return true if a builtin function is overloaded. */
13200 bool
13201 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13203 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13206 /* Expand an expression EXP that calls a builtin without arguments. */
13207 static rtx
13208 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13210 rtx pat;
13211 machine_mode tmode = insn_data[icode].operand[0].mode;
13213 if (icode == CODE_FOR_nothing)
13214 /* Builtin not supported on this processor. */
13215 return 0;
13217 if (target == 0
13218 || GET_MODE (target) != tmode
13219 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13220 target = gen_reg_rtx (tmode);
13222 pat = GEN_FCN (icode) (target);
13223 if (! pat)
13224 return 0;
13225 emit_insn (pat);
13227 return target;
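/* For example, __builtin_darn_32 () arrives here; with no operands to
   validate, the expander just makes sure TARGET is an acceptable
   register of the instruction's result mode and emits the single
   instruction named by ICODE.  */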
13231 static rtx
13232 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13234 rtx pat;
13235 tree arg0 = CALL_EXPR_ARG (exp, 0);
13236 tree arg1 = CALL_EXPR_ARG (exp, 1);
13237 rtx op0 = expand_normal (arg0);
13238 rtx op1 = expand_normal (arg1);
13239 machine_mode mode0 = insn_data[icode].operand[0].mode;
13240 machine_mode mode1 = insn_data[icode].operand[1].mode;
13242 if (icode == CODE_FOR_nothing)
13243 /* Builtin not supported on this processor. */
13244 return 0;
13246 /* If we got invalid arguments bail out before generating bad rtl. */
13247 if (arg0 == error_mark_node || arg1 == error_mark_node)
13248 return const0_rtx;
13250 if (GET_CODE (op0) != CONST_INT
13251 || INTVAL (op0) > 255
13252 || INTVAL (op0) < 0)
13254 error ("argument 1 must be an 8-bit field value");
13255 return const0_rtx;
13258 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13259 op0 = copy_to_mode_reg (mode0, op0);
13261 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13262 op1 = copy_to_mode_reg (mode1, op1);
13264 pat = GEN_FCN (icode) (op0, op1);
13265 if (! pat)
13266 return const0_rtx;
13267 emit_insn (pat);
13269 return NULL_RTX;
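/* E.g. __builtin_mtfsf (0xff, d) copies all eight 4-bit FPSCR fields
   from the double D.  The field mask is an immediate in the mtfsf
   encoding, which is why a non-CONST_INT first operand is rejected
   above instead of being forced into a register.  */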
13272 static rtx
13273 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13275 rtx pat;
13276 tree arg0 = CALL_EXPR_ARG (exp, 0);
13277 rtx op0 = expand_normal (arg0);
13278 machine_mode tmode = insn_data[icode].operand[0].mode;
13279 machine_mode mode0 = insn_data[icode].operand[1].mode;
13281 if (icode == CODE_FOR_nothing)
13282 /* Builtin not supported on this processor. */
13283 return 0;
13285 /* If we got invalid arguments bail out before generating bad rtl. */
13286 if (arg0 == error_mark_node)
13287 return const0_rtx;
13289 if (icode == CODE_FOR_altivec_vspltisb
13290 || icode == CODE_FOR_altivec_vspltish
13291 || icode == CODE_FOR_altivec_vspltisw
13292 || icode == CODE_FOR_spe_evsplatfi
13293 || icode == CODE_FOR_spe_evsplati)
13295 /* Only allow 5-bit *signed* literals. */
13296 if (GET_CODE (op0) != CONST_INT
13297 || INTVAL (op0) > 15
13298 || INTVAL (op0) < -16)
13300 error ("argument 1 must be a 5-bit signed literal");
13301 return const0_rtx;
13305 if (target == 0
13306 || GET_MODE (target) != tmode
13307 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13308 target = gen_reg_rtx (tmode);
13310 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13311 op0 = copy_to_mode_reg (mode0, op0);
13313 pat = GEN_FCN (icode) (target, op0);
13314 if (! pat)
13315 return 0;
13316 emit_insn (pat);
13318 return target;
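/* For instance, vec_splat_s8 (3) maps to __builtin_altivec_vspltisb;
   the vspltisb instruction encodes its operand in a 5-bit signed
   immediate field, so anything outside [-16, 15], or a non-literal
   argument, is diagnosed here rather than silently truncated.  */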
13321 static rtx
13322 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13324 rtx pat, scratch1, scratch2;
13325 tree arg0 = CALL_EXPR_ARG (exp, 0);
13326 rtx op0 = expand_normal (arg0);
13327 machine_mode tmode = insn_data[icode].operand[0].mode;
13328 machine_mode mode0 = insn_data[icode].operand[1].mode;
13330 /* If we have invalid arguments, bail out before generating bad rtl. */
13331 if (arg0 == error_mark_node)
13332 return const0_rtx;
13334 if (target == 0
13335 || GET_MODE (target) != tmode
13336 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13337 target = gen_reg_rtx (tmode);
13339 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13340 op0 = copy_to_mode_reg (mode0, op0);
13342 scratch1 = gen_reg_rtx (mode0);
13343 scratch2 = gen_reg_rtx (mode0);
13345 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13346 if (! pat)
13347 return 0;
13348 emit_insn (pat);
13350 return target;
13353 static rtx
13354 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13356 rtx pat;
13357 tree arg0 = CALL_EXPR_ARG (exp, 0);
13358 tree arg1 = CALL_EXPR_ARG (exp, 1);
13359 rtx op0 = expand_normal (arg0);
13360 rtx op1 = expand_normal (arg1);
13361 machine_mode tmode = insn_data[icode].operand[0].mode;
13362 machine_mode mode0 = insn_data[icode].operand[1].mode;
13363 machine_mode mode1 = insn_data[icode].operand[2].mode;
13365 if (icode == CODE_FOR_nothing)
13366 /* Builtin not supported on this processor. */
13367 return 0;
13369 /* If we got invalid arguments bail out before generating bad rtl. */
13370 if (arg0 == error_mark_node || arg1 == error_mark_node)
13371 return const0_rtx;
13373 if (icode == CODE_FOR_altivec_vcfux
13374 || icode == CODE_FOR_altivec_vcfsx
13375 || icode == CODE_FOR_altivec_vctsxs
13376 || icode == CODE_FOR_altivec_vctuxs
13377 || icode == CODE_FOR_altivec_vspltb
13378 || icode == CODE_FOR_altivec_vsplth
13379 || icode == CODE_FOR_altivec_vspltw
13380 || icode == CODE_FOR_spe_evaddiw
13381 || icode == CODE_FOR_spe_evldd
13382 || icode == CODE_FOR_spe_evldh
13383 || icode == CODE_FOR_spe_evldw
13384 || icode == CODE_FOR_spe_evlhhesplat
13385 || icode == CODE_FOR_spe_evlhhossplat
13386 || icode == CODE_FOR_spe_evlhhousplat
13387 || icode == CODE_FOR_spe_evlwhe
13388 || icode == CODE_FOR_spe_evlwhos
13389 || icode == CODE_FOR_spe_evlwhou
13390 || icode == CODE_FOR_spe_evlwhsplat
13391 || icode == CODE_FOR_spe_evlwwsplat
13392 || icode == CODE_FOR_spe_evrlwi
13393 || icode == CODE_FOR_spe_evslwi
13394 || icode == CODE_FOR_spe_evsrwis
13395 || icode == CODE_FOR_spe_evsubifw
13396 || icode == CODE_FOR_spe_evsrwiu)
13398 /* Only allow 5-bit unsigned literals. */
13399 STRIP_NOPS (arg1);
13400 if (TREE_CODE (arg1) != INTEGER_CST
13401 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13403 error ("argument 2 must be a 5-bit unsigned literal");
13404 return const0_rtx;
13407 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13408 || icode == CODE_FOR_dfptstsfi_lt_dd
13409 || icode == CODE_FOR_dfptstsfi_gt_dd
13410 || icode == CODE_FOR_dfptstsfi_unordered_dd
13411 || icode == CODE_FOR_dfptstsfi_eq_td
13412 || icode == CODE_FOR_dfptstsfi_lt_td
13413 || icode == CODE_FOR_dfptstsfi_gt_td
13414 || icode == CODE_FOR_dfptstsfi_unordered_td)
13416 /* Only allow 6-bit unsigned literals. */
13417 STRIP_NOPS (arg0);
13418 if (TREE_CODE (arg0) != INTEGER_CST
13419 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13421 error ("argument 1 must be a 6-bit unsigned literal");
13422 return CONST0_RTX (tmode);
13426 if (target == 0
13427 || GET_MODE (target) != tmode
13428 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13429 target = gen_reg_rtx (tmode);
13431 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13432 op0 = copy_to_mode_reg (mode0, op0);
13433 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13434 op1 = copy_to_mode_reg (mode1, op1);
13436 pat = GEN_FCN (icode) (target, op0, op1);
13437 if (! pat)
13438 return 0;
13439 emit_insn (pat);
13441 return target;
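/* For the DFP significance tests handled above (the dtstsfi family of
   instructions), the reference number of significant digits is held in
   a 6-bit immediate field of the instruction encoding, hence the 0..63
   restriction diagnosed for argument 1.  */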
13444 static rtx
13445 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13447 rtx pat, scratch;
13448 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13449 tree arg0 = CALL_EXPR_ARG (exp, 1);
13450 tree arg1 = CALL_EXPR_ARG (exp, 2);
13451 rtx op0 = expand_normal (arg0);
13452 rtx op1 = expand_normal (arg1);
13453 machine_mode tmode = SImode;
13454 machine_mode mode0 = insn_data[icode].operand[1].mode;
13455 machine_mode mode1 = insn_data[icode].operand[2].mode;
13456 int cr6_form_int;
13458 if (TREE_CODE (cr6_form) != INTEGER_CST)
13460 error ("argument 1 of __builtin_altivec_predicate must be a constant");
13461 return const0_rtx;
13463 else
13464 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13466 gcc_assert (mode0 == mode1);
13468 /* If we have invalid arguments, bail out before generating bad rtl. */
13469 if (arg0 == error_mark_node || arg1 == error_mark_node)
13470 return const0_rtx;
13472 if (target == 0
13473 || GET_MODE (target) != tmode
13474 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13475 target = gen_reg_rtx (tmode);
13477 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13478 op0 = copy_to_mode_reg (mode0, op0);
13479 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13480 op1 = copy_to_mode_reg (mode1, op1);
13482 scratch = gen_reg_rtx (mode0);
13484 pat = GEN_FCN (icode) (scratch, op0, op1);
13485 if (! pat)
13486 return 0;
13487 emit_insn (pat);
13489 /* The vec_any* and vec_all* predicates use the same opcodes for two
13490 different operations, but the bits in CR6 will be different
13491 depending on what information we want. So we have to play tricks
13492 with CR6 to get the right bits out.
13494 If you think this is disgusting, look at the specs for the
13495 AltiVec predicates. */
13497 switch (cr6_form_int)
13499 case 0:
13500 emit_insn (gen_cr6_test_for_zero (target));
13501 break;
13502 case 1:
13503 emit_insn (gen_cr6_test_for_zero_reverse (target));
13504 break;
13505 case 2:
13506 emit_insn (gen_cr6_test_for_lt (target));
13507 break;
13508 case 3:
13509 emit_insn (gen_cr6_test_for_lt_reverse (target));
13510 break;
13511 default:
13512 error ("argument 1 of __builtin_altivec_predicate is out of range");
13513 break;
13516 return target;
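/* For example, altivec.h implements vec_all_eq as a vcmpequ*
   predicate call with cr6_form __CR6_LT (2), so the "all elements
   equal" answer is the CR6 LT bit extracted by gen_cr6_test_for_lt,
   while vec_any_eq passes __CR6_EQ_REV (1) to obtain the complement
   of the "no elements equal" EQ bit.  */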
13519 static rtx
13520 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
13522 rtx pat, addr;
13523 tree arg0 = CALL_EXPR_ARG (exp, 0);
13524 tree arg1 = CALL_EXPR_ARG (exp, 1);
13525 machine_mode tmode = insn_data[icode].operand[0].mode;
13526 machine_mode mode0 = Pmode;
13527 machine_mode mode1 = Pmode;
13528 rtx op0 = expand_normal (arg0);
13529 rtx op1 = expand_normal (arg1);
13531 if (icode == CODE_FOR_nothing)
13532 /* Builtin not supported on this processor. */
13533 return 0;
13535 /* If we got invalid arguments bail out before generating bad rtl. */
13536 if (arg0 == error_mark_node || arg1 == error_mark_node)
13537 return const0_rtx;
13539 if (target == 0
13540 || GET_MODE (target) != tmode
13541 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13542 target = gen_reg_rtx (tmode);
13544 op1 = copy_to_mode_reg (mode1, op1);
13546 if (op0 == const0_rtx)
13548 addr = gen_rtx_MEM (tmode, op1);
13550 else
13552 op0 = copy_to_mode_reg (mode0, op0);
13553 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
13556 pat = GEN_FCN (icode) (target, addr);
13558 if (! pat)
13559 return 0;
13560 emit_insn (pat);
13562 return target;
13565 /* Return a constant vector for use as a little-endian permute control vector
13566 to reverse the order of elements of the given vector mode. */
13567 static rtx
13568 swap_selector_for_mode (machine_mode mode)
13570 /* These are little endian vectors, so their elements are reversed
13571 from what you would normally expect for a permute control vector. */
13572 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13573 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13574 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13575 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
13576 unsigned int *swaparray, i;
13577 rtx perm[16];
13579 switch (mode)
13581 case V2DFmode:
13582 case V2DImode:
13583 swaparray = swap2;
13584 break;
13585 case V4SFmode:
13586 case V4SImode:
13587 swaparray = swap4;
13588 break;
13589 case V8HImode:
13590 swaparray = swap8;
13591 break;
13592 case V16QImode:
13593 swaparray = swap16;
13594 break;
13595 default:
13596 gcc_unreachable ();
13599 for (i = 0; i < 16; ++i)
13600 perm[i] = GEN_INT (swaparray[i]);
13602 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
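/* For V4SImode, for instance, the selector built from swap4 makes
   vperm reverse the order of the four word elements.  The expanders
   below pair each memory access with one such permute so that code
   compiled with -maltivec=be sees big-endian element numbering on a
   little-endian target.  */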
13605 /* Generate code for an "lvxl", or "lve*x" built-in for a little endian target
13606 with -maltivec=be specified. Issue the load followed by an element-
13607 reversing permute. */
13608 void
13609 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13611 rtx tmp = gen_reg_rtx (mode);
13612 rtx load = gen_rtx_SET (tmp, op1);
13613 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
13614 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
13615 rtx sel = swap_selector_for_mode (mode);
13616 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
13618 gcc_assert (REG_P (op0));
13619 emit_insn (par);
13620 emit_insn (gen_rtx_SET (op0, vperm));
13623 /* Generate code for a "stvxl" built-in for a little endian target with
13624 -maltivec=be specified. Issue the store preceded by an element-reversing
13625 permute. */
13626 void
13627 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13629 rtx tmp = gen_reg_rtx (mode);
13630 rtx store = gen_rtx_SET (op0, tmp);
13631 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
13632 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
13633 rtx sel = swap_selector_for_mode (mode);
13634 rtx vperm;
13636 gcc_assert (REG_P (op1));
13637 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
13638 emit_insn (gen_rtx_SET (tmp, vperm));
13639 emit_insn (par);
13642 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
13643 specified. Issue the store preceded by an element-reversing permute. */
13644 void
13645 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13647 machine_mode inner_mode = GET_MODE_INNER (mode);
13648 rtx tmp = gen_reg_rtx (mode);
13649 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
13650 rtx sel = swap_selector_for_mode (mode);
13651 rtx vperm;
13653 gcc_assert (REG_P (op1));
13654 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
13655 emit_insn (gen_rtx_SET (tmp, vperm));
13656 emit_insn (gen_rtx_SET (op0, stvx));
13659 static rtx
13660 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13662 rtx pat, addr;
13663 tree arg0 = CALL_EXPR_ARG (exp, 0);
13664 tree arg1 = CALL_EXPR_ARG (exp, 1);
13665 machine_mode tmode = insn_data[icode].operand[0].mode;
13666 machine_mode mode0 = Pmode;
13667 machine_mode mode1 = Pmode;
13668 rtx op0 = expand_normal (arg0);
13669 rtx op1 = expand_normal (arg1);
13671 if (icode == CODE_FOR_nothing)
13672 /* Builtin not supported on this processor. */
13673 return 0;
13675 /* If we got invalid arguments bail out before generating bad rtl. */
13676 if (arg0 == error_mark_node || arg1 == error_mark_node)
13677 return const0_rtx;
13679 if (target == 0
13680 || GET_MODE (target) != tmode
13681 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13682 target = gen_reg_rtx (tmode);
13684 op1 = copy_to_mode_reg (mode1, op1);
13686 /* For LVX, express the RTL accurately by ANDing the address with -16.
13687 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13688 so the raw address is fine. */
13689 if (icode == CODE_FOR_altivec_lvx_v2df_2op
13690 || icode == CODE_FOR_altivec_lvx_v2di_2op
13691 || icode == CODE_FOR_altivec_lvx_v4sf_2op
13692 || icode == CODE_FOR_altivec_lvx_v4si_2op
13693 || icode == CODE_FOR_altivec_lvx_v8hi_2op
13694 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
13696 rtx rawaddr;
13697 if (op0 == const0_rtx)
13698 rawaddr = op1;
13699 else
13701 op0 = copy_to_mode_reg (mode0, op0);
13702 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13704 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13705 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13707 /* For -maltivec=be, emit the load and follow it up with a
13708 permute to swap the elements. */
13709 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
13711 rtx temp = gen_reg_rtx (tmode);
13712 emit_insn (gen_rtx_SET (temp, addr));
13714 rtx sel = swap_selector_for_mode (tmode);
13715 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
13716 UNSPEC_VPERM);
13717 emit_insn (gen_rtx_SET (target, vperm));
13719 else
13720 emit_insn (gen_rtx_SET (target, addr));
13722 else
13724 if (op0 == const0_rtx)
13725 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13726 else
13728 op0 = copy_to_mode_reg (mode0, op0);
13729 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13730 gen_rtx_PLUS (Pmode, op1, op0));
13733 pat = GEN_FCN (icode) (target, addr);
13734 if (! pat)
13735 return 0;
13736 emit_insn (pat);
13739 return target;
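/* The AND with -16 matches the hardware: lvx ignores the low four
   bits of the effective address, so a load from an address P with
   (P & 15) == 4 really reads the 16-byte block at P & -16.  Making
   that explicit in the RTL keeps alias analysis honest about which
   bytes the insn touches.  */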
13742 static rtx
13743 spe_expand_stv_builtin (enum insn_code icode, tree exp)
13745 tree arg0 = CALL_EXPR_ARG (exp, 0);
13746 tree arg1 = CALL_EXPR_ARG (exp, 1);
13747 tree arg2 = CALL_EXPR_ARG (exp, 2);
13748 rtx op0 = expand_normal (arg0);
13749 rtx op1 = expand_normal (arg1);
13750 rtx op2 = expand_normal (arg2);
13751 rtx pat;
13752 machine_mode mode0 = insn_data[icode].operand[0].mode;
13753 machine_mode mode1 = insn_data[icode].operand[1].mode;
13754 machine_mode mode2 = insn_data[icode].operand[2].mode;
13756 /* Invalid arguments. Bail before doing anything stoopid! */
13757 if (arg0 == error_mark_node
13758 || arg1 == error_mark_node
13759 || arg2 == error_mark_node)
13760 return const0_rtx;
13762 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
13763 op0 = copy_to_mode_reg (mode2, op0);
13764 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
13765 op1 = copy_to_mode_reg (mode0, op1);
13766 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
13767 op2 = copy_to_mode_reg (mode1, op2);
13769 pat = GEN_FCN (icode) (op1, op2, op0);
13770 if (pat)
13771 emit_insn (pat);
13772 return NULL_RTX;
13775 static rtx
13776 paired_expand_stv_builtin (enum insn_code icode, tree exp)
13778 tree arg0 = CALL_EXPR_ARG (exp, 0);
13779 tree arg1 = CALL_EXPR_ARG (exp, 1);
13780 tree arg2 = CALL_EXPR_ARG (exp, 2);
13781 rtx op0 = expand_normal (arg0);
13782 rtx op1 = expand_normal (arg1);
13783 rtx op2 = expand_normal (arg2);
13784 rtx pat, addr;
13785 machine_mode tmode = insn_data[icode].operand[0].mode;
13786 machine_mode mode1 = Pmode;
13787 machine_mode mode2 = Pmode;
13789 /* Invalid arguments. Bail before doing anything stoopid! */
13790 if (arg0 == error_mark_node
13791 || arg1 == error_mark_node
13792 || arg2 == error_mark_node)
13793 return const0_rtx;
13795 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
13796 op0 = copy_to_mode_reg (tmode, op0);
13798 op2 = copy_to_mode_reg (mode2, op2);
13800 if (op1 == const0_rtx)
13802 addr = gen_rtx_MEM (tmode, op2);
13804 else
13806 op1 = copy_to_mode_reg (mode1, op1);
13807 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
13810 pat = GEN_FCN (icode) (addr, op0);
13811 if (pat)
13812 emit_insn (pat);
13813 return NULL_RTX;
13816 static rtx
13817 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13819 tree arg0 = CALL_EXPR_ARG (exp, 0);
13820 tree arg1 = CALL_EXPR_ARG (exp, 1);
13821 tree arg2 = CALL_EXPR_ARG (exp, 2);
13822 rtx op0 = expand_normal (arg0);
13823 rtx op1 = expand_normal (arg1);
13824 rtx op2 = expand_normal (arg2);
13825 rtx pat, addr, rawaddr;
13826 machine_mode tmode = insn_data[icode].operand[0].mode;
13827 machine_mode smode = insn_data[icode].operand[1].mode;
13828 machine_mode mode1 = Pmode;
13829 machine_mode mode2 = Pmode;
13831 /* Invalid arguments. Bail before doing anything stoopid! */
13832 if (arg0 == error_mark_node
13833 || arg1 == error_mark_node
13834 || arg2 == error_mark_node)
13835 return const0_rtx;
13837 op2 = copy_to_mode_reg (mode2, op2);
13839 /* For STVX, express the RTL accurately by ANDing the address with -16.
13840 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13841 so the raw address is fine. */
13842 if (icode == CODE_FOR_altivec_stvx_v2df_2op
13843 || icode == CODE_FOR_altivec_stvx_v2di_2op
13844 || icode == CODE_FOR_altivec_stvx_v4sf_2op
13845 || icode == CODE_FOR_altivec_stvx_v4si_2op
13846 || icode == CODE_FOR_altivec_stvx_v8hi_2op
13847 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
13849 if (op1 == const0_rtx)
13850 rawaddr = op2;
13851 else
13853 op1 = copy_to_mode_reg (mode1, op1);
13854 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13857 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13858 addr = gen_rtx_MEM (tmode, addr);
13860 op0 = copy_to_mode_reg (tmode, op0);
13862 /* For -maltivec=be, emit a permute to swap the elements, followed
13863 by the store. */
13864 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
13866 rtx temp = gen_reg_rtx (tmode);
13867 rtx sel = swap_selector_for_mode (tmode);
13868 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
13869 UNSPEC_VPERM);
13870 emit_insn (gen_rtx_SET (temp, vperm));
13871 emit_insn (gen_rtx_SET (addr, temp));
13873 else
13874 emit_insn (gen_rtx_SET (addr, op0));
13876 else
13878 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13879 op0 = copy_to_mode_reg (smode, op0);
13881 if (op1 == const0_rtx)
13882 addr = gen_rtx_MEM (tmode, op2);
13883 else
13885 op1 = copy_to_mode_reg (mode1, op1);
13886 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13889 pat = GEN_FCN (icode) (addr, op0);
13890 if (pat)
13891 emit_insn (pat);
13894 return NULL_RTX;
13897 /* Return the appropriate SPR number associated with the given builtin. */
13898 static inline HOST_WIDE_INT
13899 htm_spr_num (enum rs6000_builtins code)
13901 if (code == HTM_BUILTIN_GET_TFHAR
13902 || code == HTM_BUILTIN_SET_TFHAR)
13903 return TFHAR_SPR;
13904 else if (code == HTM_BUILTIN_GET_TFIAR
13905 || code == HTM_BUILTIN_SET_TFIAR)
13906 return TFIAR_SPR;
13907 else if (code == HTM_BUILTIN_GET_TEXASR
13908 || code == HTM_BUILTIN_SET_TEXASR)
13909 return TEXASR_SPR;
13910 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13911 || code == HTM_BUILTIN_SET_TEXASRU);
13912 return TEXASRU_SPR;
13915 /* Return the appropriate SPR regno associated with the given builtin. */
13916 static inline HOST_WIDE_INT
13917 htm_spr_regno (enum rs6000_builtins code)
13919 if (code == HTM_BUILTIN_GET_TFHAR
13920 || code == HTM_BUILTIN_SET_TFHAR)
13921 return TFHAR_REGNO;
13922 else if (code == HTM_BUILTIN_GET_TFIAR
13923 || code == HTM_BUILTIN_SET_TFIAR)
13924 return TFIAR_REGNO;
13925 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
13926 || code == HTM_BUILTIN_SET_TEXASR
13927 || code == HTM_BUILTIN_GET_TEXASRU
13928 || code == HTM_BUILTIN_SET_TEXASRU);
13929 return TEXASR_REGNO;
13932 /* Return the correct ICODE value depending on whether we are
13933 setting or reading the HTM SPRs. */
13934 static inline enum insn_code
13935 rs6000_htm_spr_icode (bool nonvoid)
13937 if (nonvoid)
13938 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13939 else
13940 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13943 /* Expand the HTM builtin in EXP and store the result in TARGET.
13944 Store true in *EXPANDEDP if we found a builtin to expand. */
13945 static rtx
13946 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13948 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13949 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13950 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13951 const struct builtin_description *d;
13952 size_t i;
13954 *expandedp = true;
13956 if (!TARGET_POWERPC64
13957 && (fcode == HTM_BUILTIN_TABORTDC
13958 || fcode == HTM_BUILTIN_TABORTDCI))
13960 size_t uns_fcode = (size_t)fcode;
13961 const char *name = rs6000_builtin_info[uns_fcode].name;
13962 error ("builtin %s is only valid in 64-bit mode", name);
13963 return const0_rtx;
13966 /* Expand the HTM builtins. */
13967 d = bdesc_htm;
13968 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13969 if (d->code == fcode)
13971 rtx op[MAX_HTM_OPERANDS], pat;
13972 int nopnds = 0;
13973 tree arg;
13974 call_expr_arg_iterator iter;
13975 unsigned attr = rs6000_builtin_info[fcode].attr;
13976 enum insn_code icode = d->icode;
13977 const struct insn_operand_data *insn_op;
13978 bool uses_spr = (attr & RS6000_BTC_SPR);
13979 rtx cr = NULL_RTX;
13981 if (uses_spr)
13982 icode = rs6000_htm_spr_icode (nonvoid);
13983 insn_op = &insn_data[icode].operand[0];
13985 if (nonvoid)
13987 machine_mode tmode = (uses_spr) ? insn_op->mode : SImode;
13988 if (!target
13989 || GET_MODE (target) != tmode
13990 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13991 target = gen_reg_rtx (tmode);
13992 if (uses_spr)
13993 op[nopnds++] = target;
13996 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13998 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13999 return const0_rtx;
14001 insn_op = &insn_data[icode].operand[nopnds];
14003 op[nopnds] = expand_normal (arg);
14005 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14007 if (!strcmp (insn_op->constraint, "n"))
14009 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14010 if (!CONST_INT_P (op[nopnds]))
14011 error ("argument %d must be an unsigned literal", arg_num);
14012 else
14013 error ("argument %d is an unsigned literal that is "
14014 "out of range", arg_num);
14015 return const0_rtx;
14017 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14020 nopnds++;
14023 /* Handle the builtins for extended mnemonics. These accept
14024 no arguments, but map to builtins that take arguments. */
14025 switch (fcode)
14027 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14028 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14029 op[nopnds++] = GEN_INT (1);
14030 if (flag_checking)
14031 attr |= RS6000_BTC_UNARY;
14032 break;
14033 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14034 op[nopnds++] = GEN_INT (0);
14035 if (flag_checking)
14036 attr |= RS6000_BTC_UNARY;
14037 break;
14038 default:
14039 break;
14042 /* If this builtin accesses SPRs, then pass in the appropriate
14043 SPR number and SPR regno as the last two operands. */
14044 if (uses_spr)
14046 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14047 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14048 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14050 /* If this builtin accesses a CR, then pass in a scratch
14051 CR as the last operand. */
14052 else if (attr & RS6000_BTC_CR)
14053 { cr = gen_reg_rtx (CCmode);
14054 op[nopnds++] = cr;
14057 if (flag_checking)
14059 int expected_nopnds = 0;
14060 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14061 expected_nopnds = 1;
14062 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14063 expected_nopnds = 2;
14064 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14065 expected_nopnds = 3;
14066 if (!(attr & RS6000_BTC_VOID))
14067 expected_nopnds += 1;
14068 if (uses_spr)
14069 expected_nopnds += 2;
14071 gcc_assert (nopnds == expected_nopnds
14072 && nopnds <= MAX_HTM_OPERANDS);
14075 switch (nopnds)
14077 case 1:
14078 pat = GEN_FCN (icode) (op[0]);
14079 break;
14080 case 2:
14081 pat = GEN_FCN (icode) (op[0], op[1]);
14082 break;
14083 case 3:
14084 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14085 break;
14086 case 4:
14087 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14088 break;
14089 default:
14090 gcc_unreachable ();
14092 if (!pat)
14093 return NULL_RTX;
14094 emit_insn (pat);
14096 if (attr & RS6000_BTC_CR)
14098 if (fcode == HTM_BUILTIN_TBEGIN)
14100 /* Emit code to set TARGET to true or false depending on
14101 whether the tbegin. instruction successfully or failed
14102 to start a transaction. We do this by placing the 1's
14103 complement of CR's EQ bit into TARGET. */
14104 rtx scratch = gen_reg_rtx (SImode);
14105 emit_insn (gen_rtx_SET (scratch,
14106 gen_rtx_EQ (SImode, cr,
14107 const0_rtx)));
14108 emit_insn (gen_rtx_SET (target,
14109 gen_rtx_XOR (SImode, scratch,
14110 GEN_INT (1))));
14112 else
14114 /* Emit code to copy the 4-bit condition register field
14115 CR into the least significant end of register TARGET. */
14116 rtx scratch1 = gen_reg_rtx (SImode);
14117 rtx scratch2 = gen_reg_rtx (SImode);
14118 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14119 emit_insn (gen_movcc (subreg, cr));
14120 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14121 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14125 if (nonvoid)
14126 return target;
14127 return const0_rtx;
14130 *expandedp = false;
14131 return NULL_RTX;
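/* The CR handling above is what gives __builtin_tbegin (0) its
   documented meaning: tbegin. sets the EQ bit of the CR field when
   the transaction fails to start, TARGET receives the complement,
   and so
     if (__builtin_tbegin (0))
       ... transactional path ...
   enters the guarded code exactly when the transaction is live.  */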
14134 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14136 static rtx
14137 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14138 rtx target)
14140 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14141 if (fcode == RS6000_BUILTIN_CPU_INIT)
14142 return const0_rtx;
14144 if (target == 0 || GET_MODE (target) != SImode)
14145 target = gen_reg_rtx (SImode);
14147 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14148 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14149 if (TREE_CODE (arg) != STRING_CST)
14151 error ("builtin %s only accepts a string argument",
14152 rs6000_builtin_info[(size_t) fcode].name);
14153 return const0_rtx;
14156 if (fcode == RS6000_BUILTIN_CPU_IS)
14158 const char *cpu = TREE_STRING_POINTER (arg);
14159 rtx cpuid = NULL_RTX;
14160 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14161 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14163 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14164 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14165 break;
14167 if (cpuid == NULL_RTX)
14169 /* Invalid CPU argument. */
14170 error ("cpu %s is an invalid argument to builtin %s",
14171 cpu, rs6000_builtin_info[(size_t) fcode].name);
14172 return const0_rtx;
14175 rtx platform = gen_reg_rtx (SImode);
14176 rtx tcbmem = gen_const_mem (SImode,
14177 gen_rtx_PLUS (Pmode,
14178 gen_rtx_REG (Pmode, TLS_REGNUM),
14179 GEN_INT (TCB_PLATFORM_OFFSET)));
14180 emit_move_insn (platform, tcbmem);
14181 emit_insn (gen_eqsi3 (target, platform, cpuid));
14183 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14185 const char *hwcap = TREE_STRING_POINTER (arg);
14186 rtx mask = NULL_RTX;
14187 int hwcap_offset;
14188 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14189 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14191 mask = GEN_INT (cpu_supports_info[i].mask);
14192 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14193 break;
14195 if (mask == NULL_RTX)
14197 /* Invalid HWCAP argument. */
14198 error ("hwcap %s is an invalid argument to builtin %s",
14199 hwcap, rs6000_builtin_info[(size_t) fcode].name);
14200 return const0_rtx;
14203 rtx tcb_hwcap = gen_reg_rtx (SImode);
14204 rtx tcbmem = gen_const_mem (SImode,
14205 gen_rtx_PLUS (Pmode,
14206 gen_rtx_REG (Pmode, TLS_REGNUM),
14207 GEN_INT (hwcap_offset)));
14208 emit_move_insn (tcb_hwcap, tcbmem);
14209 rtx scratch1 = gen_reg_rtx (SImode);
14210 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14211 rtx scratch2 = gen_reg_rtx (SImode);
14212 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14213 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14216 /* Record that we have expanded a CPU builtin, so that we can later
14217 emit a reference to the special symbol exported by LIBC to ensure we
14218 do not link against an old LIBC that doesn't support this feature. */
14219 cpu_builtin_p = true;
14221 #else
14222 /* For old LIBCs, always return FALSE. */
14223 emit_move_insn (target, GEN_INT (0));
14224 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14226 return target;
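/* Typical uses are
     if (__builtin_cpu_is ("power8")) ...
     if (__builtin_cpu_supports ("vsx")) ...
   Each expands to a load from the thread control block followed by a
   compare or mask test, with no call into libc; the string argument
   is resolved at compile time against cpu_is_info[] and
   cpu_supports_info[], and an unrecognized string is an error.  */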
14229 static rtx
14230 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14232 rtx pat;
14233 tree arg0 = CALL_EXPR_ARG (exp, 0);
14234 tree arg1 = CALL_EXPR_ARG (exp, 1);
14235 tree arg2 = CALL_EXPR_ARG (exp, 2);
14236 rtx op0 = expand_normal (arg0);
14237 rtx op1 = expand_normal (arg1);
14238 rtx op2 = expand_normal (arg2);
14239 machine_mode tmode = insn_data[icode].operand[0].mode;
14240 machine_mode mode0 = insn_data[icode].operand[1].mode;
14241 machine_mode mode1 = insn_data[icode].operand[2].mode;
14242 machine_mode mode2 = insn_data[icode].operand[3].mode;
14244 if (icode == CODE_FOR_nothing)
14245 /* Builtin not supported on this processor. */
14246 return 0;
14248 /* If we got invalid arguments bail out before generating bad rtl. */
14249 if (arg0 == error_mark_node
14250 || arg1 == error_mark_node
14251 || arg2 == error_mark_node)
14252 return const0_rtx;
14254 /* Check and prepare the argument depending on the instruction code.
14256 Note that a switch statement instead of the sequence of tests
14257 would be incorrect, as many of the CODE_FOR values could be
14258 CODE_FOR_nothing and that would yield multiple case labels
14259 with identical values (which would not compile); we could never
14260 reach here at runtime for such codes anyway. */
14261 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14262 || icode == CODE_FOR_altivec_vsldoi_v4si
14263 || icode == CODE_FOR_altivec_vsldoi_v8hi
14264 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14266 /* Only allow 4-bit unsigned literals. */
14267 STRIP_NOPS (arg2);
14268 if (TREE_CODE (arg2) != INTEGER_CST
14269 || TREE_INT_CST_LOW (arg2) & ~0xf)
14271 error ("argument 3 must be a 4-bit unsigned literal");
14272 return const0_rtx;
14275 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14276 || icode == CODE_FOR_vsx_xxpermdi_v2di
14277 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14278 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14279 || icode == CODE_FOR_vsx_xxsldwi_v4si
14280 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14281 || icode == CODE_FOR_vsx_xxsldwi_v2di
14282 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14284 /* Only allow 2-bit unsigned literals. */
14285 STRIP_NOPS (arg2);
14286 if (TREE_CODE (arg2) != INTEGER_CST
14287 || TREE_INT_CST_LOW (arg2) & ~0x3)
14289 error ("argument 3 must be a 2-bit unsigned literal");
14290 return const0_rtx;
14293 else if (icode == CODE_FOR_vsx_set_v2df
14294 || icode == CODE_FOR_vsx_set_v2di
14295 || icode == CODE_FOR_bcdadd
14296 || icode == CODE_FOR_bcdadd_lt
14297 || icode == CODE_FOR_bcdadd_eq
14298 || icode == CODE_FOR_bcdadd_gt
14299 || icode == CODE_FOR_bcdsub
14300 || icode == CODE_FOR_bcdsub_lt
14301 || icode == CODE_FOR_bcdsub_eq
14302 || icode == CODE_FOR_bcdsub_gt)
14304 /* Only allow 1-bit unsigned literals. */
14305 STRIP_NOPS (arg2);
14306 if (TREE_CODE (arg2) != INTEGER_CST
14307 || TREE_INT_CST_LOW (arg2) & ~0x1)
14309 error ("argument 3 must be a 1-bit unsigned literal");
14310 return const0_rtx;
14313 else if (icode == CODE_FOR_dfp_ddedpd_dd
14314 || icode == CODE_FOR_dfp_ddedpd_td)
14316 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14317 STRIP_NOPS (arg0);
14318 if (TREE_CODE (arg0) != INTEGER_CST
14319 || TREE_INT_CST_LOW (arg0) & ~0x3)
14321 error ("argument 1 must be 0 or 2");
14322 return const0_rtx;
14325 else if (icode == CODE_FOR_dfp_denbcd_dd
14326 || icode == CODE_FOR_dfp_denbcd_td)
14328 /* Only allow 1-bit unsigned literals. */
14329 STRIP_NOPS (arg0);
14330 if (TREE_CODE (arg0) != INTEGER_CST
14331 || TREE_INT_CST_LOW (arg0) & ~0x1)
14333 error ("argument 1 must be a 1-bit unsigned literal");
14334 return const0_rtx;
14337 else if (icode == CODE_FOR_dfp_dscli_dd
14338 || icode == CODE_FOR_dfp_dscli_td
14339 || icode == CODE_FOR_dfp_dscri_dd
14340 || icode == CODE_FOR_dfp_dscri_td)
14342 /* Only allow 6-bit unsigned literals. */
14343 STRIP_NOPS (arg1);
14344 if (TREE_CODE (arg1) != INTEGER_CST
14345 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14347 error ("argument 2 must be a 6-bit unsigned literal");
14348 return const0_rtx;
14351 else if (icode == CODE_FOR_crypto_vshasigmaw
14352 || icode == CODE_FOR_crypto_vshasigmad)
14354 /* Check whether the 2nd and 3rd arguments are integer constants and in
14355 range and prepare arguments. */
14356 STRIP_NOPS (arg1);
14357 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
14359 error ("argument 2 must be 0 or 1");
14360 return const0_rtx;
14363 STRIP_NOPS (arg2);
14364 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
14366 error ("argument 3 must be in the range 0..15");
14367 return const0_rtx;
14371 if (target == 0
14372 || GET_MODE (target) != tmode
14373 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14374 target = gen_reg_rtx (tmode);
14376 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14377 op0 = copy_to_mode_reg (mode0, op0);
14378 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14379 op1 = copy_to_mode_reg (mode1, op1);
14380 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14381 op2 = copy_to_mode_reg (mode2, op2);
14383 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
14384 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
14385 else
14386 pat = GEN_FCN (icode) (target, op0, op1, op2);
14387 if (! pat)
14388 return 0;
14389 emit_insn (pat);
14391 return target;
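/* As an example of the literal checks above, vec_sld (a, b, 3) on
   vector int reaches this expander via CODE_FOR_altivec_vsldoi_v4si;
   vsldoi has only a 4-bit shift-count field, so a third argument that
   is not a literal in 0..15 is rejected before any RTL is emitted.  */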
14394 /* Expand the lvx builtins. */
14395 static rtx
14396 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
14398 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14399 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14400 tree arg0;
14401 machine_mode tmode, mode0;
14402 rtx pat, op0;
14403 enum insn_code icode;
14405 switch (fcode)
14407 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
14408 icode = CODE_FOR_vector_altivec_load_v16qi;
14409 break;
14410 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
14411 icode = CODE_FOR_vector_altivec_load_v8hi;
14412 break;
14413 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
14414 icode = CODE_FOR_vector_altivec_load_v4si;
14415 break;
14416 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
14417 icode = CODE_FOR_vector_altivec_load_v4sf;
14418 break;
14419 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
14420 icode = CODE_FOR_vector_altivec_load_v2df;
14421 break;
14422 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
14423 icode = CODE_FOR_vector_altivec_load_v2di;
break;
14424 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
14425 icode = CODE_FOR_vector_altivec_load_v1ti;
14426 break;
14427 default:
14428 *expandedp = false;
14429 return NULL_RTX;
14432 *expandedp = true;
14434 arg0 = CALL_EXPR_ARG (exp, 0);
14435 op0 = expand_normal (arg0);
14436 tmode = insn_data[icode].operand[0].mode;
14437 mode0 = insn_data[icode].operand[1].mode;
14439 if (target == 0
14440 || GET_MODE (target) != tmode
14441 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14442 target = gen_reg_rtx (tmode);
14444 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14445 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14447 pat = GEN_FCN (icode) (target, op0);
14448 if (! pat)
14449 return 0;
14450 emit_insn (pat);
14451 return target;
14454 /* Expand the stvx builtins. */
14455 static rtx
14456 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14457 bool *expandedp)
14459 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14460 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14461 tree arg0, arg1;
14462 machine_mode mode0, mode1;
14463 rtx pat, op0, op1;
14464 enum insn_code icode;
14466 switch (fcode)
14468 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
14469 icode = CODE_FOR_vector_altivec_store_v16qi;
14470 break;
14471 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
14472 icode = CODE_FOR_vector_altivec_store_v8hi;
14473 break;
14474 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
14475 icode = CODE_FOR_vector_altivec_store_v4si;
14476 break;
14477 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
14478 icode = CODE_FOR_vector_altivec_store_v4sf;
14479 break;
14480 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
14481 icode = CODE_FOR_vector_altivec_store_v2df;
14482 break;
14483 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
14484 icode = CODE_FOR_vector_altivec_store_v2di;
break;
14485 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
14486 icode = CODE_FOR_vector_altivec_store_v1ti;
14487 break;
14488 default:
14489 *expandedp = false;
14490 return NULL_RTX;
14493 arg0 = CALL_EXPR_ARG (exp, 0);
14494 arg1 = CALL_EXPR_ARG (exp, 1);
14495 op0 = expand_normal (arg0);
14496 op1 = expand_normal (arg1);
14497 mode0 = insn_data[icode].operand[0].mode;
14498 mode1 = insn_data[icode].operand[1].mode;
14500 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14501 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14502 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14503 op1 = copy_to_mode_reg (mode1, op1);
14505 pat = GEN_FCN (icode) (op0, op1);
14506 if (pat)
14507 emit_insn (pat);
14509 *expandedp = true;
14510 return NULL_RTX;
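/* Sketch (illustration only): the store expander mirrors the load case but
   produces no result.  For V4SI it reduces to roughly

     emit_insn (gen_vector_altivec_store_v4si
                  (gen_rtx_MEM (V4SImode, copy_to_mode_reg (Pmode, op0)),
                   copy_to_mode_reg (V4SImode, op1)));

   with operand 0 the destination MEM and operand 1 the vector value; it
   always returns NULL_RTX since a store yields no value.  */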
14513 /* Expand the dst builtins. */
14514 static rtx
14515 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14516 bool *expandedp)
14518 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14519 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14520 tree arg0, arg1, arg2;
14521 machine_mode mode0, mode1;
14522 rtx pat, op0, op1, op2;
14523 const struct builtin_description *d;
14524 size_t i;
14526 *expandedp = false;
14528 /* Handle DST variants. */
14529 d = bdesc_dst;
14530 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14531 if (d->code == fcode)
14533 arg0 = CALL_EXPR_ARG (exp, 0);
14534 arg1 = CALL_EXPR_ARG (exp, 1);
14535 arg2 = CALL_EXPR_ARG (exp, 2);
14536 op0 = expand_normal (arg0);
14537 op1 = expand_normal (arg1);
14538 op2 = expand_normal (arg2);
14539 mode0 = insn_data[d->icode].operand[0].mode;
14540 mode1 = insn_data[d->icode].operand[1].mode;
14542 /* Invalid arguments, bail out before generating bad rtl. */
14543 if (arg0 == error_mark_node
14544 || arg1 == error_mark_node
14545 || arg2 == error_mark_node)
14546 return const0_rtx;
14548 *expandedp = true;
14549 STRIP_NOPS (arg2);
14550 if (TREE_CODE (arg2) != INTEGER_CST
14551 || TREE_INT_CST_LOW (arg2) & ~0x3)
14553 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14554 return const0_rtx;
14557 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14558 op0 = copy_to_mode_reg (Pmode, op0);
14559 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14560 op1 = copy_to_mode_reg (mode1, op1);
14562 pat = GEN_FCN (d->icode) (op0, op1, op2);
14563 if (pat != 0)
14564 emit_insn (pat);
14566 return NULL_RTX;
14569 return NULL_RTX;
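/* Usage sketch (the altivec.h spelling is an assumption, added for
   illustration): the data-stream touch builtins take the stream selector
   as a compile-time literal, e.g.

     vec_dst (ptr, ctl, 0);

   where the last argument must be 0..3; that is exactly what the
   TREE_INT_CST_LOW (arg2) & ~0x3 check above enforces before any rtl is
   generated.  */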
14572 /* Expand vec_init builtin. */
14573 static rtx
14574 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14576 machine_mode tmode = TYPE_MODE (type);
14577 machine_mode inner_mode = GET_MODE_INNER (tmode);
14578 int i, n_elt = GET_MODE_NUNITS (tmode);
14580 gcc_assert (VECTOR_MODE_P (tmode));
14581 gcc_assert (n_elt == call_expr_nargs (exp));
14583 if (!target || !register_operand (target, tmode))
14584 target = gen_reg_rtx (tmode);
14586 /* If we have a vector composed of a single element, such as V1TImode, do
14587 the initialization directly. */
14588 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14590 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14591 emit_move_insn (target, gen_lowpart (tmode, x));
14593 else
14595 rtvec v = rtvec_alloc (n_elt);
14597 for (i = 0; i < n_elt; ++i)
14599 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14600 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14603 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14606 return target;
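/* Illustration (added; the front-end mapping is an assumption): this
   expander backs the VEC_INIT builtins used for vector literals such as

     vector int v = (vector int){a, b, c, d};

   Each element is expanded, narrowed to the inner mode with gen_lowpart,
   collected into a PARALLEL, and handed to rs6000_expand_vector_init; the
   single-element V1TImode case is handled as a plain lowpart move.  */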
14609 /* Return the integer constant in ARG. Constrain it to be in the range
14610 of the subparts of VEC_TYPE; issue an error if not. */
14612 static int
14613 get_element_number (tree vec_type, tree arg)
14615 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14617 if (!tree_fits_uhwi_p (arg)
14618 || (elt = tree_to_uhwi (arg), elt > max))
14620 error ("selector must be an integer constant in the range 0..%wi", max);
14621 return 0;
14624 return elt;
14627 /* Expand vec_set builtin. */
14628 static rtx
14629 altivec_expand_vec_set_builtin (tree exp)
14631 machine_mode tmode, mode1;
14632 tree arg0, arg1, arg2;
14633 int elt;
14634 rtx op0, op1;
14636 arg0 = CALL_EXPR_ARG (exp, 0);
14637 arg1 = CALL_EXPR_ARG (exp, 1);
14638 arg2 = CALL_EXPR_ARG (exp, 2);
14640 tmode = TYPE_MODE (TREE_TYPE (arg0));
14641 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14642 gcc_assert (VECTOR_MODE_P (tmode));
14644 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14645 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14646 elt = get_element_number (TREE_TYPE (arg0), arg2);
14648 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14649 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14651 op0 = force_reg (tmode, op0);
14652 op1 = force_reg (mode1, op1);
14654 rs6000_expand_vector_set (op0, op1, elt);
14656 return op0;
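/* Illustration (the altivec.h mapping is an assumption): a call along the
   lines of

     v = vec_insert (x, v, 2);

   reaches this expander: the selector is validated against the number of
   vector subparts by get_element_number, both operands are forced into
   registers, and rs6000_expand_vector_set rewrites the chosen lane of op0
   in place, which is why op0 itself is returned.  */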
14659 /* Expand vec_ext builtin. */
14660 static rtx
14661 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14663 machine_mode tmode, mode0;
14664 tree arg0, arg1;
14665 rtx op0;
14666 rtx op1;
14668 arg0 = CALL_EXPR_ARG (exp, 0);
14669 arg1 = CALL_EXPR_ARG (exp, 1);
14671 op0 = expand_normal (arg0);
14672 op1 = expand_normal (arg1);
14674 /* Call get_element_number to validate arg1 if it is a constant. */
14675 if (TREE_CODE (arg1) == INTEGER_CST)
14676 (void) get_element_number (TREE_TYPE (arg0), arg1);
14678 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14679 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14680 gcc_assert (VECTOR_MODE_P (mode0));
14682 op0 = force_reg (mode0, op0);
14684 if (optimize || !target || !register_operand (target, tmode))
14685 target = gen_reg_rtx (tmode);
14687 rs6000_expand_vector_extract (target, op0, op1);
14689 return target;
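/* Illustration (the altivec.h mapping is an assumption): for

     int x = vec_extract (v, 3);

   a constant selector is range-checked up front via get_element_number,
   while a variable selector is passed straight through, since
   rs6000_expand_vector_extract handles the non-constant case itself.  */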
14692 /* Expand the builtin in EXP and store the result in TARGET. Store
14693 true in *EXPANDEDP if we found a builtin to expand. */
14694 static rtx
14695 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14697 const struct builtin_description *d;
14698 size_t i;
14699 enum insn_code icode;
14700 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14701 tree arg0;
14702 rtx op0, pat;
14703 machine_mode tmode, mode0;
14704 enum rs6000_builtins fcode
14705 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14707 if (rs6000_overloaded_builtin_p (fcode))
14709 *expandedp = true;
14710 error ("unresolved overload for Altivec builtin %qF", fndecl);
14712 /* Given it is invalid, just generate a normal call. */
14713 return expand_call (exp, target, false);
14716 target = altivec_expand_ld_builtin (exp, target, expandedp);
14717 if (*expandedp)
14718 return target;
14720 target = altivec_expand_st_builtin (exp, target, expandedp);
14721 if (*expandedp)
14722 return target;
14724 target = altivec_expand_dst_builtin (exp, target, expandedp);
14725 if (*expandedp)
14726 return target;
14728 *expandedp = true;
14730 switch (fcode)
14732 case ALTIVEC_BUILTIN_STVX_V2DF:
14733 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
14734 case ALTIVEC_BUILTIN_STVX_V2DI:
14735 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
14736 case ALTIVEC_BUILTIN_STVX_V4SF:
14737 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
14738 case ALTIVEC_BUILTIN_STVX:
14739 case ALTIVEC_BUILTIN_STVX_V4SI:
14740 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
14741 case ALTIVEC_BUILTIN_STVX_V8HI:
14742 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
14743 case ALTIVEC_BUILTIN_STVX_V16QI:
14744 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
14745 case ALTIVEC_BUILTIN_STVEBX:
14746 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14747 case ALTIVEC_BUILTIN_STVEHX:
14748 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14749 case ALTIVEC_BUILTIN_STVEWX:
14750 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14751 case ALTIVEC_BUILTIN_STVXL_V2DF:
14752 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14753 case ALTIVEC_BUILTIN_STVXL_V2DI:
14754 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14755 case ALTIVEC_BUILTIN_STVXL_V4SF:
14756 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14757 case ALTIVEC_BUILTIN_STVXL:
14758 case ALTIVEC_BUILTIN_STVXL_V4SI:
14759 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14760 case ALTIVEC_BUILTIN_STVXL_V8HI:
14761 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14762 case ALTIVEC_BUILTIN_STVXL_V16QI:
14763 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14765 case ALTIVEC_BUILTIN_STVLX:
14766 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14767 case ALTIVEC_BUILTIN_STVLXL:
14768 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14769 case ALTIVEC_BUILTIN_STVRX:
14770 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14771 case ALTIVEC_BUILTIN_STVRXL:
14772 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14774 case VSX_BUILTIN_STXVD2X_V1TI:
14775 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14776 case VSX_BUILTIN_STXVD2X_V2DF:
14777 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14778 case VSX_BUILTIN_STXVD2X_V2DI:
14779 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14780 case VSX_BUILTIN_STXVW4X_V4SF:
14781 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14782 case VSX_BUILTIN_STXVW4X_V4SI:
14783 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14784 case VSX_BUILTIN_STXVW4X_V8HI:
14785 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14786 case VSX_BUILTIN_STXVW4X_V16QI:
14787 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14789 /* For the following on big endian, it's ok to use any appropriate
14790 unaligned-supporting store, so use a generic expander. For
14791 little-endian, the exact element-reversing instruction must
14792 be used. */
14793 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14795 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14796 : CODE_FOR_vsx_st_elemrev_v2df);
14797 return altivec_expand_stv_builtin (code, exp);
14799 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14801 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14802 : CODE_FOR_vsx_st_elemrev_v2di);
14803 return altivec_expand_stv_builtin (code, exp);
14805 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14807 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14808 : CODE_FOR_vsx_st_elemrev_v4sf);
14809 return altivec_expand_stv_builtin (code, exp);
14811 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14813 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14814 : CODE_FOR_vsx_st_elemrev_v4si);
14815 return altivec_expand_stv_builtin (code, exp);
14817 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14819 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14820 : CODE_FOR_vsx_st_elemrev_v8hi);
14821 return altivec_expand_stv_builtin (code, exp);
14823 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14825 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14826 : CODE_FOR_vsx_st_elemrev_v16qi);
14827 return altivec_expand_stv_builtin (code, exp);
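/* Note (added for illustration): on big-endian the generic vsx_store
   patterns already produce the element order these builtins promise, so
   any unaligned-capable store will do.  On little-endian the register
   layout and the promised memory layout differ, so only the dedicated
   vsx_st_elemrev_<mode> patterns (and vsx_ld_elemrev_<mode> for the loads
   below), which reverse the elements as part of the access, give the
   correct in-memory element order.  */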
14830 case ALTIVEC_BUILTIN_MFVSCR:
14831 icode = CODE_FOR_altivec_mfvscr;
14832 tmode = insn_data[icode].operand[0].mode;
14834 if (target == 0
14835 || GET_MODE (target) != tmode
14836 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14837 target = gen_reg_rtx (tmode);
14839 pat = GEN_FCN (icode) (target);
14840 if (! pat)
14841 return 0;
14842 emit_insn (pat);
14843 return target;
14845 case ALTIVEC_BUILTIN_MTVSCR:
14846 icode = CODE_FOR_altivec_mtvscr;
14847 arg0 = CALL_EXPR_ARG (exp, 0);
14848 op0 = expand_normal (arg0);
14849 mode0 = insn_data[icode].operand[0].mode;
14851 /* If we got invalid arguments bail out before generating bad rtl. */
14852 if (arg0 == error_mark_node)
14853 return const0_rtx;
14855 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14856 op0 = copy_to_mode_reg (mode0, op0);
14858 pat = GEN_FCN (icode) (op0);
14859 if (pat)
14860 emit_insn (pat);
14861 return NULL_RTX;
14863 case ALTIVEC_BUILTIN_DSSALL:
14864 emit_insn (gen_altivec_dssall ());
14865 return NULL_RTX;
14867 case ALTIVEC_BUILTIN_DSS:
14868 icode = CODE_FOR_altivec_dss;
14869 arg0 = CALL_EXPR_ARG (exp, 0);
14870 STRIP_NOPS (arg0);
14871 op0 = expand_normal (arg0);
14872 mode0 = insn_data[icode].operand[0].mode;
14874 /* If we got invalid arguments bail out before generating bad rtl. */
14875 if (arg0 == error_mark_node)
14876 return const0_rtx;
14878 if (TREE_CODE (arg0) != INTEGER_CST
14879 || TREE_INT_CST_LOW (arg0) & ~0x3)
14881 error ("argument to dss must be a 2-bit unsigned literal");
14882 return const0_rtx;
14885 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14886 op0 = copy_to_mode_reg (mode0, op0);
14888 emit_insn (gen_altivec_dss (op0));
14889 return NULL_RTX;
14891 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14892 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14893 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14894 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14895 case VSX_BUILTIN_VEC_INIT_V2DF:
14896 case VSX_BUILTIN_VEC_INIT_V2DI:
14897 case VSX_BUILTIN_VEC_INIT_V1TI:
14898 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14900 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14901 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14902 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14903 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14904 case VSX_BUILTIN_VEC_SET_V2DF:
14905 case VSX_BUILTIN_VEC_SET_V2DI:
14906 case VSX_BUILTIN_VEC_SET_V1TI:
14907 return altivec_expand_vec_set_builtin (exp);
14909 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14910 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14911 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14912 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14913 case VSX_BUILTIN_VEC_EXT_V2DF:
14914 case VSX_BUILTIN_VEC_EXT_V2DI:
14915 case VSX_BUILTIN_VEC_EXT_V1TI:
14916 return altivec_expand_vec_ext_builtin (exp, target);
14918 default:
14919 break;
14920 /* Fall through to the table-driven expanders below. */
14923 /* Expand abs* operations. */
14924 d = bdesc_abs;
14925 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14926 if (d->code == fcode)
14927 return altivec_expand_abs_builtin (d->icode, exp, target);
14929 /* Expand the AltiVec predicates. */
14930 d = bdesc_altivec_preds;
14931 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14932 if (d->code == fcode)
14933 return altivec_expand_predicate_builtin (d->icode, exp, target);
14935 /* LV* are funky. We initialized them differently. */
14936 switch (fcode)
14938 case ALTIVEC_BUILTIN_LVSL:
14939 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14940 exp, target, false);
14941 case ALTIVEC_BUILTIN_LVSR:
14942 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14943 exp, target, false);
14944 case ALTIVEC_BUILTIN_LVEBX:
14945 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14946 exp, target, false);
14947 case ALTIVEC_BUILTIN_LVEHX:
14948 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14949 exp, target, false);
14950 case ALTIVEC_BUILTIN_LVEWX:
14951 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14952 exp, target, false);
14953 case ALTIVEC_BUILTIN_LVXL_V2DF:
14954 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14955 exp, target, false);
14956 case ALTIVEC_BUILTIN_LVXL_V2DI:
14957 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14958 exp, target, false);
14959 case ALTIVEC_BUILTIN_LVXL_V4SF:
14960 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14961 exp, target, false);
14962 case ALTIVEC_BUILTIN_LVXL:
14963 case ALTIVEC_BUILTIN_LVXL_V4SI:
14964 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14965 exp, target, false);
14966 case ALTIVEC_BUILTIN_LVXL_V8HI:
14967 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14968 exp, target, false);
14969 case ALTIVEC_BUILTIN_LVXL_V16QI:
14970 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14971 exp, target, false);
14972 case ALTIVEC_BUILTIN_LVX_V2DF:
14973 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
14974 exp, target, false);
14975 case ALTIVEC_BUILTIN_LVX_V2DI:
14976 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
14977 exp, target, false);
14978 case ALTIVEC_BUILTIN_LVX_V4SF:
14979 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
14980 exp, target, false);
14981 case ALTIVEC_BUILTIN_LVX:
14982 case ALTIVEC_BUILTIN_LVX_V4SI:
14983 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
14984 exp, target, false);
14985 case ALTIVEC_BUILTIN_LVX_V8HI:
14986 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
14987 exp, target, false);
14988 case ALTIVEC_BUILTIN_LVX_V16QI:
14989 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
14990 exp, target, false);
14991 case ALTIVEC_BUILTIN_LVLX:
14992 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14993 exp, target, true);
14994 case ALTIVEC_BUILTIN_LVLXL:
14995 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14996 exp, target, true);
14997 case ALTIVEC_BUILTIN_LVRX:
14998 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14999 exp, target, true);
15000 case ALTIVEC_BUILTIN_LVRXL:
15001 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15002 exp, target, true);
15003 case VSX_BUILTIN_LXVD2X_V1TI:
15004 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15005 exp, target, false);
15006 case VSX_BUILTIN_LXVD2X_V2DF:
15007 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15008 exp, target, false);
15009 case VSX_BUILTIN_LXVD2X_V2DI:
15010 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15011 exp, target, false);
15012 case VSX_BUILTIN_LXVW4X_V4SF:
15013 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15014 exp, target, false);
15015 case VSX_BUILTIN_LXVW4X_V4SI:
15016 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15017 exp, target, false);
15018 case VSX_BUILTIN_LXVW4X_V8HI:
15019 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15020 exp, target, false);
15021 case VSX_BUILTIN_LXVW4X_V16QI:
15022 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15023 exp, target, false);
15024 /* For the following on big endian, it's ok to use any appropriate
15025 unaligned-supporting load, so use a generic expander. For
15026 little-endian, the exact element-reversing instruction must
15027 be used. */
15028 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15030 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15031 : CODE_FOR_vsx_ld_elemrev_v2df);
15032 return altivec_expand_lv_builtin (code, exp, target, false);
15034 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15036 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15037 : CODE_FOR_vsx_ld_elemrev_v2di);
15038 return altivec_expand_lv_builtin (code, exp, target, false);
15040 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15042 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15043 : CODE_FOR_vsx_ld_elemrev_v4sf);
15044 return altivec_expand_lv_builtin (code, exp, target, false);
15046 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15048 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15049 : CODE_FOR_vsx_ld_elemrev_v4si);
15050 return altivec_expand_lv_builtin (code, exp, target, false);
15052 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15054 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15055 : CODE_FOR_vsx_ld_elemrev_v8hi);
15056 return altivec_expand_lv_builtin (code, exp, target, false);
15058 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15060 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15061 : CODE_FOR_vsx_ld_elemrev_v16qi);
15062 return altivec_expand_lv_builtin (code, exp, target, false);
15064 break;
15065 default:
15066 break;
15067 /* Not handled here; fall through and report no expansion. */
15070 *expandedp = false;
15071 return NULL_RTX;
15074 /* Expand the builtin in EXP and store the result in TARGET. Store
15075 true in *EXPANDEDP if we found a builtin to expand. */
15076 static rtx
15077 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15079 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15080 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15081 const struct builtin_description *d;
15082 size_t i;
15084 *expandedp = true;
15086 switch (fcode)
15088 case PAIRED_BUILTIN_STX:
15089 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15090 case PAIRED_BUILTIN_LX:
15091 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15092 default:
15093 break;
15094 /* Fall through to the paired predicate table below. */
15097 /* Expand the paired predicates. */
15098 d = bdesc_paired_preds;
15099 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15100 if (d->code == fcode)
15101 return paired_expand_predicate_builtin (d->icode, exp, target);
15103 *expandedp = false;
15104 return NULL_RTX;
15107 /* Binops that need to be initialized manually, but can be expanded
15108 automagically by rs6000_expand_binop_builtin. */
15109 static const struct builtin_description bdesc_2arg_spe[] =
15111 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
15112 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
15113 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
15114 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
15115 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
15116 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
15117 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
15118 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
15119 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
15120 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
15121 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
15122 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
15123 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
15124 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
15125 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
15126 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
15127 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
15128 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
15129 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
15130 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
15131 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
15132 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
15135 /* Expand the builtin in EXP and store the result in TARGET. Store
15136 true in *EXPANDEDP if we found a builtin to expand.
15138 This expands the SPE builtins that are not simple unary and binary
15139 operations. */
15140 static rtx
15141 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
15143 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15144 tree arg1, arg0;
15145 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15146 enum insn_code icode;
15147 machine_mode tmode, mode0;
15148 rtx pat, op0;
15149 const struct builtin_description *d;
15150 size_t i;
15152 *expandedp = true;
15154 /* Syntax check for a 5-bit unsigned immediate. */
15155 switch (fcode)
15157 case SPE_BUILTIN_EVSTDD:
15158 case SPE_BUILTIN_EVSTDH:
15159 case SPE_BUILTIN_EVSTDW:
15160 case SPE_BUILTIN_EVSTWHE:
15161 case SPE_BUILTIN_EVSTWHO:
15162 case SPE_BUILTIN_EVSTWWE:
15163 case SPE_BUILTIN_EVSTWWO:
15164 arg1 = CALL_EXPR_ARG (exp, 2);
15165 if (TREE_CODE (arg1) != INTEGER_CST
15166 || TREE_INT_CST_LOW (arg1) & ~0x1f)
15168 error ("argument 2 must be a 5-bit unsigned literal");
15169 return const0_rtx;
15171 break;
15172 default:
15173 break;
15176 /* The evsplat*i instructions are not quite generic. */
15177 switch (fcode)
15179 case SPE_BUILTIN_EVSPLATFI:
15180 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
15181 exp, target);
15182 case SPE_BUILTIN_EVSPLATI:
15183 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
15184 exp, target);
15185 default:
15186 break;
15189 d = bdesc_2arg_spe;
15190 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
15191 if (d->code == fcode)
15192 return rs6000_expand_binop_builtin (d->icode, exp, target);
15194 d = bdesc_spe_predicates;
15195 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
15196 if (d->code == fcode)
15197 return spe_expand_predicate_builtin (d->icode, exp, target);
15199 d = bdesc_spe_evsel;
15200 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
15201 if (d->code == fcode)
15202 return spe_expand_evsel_builtin (d->icode, exp, target);
15204 switch (fcode)
15206 case SPE_BUILTIN_EVSTDDX:
15207 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
15208 case SPE_BUILTIN_EVSTDHX:
15209 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
15210 case SPE_BUILTIN_EVSTDWX:
15211 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
15212 case SPE_BUILTIN_EVSTWHEX:
15213 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
15214 case SPE_BUILTIN_EVSTWHOX:
15215 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
15216 case SPE_BUILTIN_EVSTWWEX:
15217 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
15218 case SPE_BUILTIN_EVSTWWOX:
15219 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
15220 case SPE_BUILTIN_EVSTDD:
15221 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
15222 case SPE_BUILTIN_EVSTDH:
15223 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
15224 case SPE_BUILTIN_EVSTDW:
15225 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
15226 case SPE_BUILTIN_EVSTWHE:
15227 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
15228 case SPE_BUILTIN_EVSTWHO:
15229 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
15230 case SPE_BUILTIN_EVSTWWE:
15231 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
15232 case SPE_BUILTIN_EVSTWWO:
15233 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
15234 case SPE_BUILTIN_MFSPEFSCR:
15235 icode = CODE_FOR_spe_mfspefscr;
15236 tmode = insn_data[icode].operand[0].mode;
15238 if (target == 0
15239 || GET_MODE (target) != tmode
15240 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15241 target = gen_reg_rtx (tmode);
15243 pat = GEN_FCN (icode) (target);
15244 if (! pat)
15245 return 0;
15246 emit_insn (pat);
15247 return target;
15248 case SPE_BUILTIN_MTSPEFSCR:
15249 icode = CODE_FOR_spe_mtspefscr;
15250 arg0 = CALL_EXPR_ARG (exp, 0);
15251 op0 = expand_normal (arg0);
15252 mode0 = insn_data[icode].operand[0].mode;
15254 if (arg0 == error_mark_node)
15255 return const0_rtx;
15257 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15258 op0 = copy_to_mode_reg (mode0, op0);
15260 pat = GEN_FCN (icode) (op0);
15261 if (pat)
15262 emit_insn (pat);
15263 return NULL_RTX;
15264 default:
15265 break;
15268 *expandedp = false;
15269 return NULL_RTX;
15272 static rtx
15273 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15275 rtx pat, scratch, tmp;
15276 tree form = CALL_EXPR_ARG (exp, 0);
15277 tree arg0 = CALL_EXPR_ARG (exp, 1);
15278 tree arg1 = CALL_EXPR_ARG (exp, 2);
15279 rtx op0 = expand_normal (arg0);
15280 rtx op1 = expand_normal (arg1);
15281 machine_mode mode0 = insn_data[icode].operand[1].mode;
15282 machine_mode mode1 = insn_data[icode].operand[2].mode;
15283 int form_int;
15284 enum rtx_code code;
15286 if (TREE_CODE (form) != INTEGER_CST)
15288 error ("argument 1 of __builtin_paired_predicate must be a constant");
15289 return const0_rtx;
15291 else
15292 form_int = TREE_INT_CST_LOW (form);
15294 gcc_assert (mode0 == mode1);
15296 if (arg0 == error_mark_node || arg1 == error_mark_node)
15297 return const0_rtx;
15299 if (target == 0
15300 || GET_MODE (target) != SImode
15301 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15302 target = gen_reg_rtx (SImode);
15303 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15304 op0 = copy_to_mode_reg (mode0, op0);
15305 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15306 op1 = copy_to_mode_reg (mode1, op1);
15308 scratch = gen_reg_rtx (CCFPmode);
15310 pat = GEN_FCN (icode) (scratch, op0, op1);
15311 if (!pat)
15312 return const0_rtx;
15314 emit_insn (pat);
15316 switch (form_int)
15318 /* LT bit. */
15319 case 0:
15320 code = LT;
15321 break;
15322 /* GT bit. */
15323 case 1:
15324 code = GT;
15325 break;
15326 /* EQ bit. */
15327 case 2:
15328 code = EQ;
15329 break;
15330 /* UN bit. */
15331 case 3:
15332 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15333 return target;
15334 default:
15335 error ("argument 1 of __builtin_paired_predicate is out of range");
15336 return const0_rtx;
15339 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15340 emit_move_insn (target, tmp);
15341 return target;
15344 static rtx
15345 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15347 rtx pat, scratch, tmp;
15348 tree form = CALL_EXPR_ARG (exp, 0);
15349 tree arg0 = CALL_EXPR_ARG (exp, 1);
15350 tree arg1 = CALL_EXPR_ARG (exp, 2);
15351 rtx op0 = expand_normal (arg0);
15352 rtx op1 = expand_normal (arg1);
15353 machine_mode mode0 = insn_data[icode].operand[1].mode;
15354 machine_mode mode1 = insn_data[icode].operand[2].mode;
15355 int form_int;
15356 enum rtx_code code;
15358 if (TREE_CODE (form) != INTEGER_CST)
15360 error ("argument 1 of __builtin_spe_predicate must be a constant");
15361 return const0_rtx;
15363 else
15364 form_int = TREE_INT_CST_LOW (form);
15366 gcc_assert (mode0 == mode1);
15368 if (arg0 == error_mark_node || arg1 == error_mark_node)
15369 return const0_rtx;
15371 if (target == 0
15372 || GET_MODE (target) != SImode
15373 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
15374 target = gen_reg_rtx (SImode);
15376 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15377 op0 = copy_to_mode_reg (mode0, op0);
15378 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15379 op1 = copy_to_mode_reg (mode1, op1);
15381 scratch = gen_reg_rtx (CCmode);
15383 pat = GEN_FCN (icode) (scratch, op0, op1);
15384 if (! pat)
15385 return const0_rtx;
15386 emit_insn (pat);
15388 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
15389 _lower_. We use one compare, but look in different bits of the
15390 CR for each variant.
15392 There are 2 elements in each SPE simd type (upper/lower). The CR
15393 bits are set as follows:
15395 BIT0 | BIT 1 | BIT 2 | BIT 3
15396 U | L | (U | L) | (U & L)
15398 So, for an "all" relationship, BIT 3 would be set.
15399 For an "any" relationship, BIT 2 would be set. Etc.
15401 Following traditional nomenclature, these bits map to:
15403 BIT0 | BIT 1 | BIT 2 | BIT 3
15404 LT | GT | EQ | OV
15406 Later, we will generate rtl to look in the OV/EQ/LT/GT bit, depending on the form requested.
15409 switch (form_int)
15411 /* All variant. OV bit. */
15412 case 0:
15413 /* We need to get to the OV bit, which is the ORDERED bit. We
15414 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
15415 that's ugly and will make validate_condition_mode die.
15416 So let's just use another pattern. */
15417 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15418 return target;
15419 /* Any variant. EQ bit. */
15420 case 1:
15421 code = EQ;
15422 break;
15423 /* Upper variant. LT bit. */
15424 case 2:
15425 code = LT;
15426 break;
15427 /* Lower variant. GT bit. */
15428 case 3:
15429 code = GT;
15430 break;
15431 default:
15432 error ("argument 1 of __builtin_spe_predicate is out of range");
15433 return const0_rtx;
15436 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15437 emit_move_insn (target, tmp);
15439 return target;
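/* Worked example (added for illustration): suppose the upper-element
   relation U holds and the lower-element relation L does not.  Per the
   table above, BIT 2 = (U | L) = 1 and BIT 3 = (U & L) = 0, so the "any"
   form (form_int == 1, EQ bit) is true while the "all" form (form_int == 0,
   OV bit) is false; the "upper" and "lower" forms read BIT 0 and BIT 1
   (LT and GT) directly.  */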
15442 /* The evsel builtins look like this:
15444 e = __builtin_spe_evsel_OP (a, b, c, d);
15446 and work like this:
15448 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
15449 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
15452 static rtx
15453 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
15455 rtx pat, scratch;
15456 tree arg0 = CALL_EXPR_ARG (exp, 0);
15457 tree arg1 = CALL_EXPR_ARG (exp, 1);
15458 tree arg2 = CALL_EXPR_ARG (exp, 2);
15459 tree arg3 = CALL_EXPR_ARG (exp, 3);
15460 rtx op0 = expand_normal (arg0);
15461 rtx op1 = expand_normal (arg1);
15462 rtx op2 = expand_normal (arg2);
15463 rtx op3 = expand_normal (arg3);
15464 machine_mode mode0 = insn_data[icode].operand[1].mode;
15465 machine_mode mode1 = insn_data[icode].operand[2].mode;
15467 gcc_assert (mode0 == mode1);
15469 if (arg0 == error_mark_node || arg1 == error_mark_node
15470 || arg2 == error_mark_node || arg3 == error_mark_node)
15471 return const0_rtx;
15473 if (target == 0
15474 || GET_MODE (target) != mode0
15475 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
15476 target = gen_reg_rtx (mode0);
15478 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15479 op0 = copy_to_mode_reg (mode0, op0);
15480 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15481 op1 = copy_to_mode_reg (mode0, op1);
15482 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
15483 op2 = copy_to_mode_reg (mode0, op2);
15484 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
15485 op3 = copy_to_mode_reg (mode0, op3);
15487 /* Generate the compare. */
15488 scratch = gen_reg_rtx (CCmode);
15489 pat = GEN_FCN (icode) (scratch, op0, op1);
15490 if (! pat)
15491 return const0_rtx;
15492 emit_insn (pat);
15494 if (mode0 == V2SImode)
15495 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
15496 else
15497 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
15499 return target;
15502 /* Raise an error message for a builtin function that is called without the
15503 appropriate target options being set. */
15505 static void
15506 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15508 size_t uns_fncode = (size_t)fncode;
15509 const char *name = rs6000_builtin_info[uns_fncode].name;
15510 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15512 gcc_assert (name != NULL);
15513 if ((fnmask & RS6000_BTM_CELL) != 0)
15514 error ("Builtin function %s is only valid for the cell processor", name);
15515 else if ((fnmask & RS6000_BTM_VSX) != 0)
15516 error ("Builtin function %s requires the -mvsx option", name);
15517 else if ((fnmask & RS6000_BTM_HTM) != 0)
15518 error ("Builtin function %s requires the -mhtm option", name);
15519 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15520 error ("Builtin function %s requires the -maltivec option", name);
15521 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
15522 error ("Builtin function %s requires the -mpaired option", name);
15523 else if ((fnmask & RS6000_BTM_SPE) != 0)
15524 error ("Builtin function %s requires the -mspe option", name);
15525 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15526 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15527 error ("Builtin function %s requires the -mhard-dfp and"
15528 " -mpower8-vector options", name);
15529 else if ((fnmask & RS6000_BTM_DFP) != 0)
15530 error ("Builtin function %s requires the -mhard-dfp option", name);
15531 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15532 error ("Builtin function %s requires the -mpower8-vector option", name);
15533 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15534 error ("Builtin function %s requires the -mcpu=power9 option", name);
15535 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15536 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15537 error ("Builtin function %s requires the -mcpu=power9 and"
15538 " -m64 options", name);
15539 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15540 error ("Builtin function %s requires the -mcpu=power9 option", name);
15541 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15542 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15543 error ("Builtin function %s requires the -mhard-float and"
15544 " -mlong-double-128 options", name);
15545 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15546 error ("Builtin function %s requires the -mhard-float option", name);
15547 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15548 error ("Builtin function %s requires the -mfloat128 option", name);
15549 else
15550 error ("Builtin function %s is not supported with the current options",
15551 name);
15554 /* Target hook for early folding of built-ins, shamelessly stolen
15555 from ia64.c. */
15557 static tree
15558 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
15559 tree *args, bool ignore ATTRIBUTE_UNUSED)
15561 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
15563 enum rs6000_builtins fn_code
15564 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15565 switch (fn_code)
15567 case RS6000_BUILTIN_NANQ:
15568 case RS6000_BUILTIN_NANSQ:
15570 tree type = TREE_TYPE (TREE_TYPE (fndecl));
15571 const char *str = c_getstr (*args);
15572 int quiet = fn_code == RS6000_BUILTIN_NANQ;
15573 REAL_VALUE_TYPE real;
15575 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
15576 return build_real (type, real);
15577 return NULL_TREE;
15579 case RS6000_BUILTIN_INFQ:
15580 case RS6000_BUILTIN_HUGE_VALQ:
15582 tree type = TREE_TYPE (TREE_TYPE (fndecl));
15583 REAL_VALUE_TYPE inf;
15584 real_inf (&inf);
15585 return build_real (type, inf);
15587 default:
15588 break;
15591 #ifdef SUBTARGET_FOLD_BUILTIN
15592 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15593 #else
15594 return NULL_TREE;
15595 #endif
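/* Example (added for illustration): with this hook in place, calls such as

     __float128 q = __builtin_nanq ("");
     __float128 i = __builtin_infq ();

   fold to REAL_CST constants at the tree level (a quiet NaN and +Inf
   respectively), so no runtime code is emitted for them.  */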
15598 /* Expand an expression EXP that calls a built-in function,
15599 with result going to TARGET if that's convenient
15600 (and in mode MODE if that's convenient).
15601 SUBTARGET may be used as the target for computing one of EXP's operands.
15602 IGNORE is nonzero if the value is to be ignored. */
15604 static rtx
15605 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15606 machine_mode mode ATTRIBUTE_UNUSED,
15607 int ignore ATTRIBUTE_UNUSED)
15609 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15610 enum rs6000_builtins fcode
15611 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
15612 size_t uns_fcode = (size_t)fcode;
15613 const struct builtin_description *d;
15614 size_t i;
15615 rtx ret;
15616 bool success;
15617 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
15618 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
15620 if (TARGET_DEBUG_BUILTIN)
15622 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
15623 const char *name1 = rs6000_builtin_info[uns_fcode].name;
15624 const char *name2 = ((icode != CODE_FOR_nothing)
15625 ? get_insn_name ((int)icode)
15626 : "nothing");
15627 const char *name3;
15629 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
15631 default: name3 = "unknown"; break;
15632 case RS6000_BTC_SPECIAL: name3 = "special"; break;
15633 case RS6000_BTC_UNARY: name3 = "unary"; break;
15634 case RS6000_BTC_BINARY: name3 = "binary"; break;
15635 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
15636 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
15637 case RS6000_BTC_ABS: name3 = "abs"; break;
15638 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
15639 case RS6000_BTC_DST: name3 = "dst"; break;
15643 fprintf (stderr,
15644 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
15645 (name1) ? name1 : "---", fcode,
15646 (name2) ? name2 : "---", (int)icode,
15647 name3,
15648 func_valid_p ? "" : ", not valid");
15651 if (!func_valid_p)
15653 rs6000_invalid_builtin (fcode);
15655 /* Given it is invalid, just generate a normal call. */
15656 return expand_call (exp, target, ignore);
15659 switch (fcode)
15661 case RS6000_BUILTIN_RECIP:
15662 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
15664 case RS6000_BUILTIN_RECIPF:
15665 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
15667 case RS6000_BUILTIN_RSQRTF:
15668 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
15670 case RS6000_BUILTIN_RSQRT:
15671 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
15673 case POWER7_BUILTIN_BPERMD:
15674 return rs6000_expand_binop_builtin (((TARGET_64BIT)
15675 ? CODE_FOR_bpermd_di
15676 : CODE_FOR_bpermd_si), exp, target);
15678 case RS6000_BUILTIN_GET_TB:
15679 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
15680 target);
15682 case RS6000_BUILTIN_MFTB:
15683 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
15684 ? CODE_FOR_rs6000_mftb_di
15685 : CODE_FOR_rs6000_mftb_si),
15686 target);
15688 case RS6000_BUILTIN_MFFS:
15689 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
15691 case RS6000_BUILTIN_MTFSF:
15692 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
15694 case RS6000_BUILTIN_CPU_INIT:
15695 case RS6000_BUILTIN_CPU_IS:
15696 case RS6000_BUILTIN_CPU_SUPPORTS:
15697 return cpu_expand_builtin (fcode, exp, target);
15699 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
15700 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
15702 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
15703 : (int) CODE_FOR_altivec_lvsl_direct);
15704 machine_mode tmode = insn_data[icode].operand[0].mode;
15705 machine_mode mode = insn_data[icode].operand[1].mode;
15706 tree arg;
15707 rtx op, addr, pat;
15709 gcc_assert (TARGET_ALTIVEC);
15711 arg = CALL_EXPR_ARG (exp, 0);
15712 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
15713 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
15714 addr = memory_address (mode, op);
15715 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
15716 op = addr;
15717 else
15719 /* For the load case we need to negate the address. */
15720 op = gen_reg_rtx (GET_MODE (addr));
15721 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
15723 op = gen_rtx_MEM (mode, op);
15725 if (target == 0
15726 || GET_MODE (target) != tmode
15727 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15728 target = gen_reg_rtx (tmode);
15730 pat = GEN_FCN (icode) (target, op);
15731 if (!pat)
15732 return 0;
15733 emit_insn (pat);
15735 return target;
15738 case ALTIVEC_BUILTIN_VCFUX:
15739 case ALTIVEC_BUILTIN_VCFSX:
15740 case ALTIVEC_BUILTIN_VCTUXS:
15741 case ALTIVEC_BUILTIN_VCTSXS:
15742 /* FIXME: There's got to be a nicer way to handle this case than
15743 constructing a new CALL_EXPR. */
15744 if (call_expr_nargs (exp) == 1)
15746 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
15747 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
15749 break;
15751 default:
15752 break;
15755 if (TARGET_ALTIVEC)
15757 ret = altivec_expand_builtin (exp, target, &success);
15759 if (success)
15760 return ret;
15762 if (TARGET_SPE)
15764 ret = spe_expand_builtin (exp, target, &success);
15766 if (success)
15767 return ret;
15769 if (TARGET_PAIRED_FLOAT)
15771 ret = paired_expand_builtin (exp, target, &success);
15773 if (success)
15774 return ret;
15776 if (TARGET_HTM)
15778 ret = htm_expand_builtin (exp, target, &success);
15780 if (success)
15781 return ret;
15784 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
15785 /* RS6000_BTC_SPECIAL represents no-operand operators. */
15786 gcc_assert (attr == RS6000_BTC_UNARY
15787 || attr == RS6000_BTC_BINARY
15788 || attr == RS6000_BTC_TERNARY
15789 || attr == RS6000_BTC_SPECIAL);
15791 /* Handle simple unary operations. */
15792 d = bdesc_1arg;
15793 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15794 if (d->code == fcode)
15795 return rs6000_expand_unop_builtin (d->icode, exp, target);
15797 /* Handle simple binary operations. */
15798 d = bdesc_2arg;
15799 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15800 if (d->code == fcode)
15801 return rs6000_expand_binop_builtin (d->icode, exp, target);
15803 /* Handle simple ternary operations. */
15804 d = bdesc_3arg;
15805 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
15806 if (d->code == fcode)
15807 return rs6000_expand_ternop_builtin (d->icode, exp, target);
15809 /* Handle simple no-argument operations. */
15810 d = bdesc_0arg;
15811 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
15812 if (d->code == fcode)
15813 return rs6000_expand_zeroop_builtin (d->icode, target);
15815 gcc_unreachable ();
15818 static void
15819 rs6000_init_builtins (void)
15821 tree tdecl;
15822 tree ftype;
15823 machine_mode mode;
15825 if (TARGET_DEBUG_BUILTIN)
15826 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
15827 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
15828 (TARGET_SPE) ? ", spe" : "",
15829 (TARGET_ALTIVEC) ? ", altivec" : "",
15830 (TARGET_VSX) ? ", vsx" : "");
15832 V2SI_type_node = build_vector_type (intSI_type_node, 2);
15833 V2SF_type_node = build_vector_type (float_type_node, 2);
15834 V2DI_type_node = build_vector_type (intDI_type_node, 2);
15835 V2DF_type_node = build_vector_type (double_type_node, 2);
15836 V4HI_type_node = build_vector_type (intHI_type_node, 4);
15837 V4SI_type_node = build_vector_type (intSI_type_node, 4);
15838 V4SF_type_node = build_vector_type (float_type_node, 4);
15839 V8HI_type_node = build_vector_type (intHI_type_node, 8);
15840 V16QI_type_node = build_vector_type (intQI_type_node, 16);
15842 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
15843 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
15844 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
15845 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
15847 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
15848 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
15849 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
15850 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
15852 const_str_type_node
15853 = build_pointer_type (build_qualified_type (char_type_node,
15854 TYPE_QUAL_CONST));
15856 /* We use V1TI mode as a special container to hold __int128_t items that
15857 must live in VSX registers. */
15858 if (intTI_type_node)
15860 V1TI_type_node = build_vector_type (intTI_type_node, 1);
15861 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
15864 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
15865 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
15866 'vector unsigned short'. */
15868 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
15869 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
15870 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
15871 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
15872 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
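/* Example (illustration only): because these are distinct type copies, C++
   overload resolution can tell the flavours apart, e.g.

     void f (vector bool char);
     void f (vector unsigned char);

   are two different overloads rather than a redeclaration, and likewise
   'vector pixel' does not collide with 'vector unsigned short'.  */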
15874 long_integer_type_internal_node = long_integer_type_node;
15875 long_unsigned_type_internal_node = long_unsigned_type_node;
15876 long_long_integer_type_internal_node = long_long_integer_type_node;
15877 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
15878 intQI_type_internal_node = intQI_type_node;
15879 uintQI_type_internal_node = unsigned_intQI_type_node;
15880 intHI_type_internal_node = intHI_type_node;
15881 uintHI_type_internal_node = unsigned_intHI_type_node;
15882 intSI_type_internal_node = intSI_type_node;
15883 uintSI_type_internal_node = unsigned_intSI_type_node;
15884 intDI_type_internal_node = intDI_type_node;
15885 uintDI_type_internal_node = unsigned_intDI_type_node;
15886 intTI_type_internal_node = intTI_type_node;
15887 uintTI_type_internal_node = unsigned_intTI_type_node;
15888 float_type_internal_node = float_type_node;
15889 double_type_internal_node = double_type_node;
15890 long_double_type_internal_node = long_double_type_node;
15891 dfloat64_type_internal_node = dfloat64_type_node;
15892 dfloat128_type_internal_node = dfloat128_type_node;
15893 void_type_internal_node = void_type_node;
15895 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
15896 IFmode is the IBM extended 128-bit format that is a pair of doubles.
15897 TFmode will be either IEEE 128-bit floating point or the IBM double-double
15898 format that uses a pair of doubles, depending on the switches and
15899 defaults. */
15900 if (TARGET_FLOAT128)
15902 ibm128_float_type_node = make_node (REAL_TYPE);
15903 TYPE_PRECISION (ibm128_float_type_node) = 128;
15904 layout_type (ibm128_float_type_node);
15905 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
15907 ieee128_float_type_node = make_node (REAL_TYPE);
15908 TYPE_PRECISION (ieee128_float_type_node) = 128;
15909 layout_type (ieee128_float_type_node);
15910 SET_TYPE_MODE (ieee128_float_type_node, KFmode);
15912 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
15913 "__float128");
15915 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
15916 "__ibm128");
15918 else
15920 /* All types must be nonzero, or self-test barfs during bootstrap. */
15921 ieee128_float_type_node = long_double_type_node;
15922 ibm128_float_type_node = long_double_type_node;
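/* Summary sketch (added for illustration): after the code above,

     __float128   KFmode, IEEE 128-bit binary float   (TARGET_FLOAT128)
     __ibm128     IFmode, IBM double-double pair      (TARGET_FLOAT128)
     long double  TFmode, whichever format the switches select

   and without TARGET_FLOAT128 the two type nodes simply alias
   long_double_type_node so later code never sees a null type node.  */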
15925 /* Initialize the modes for builtin_function_type, mapping a machine mode to
15926 tree type node. */
15927 builtin_mode_to_type[QImode][0] = integer_type_node;
15928 builtin_mode_to_type[HImode][0] = integer_type_node;
15929 builtin_mode_to_type[SImode][0] = intSI_type_node;
15930 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
15931 builtin_mode_to_type[DImode][0] = intDI_type_node;
15932 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
15933 builtin_mode_to_type[TImode][0] = intTI_type_node;
15934 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
15935 builtin_mode_to_type[SFmode][0] = float_type_node;
15936 builtin_mode_to_type[DFmode][0] = double_type_node;
15937 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
15938 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
15939 builtin_mode_to_type[TFmode][0] = long_double_type_node;
15940 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
15941 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
15942 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
15943 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
15944 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
15945 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
15946 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
15947 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
15948 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
15949 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
15950 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
15951 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
15952 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
15953 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
15954 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
15955 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
15956 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
15958 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
15959 TYPE_NAME (bool_char_type_node) = tdecl;
15961 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
15962 TYPE_NAME (bool_short_type_node) = tdecl;
15964 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
15965 TYPE_NAME (bool_int_type_node) = tdecl;
15967 tdecl = add_builtin_type ("__pixel", pixel_type_node);
15968 TYPE_NAME (pixel_type_node) = tdecl;
15970 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
15971 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
15972 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
15973 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
15974 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
15976 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
15977 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
15979 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
15980 TYPE_NAME (V16QI_type_node) = tdecl;
15982 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
15983 TYPE_NAME (bool_V16QI_type_node) = tdecl;
15985 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
15986 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
15988 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
15989 TYPE_NAME (V8HI_type_node) = tdecl;
15991 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
15992 TYPE_NAME (bool_V8HI_type_node) = tdecl;
15994 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
15995 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
15997 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
15998 TYPE_NAME (V4SI_type_node) = tdecl;
16000 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
16001 TYPE_NAME (bool_V4SI_type_node) = tdecl;
16003 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
16004 TYPE_NAME (V4SF_type_node) = tdecl;
16006 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
16007 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
16009 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
16010 TYPE_NAME (V2DF_type_node) = tdecl;
16012 if (TARGET_POWERPC64)
16014 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
16015 TYPE_NAME (V2DI_type_node) = tdecl;
16017 tdecl = add_builtin_type ("__vector unsigned long",
16018 unsigned_V2DI_type_node);
16019 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
16021 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
16022 TYPE_NAME (bool_V2DI_type_node) = tdecl;
16024 else
16026 tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
16027 TYPE_NAME (V2DI_type_node) = tdecl;
16029 tdecl = add_builtin_type ("__vector unsigned long long",
16030 unsigned_V2DI_type_node);
16031 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
16033 tdecl = add_builtin_type ("__vector __bool long long",
16034 bool_V2DI_type_node);
16035 TYPE_NAME (bool_V2DI_type_node) = tdecl;
16038 if (V1TI_type_node)
16040 tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
16041 TYPE_NAME (V1TI_type_node) = tdecl;
16043 tdecl = add_builtin_type ("__vector unsigned __int128",
16044 unsigned_V1TI_type_node);
16045 TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
16048 /* Paired and SPE builtins are only available if the compiler was built
16049 with the corresponding options, so create them only when those options
16050 are enabled. Create Altivec and VSX builtins on machines with at least
16051 the general purpose extensions (970 and newer) to allow the use of the
16052 target attribute. */
16053 if (TARGET_PAIRED_FLOAT)
16054 paired_init_builtins ();
16055 if (TARGET_SPE)
16056 spe_init_builtins ();
16057 if (TARGET_EXTRA_BUILTINS)
16058 altivec_init_builtins ();
16059 if (TARGET_HTM)
16060 htm_init_builtins ();
16062 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
16063 rs6000_common_init_builtins ();
16065 ftype = build_function_type_list (ieee128_float_type_node,
16066 const_str_type_node, NULL_TREE);
16067 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
16068 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
16070 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
16071 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
16072 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
16074 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16075 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16076 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16078 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16079 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16080 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16082 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16083 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16084 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16086 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16087 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16088 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16090 mode = (TARGET_64BIT) ? DImode : SImode;
16091 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16092 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16093 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16095 ftype = build_function_type_list (unsigned_intDI_type_node,
16096 NULL_TREE);
16097 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16099 if (TARGET_64BIT)
16100 ftype = build_function_type_list (unsigned_intDI_type_node,
16101 NULL_TREE);
16102 else
16103 ftype = build_function_type_list (unsigned_intSI_type_node,
16104 NULL_TREE);
16105 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16107 ftype = build_function_type_list (double_type_node, NULL_TREE);
16108 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16110 ftype = build_function_type_list (void_type_node,
16111 intSI_type_node, double_type_node,
16112 NULL_TREE);
16113 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16115 ftype = build_function_type_list (void_type_node, NULL_TREE);
16116 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16118 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16119 NULL_TREE);
16120 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16121 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16123 #if TARGET_XCOFF
16124 /* AIX libm provides clog as __clog. */
16125 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16126 set_user_assembler_name (tdecl, "__clog");
16127 #endif
16129 #ifdef SUBTARGET_INIT_BUILTINS
16130 SUBTARGET_INIT_BUILTINS;
16131 #endif
16134 /* Returns the rs6000 builtin decl for CODE. */
16136 static tree
16137 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16139 HOST_WIDE_INT fnmask;
16141 if (code >= RS6000_BUILTIN_COUNT)
16142 return error_mark_node;
16144 fnmask = rs6000_builtin_info[code].mask;
16145 if ((fnmask & rs6000_builtin_mask) != fnmask)
16147 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16148 return error_mark_node;
16151 return rs6000_builtin_decls[code];
16154 static void
16155 spe_init_builtins (void)
16157 tree puint_type_node = build_pointer_type (unsigned_type_node);
16158 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
16159 const struct builtin_description *d;
16160 size_t i;
16162 tree v2si_ftype_4_v2si
16163 = build_function_type_list (opaque_V2SI_type_node,
16164 opaque_V2SI_type_node,
16165 opaque_V2SI_type_node,
16166 opaque_V2SI_type_node,
16167 opaque_V2SI_type_node,
16168 NULL_TREE);
16170 tree v2sf_ftype_4_v2sf
16171 = build_function_type_list (opaque_V2SF_type_node,
16172 opaque_V2SF_type_node,
16173 opaque_V2SF_type_node,
16174 opaque_V2SF_type_node,
16175 opaque_V2SF_type_node,
16176 NULL_TREE);
16178 tree int_ftype_int_v2si_v2si
16179 = build_function_type_list (integer_type_node,
16180 integer_type_node,
16181 opaque_V2SI_type_node,
16182 opaque_V2SI_type_node,
16183 NULL_TREE);
16185 tree int_ftype_int_v2sf_v2sf
16186 = build_function_type_list (integer_type_node,
16187 integer_type_node,
16188 opaque_V2SF_type_node,
16189 opaque_V2SF_type_node,
16190 NULL_TREE);
16192 tree void_ftype_v2si_puint_int
16193 = build_function_type_list (void_type_node,
16194 opaque_V2SI_type_node,
16195 puint_type_node,
16196 integer_type_node,
16197 NULL_TREE);
16199 tree void_ftype_v2si_puint_char
16200 = build_function_type_list (void_type_node,
16201 opaque_V2SI_type_node,
16202 puint_type_node,
16203 char_type_node,
16204 NULL_TREE);
16206 tree void_ftype_v2si_pv2si_int
16207 = build_function_type_list (void_type_node,
16208 opaque_V2SI_type_node,
16209 opaque_p_V2SI_type_node,
16210 integer_type_node,
16211 NULL_TREE);
16213 tree void_ftype_v2si_pv2si_char
16214 = build_function_type_list (void_type_node,
16215 opaque_V2SI_type_node,
16216 opaque_p_V2SI_type_node,
16217 char_type_node,
16218 NULL_TREE);
16220 tree void_ftype_int
16221 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16223 tree int_ftype_void
16224 = build_function_type_list (integer_type_node, NULL_TREE);
16226 tree v2si_ftype_pv2si_int
16227 = build_function_type_list (opaque_V2SI_type_node,
16228 opaque_p_V2SI_type_node,
16229 integer_type_node,
16230 NULL_TREE);
16232 tree v2si_ftype_puint_int
16233 = build_function_type_list (opaque_V2SI_type_node,
16234 puint_type_node,
16235 integer_type_node,
16236 NULL_TREE);
16238 tree v2si_ftype_pushort_int
16239 = build_function_type_list (opaque_V2SI_type_node,
16240 pushort_type_node,
16241 integer_type_node,
16242 NULL_TREE);
16244 tree v2si_ftype_signed_char
16245 = build_function_type_list (opaque_V2SI_type_node,
16246 signed_char_type_node,
16247 NULL_TREE);
16249 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
16251 /* Initialize irregular SPE builtins. */
16253 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
16254 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
16255 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
16256 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
16257 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
16258 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
16259 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
16260 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
16261 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
16262 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
16263 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
16264 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
16265 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
16266 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
16267 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
16268 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
16269 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
16270 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
16272 /* Loads. */
16273 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
16274 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
16275 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
16276 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
16277 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
16278 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
16279 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
16280 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
16281 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
16282 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
16283 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
16284 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
16285 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
16286 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
16287 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
16288 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
16289 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
16290 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
16291 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
16292 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
16293 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
16294 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
16296 /* Predicates. */
16297 d = bdesc_spe_predicates;
16298 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
16300 tree type;
16302 switch (insn_data[d->icode].operand[1].mode)
16304 case V2SImode:
16305 type = int_ftype_int_v2si_v2si;
16306 break;
16307 case V2SFmode:
16308 type = int_ftype_int_v2sf_v2sf;
16309 break;
16310 default:
16311 gcc_unreachable ();
16314 def_builtin (d->name, type, d->code);
16317 /* Evsel predicates. */
16318 d = bdesc_spe_evsel;
16319 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
16321 tree type;
16323 switch (insn_data[d->icode].operand[1].mode)
16325 case V2SImode:
16326 type = v2si_ftype_4_v2si;
16327 break;
16328 case V2SFmode:
16329 type = v2sf_ftype_4_v2sf;
16330 break;
16331 default:
16332 gcc_unreachable ();
16335 def_builtin (d->name, type, d->code);
16339 static void
16340 paired_init_builtins (void)
16342 const struct builtin_description *d;
16343 size_t i;
16345 tree int_ftype_int_v2sf_v2sf
16346 = build_function_type_list (integer_type_node,
16347 integer_type_node,
16348 V2SF_type_node,
16349 V2SF_type_node,
16350 NULL_TREE);
16351 tree pcfloat_type_node =
16352 build_pointer_type (build_qualified_type
16353 (float_type_node, TYPE_QUAL_CONST));
16355 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
16356 long_integer_type_node,
16357 pcfloat_type_node,
16358 NULL_TREE);
16359 tree void_ftype_v2sf_long_pcfloat =
16360 build_function_type_list (void_type_node,
16361 V2SF_type_node,
16362 long_integer_type_node,
16363 pcfloat_type_node,
16364 NULL_TREE);
16367 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
16368 PAIRED_BUILTIN_LX);
16371 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
16372 PAIRED_BUILTIN_STX);
16374 /* Predicates. */
16375 d = bdesc_paired_preds;
16376 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
16378 tree type;
16380 if (TARGET_DEBUG_BUILTIN)
16381 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
16382 (int)i, get_insn_name (d->icode), (int)d->icode,
16383 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
16385 switch (insn_data[d->icode].operand[1].mode)
16387 case V2SFmode:
16388 type = int_ftype_int_v2sf_v2sf;
16389 break;
16390 default:
16391 gcc_unreachable ();
16394 def_builtin (d->name, type, d->code);
16398 static void
16399 altivec_init_builtins (void)
16401 const struct builtin_description *d;
16402 size_t i;
16403 tree ftype;
16404 tree decl;
16406 tree pvoid_type_node = build_pointer_type (void_type_node);
16408 tree pcvoid_type_node
16409 = build_pointer_type (build_qualified_type (void_type_node,
16410 TYPE_QUAL_CONST));
16412 tree int_ftype_opaque
16413 = build_function_type_list (integer_type_node,
16414 opaque_V4SI_type_node, NULL_TREE);
16415 tree opaque_ftype_opaque
16416 = build_function_type_list (integer_type_node, NULL_TREE);
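  /* (Editor's note: the integer-returning signature above looks odd but
     appears intentional; the overloaded vec_splats/vec_promote builtins
     that use it are resolved by the frontend during parsing, so this
     placeholder type is never applied to a real call.) */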
16417 tree opaque_ftype_opaque_int
16418 = build_function_type_list (opaque_V4SI_type_node,
16419 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16420 tree opaque_ftype_opaque_opaque_int
16421 = build_function_type_list (opaque_V4SI_type_node,
16422 opaque_V4SI_type_node, opaque_V4SI_type_node,
16423 integer_type_node, NULL_TREE);
16424 tree opaque_ftype_opaque_opaque_opaque
16425 = build_function_type_list (opaque_V4SI_type_node,
16426 opaque_V4SI_type_node, opaque_V4SI_type_node,
16427 opaque_V4SI_type_node, NULL_TREE);
16428 tree opaque_ftype_opaque_opaque
16429 = build_function_type_list (opaque_V4SI_type_node,
16430 opaque_V4SI_type_node, opaque_V4SI_type_node,
16431 NULL_TREE);
16432 tree int_ftype_int_opaque_opaque
16433 = build_function_type_list (integer_type_node,
16434 integer_type_node, opaque_V4SI_type_node,
16435 opaque_V4SI_type_node, NULL_TREE);
16436 tree int_ftype_int_v4si_v4si
16437 = build_function_type_list (integer_type_node,
16438 integer_type_node, V4SI_type_node,
16439 V4SI_type_node, NULL_TREE);
16440 tree int_ftype_int_v2di_v2di
16441 = build_function_type_list (integer_type_node,
16442 integer_type_node, V2DI_type_node,
16443 V2DI_type_node, NULL_TREE);
16444 tree void_ftype_v4si
16445 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16446 tree v8hi_ftype_void
16447 = build_function_type_list (V8HI_type_node, NULL_TREE);
16448 tree void_ftype_void
16449 = build_function_type_list (void_type_node, NULL_TREE);
16450 tree void_ftype_int
16451 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16453 tree opaque_ftype_long_pcvoid
16454 = build_function_type_list (opaque_V4SI_type_node,
16455 long_integer_type_node, pcvoid_type_node,
16456 NULL_TREE);
16457 tree v16qi_ftype_long_pcvoid
16458 = build_function_type_list (V16QI_type_node,
16459 long_integer_type_node, pcvoid_type_node,
16460 NULL_TREE);
16461 tree v8hi_ftype_long_pcvoid
16462 = build_function_type_list (V8HI_type_node,
16463 long_integer_type_node, pcvoid_type_node,
16464 NULL_TREE);
16465 tree v4si_ftype_long_pcvoid
16466 = build_function_type_list (V4SI_type_node,
16467 long_integer_type_node, pcvoid_type_node,
16468 NULL_TREE);
16469 tree v4sf_ftype_long_pcvoid
16470 = build_function_type_list (V4SF_type_node,
16471 long_integer_type_node, pcvoid_type_node,
16472 NULL_TREE);
16473 tree v2df_ftype_long_pcvoid
16474 = build_function_type_list (V2DF_type_node,
16475 long_integer_type_node, pcvoid_type_node,
16476 NULL_TREE);
16477 tree v2di_ftype_long_pcvoid
16478 = build_function_type_list (V2DI_type_node,
16479 long_integer_type_node, pcvoid_type_node,
16480 NULL_TREE);
16482 tree void_ftype_opaque_long_pvoid
16483 = build_function_type_list (void_type_node,
16484 opaque_V4SI_type_node, long_integer_type_node,
16485 pvoid_type_node, NULL_TREE);
16486 tree void_ftype_v4si_long_pvoid
16487 = build_function_type_list (void_type_node,
16488 V4SI_type_node, long_integer_type_node,
16489 pvoid_type_node, NULL_TREE);
16490 tree void_ftype_v16qi_long_pvoid
16491 = build_function_type_list (void_type_node,
16492 V16QI_type_node, long_integer_type_node,
16493 pvoid_type_node, NULL_TREE);
16494 tree void_ftype_v8hi_long_pvoid
16495 = build_function_type_list (void_type_node,
16496 V8HI_type_node, long_integer_type_node,
16497 pvoid_type_node, NULL_TREE);
16498 tree void_ftype_v4sf_long_pvoid
16499 = build_function_type_list (void_type_node,
16500 V4SF_type_node, long_integer_type_node,
16501 pvoid_type_node, NULL_TREE);
16502 tree void_ftype_v2df_long_pvoid
16503 = build_function_type_list (void_type_node,
16504 V2DF_type_node, long_integer_type_node,
16505 pvoid_type_node, NULL_TREE);
16506 tree void_ftype_v2di_long_pvoid
16507 = build_function_type_list (void_type_node,
16508 V2DI_type_node, long_integer_type_node,
16509 pvoid_type_node, NULL_TREE);
16510 tree int_ftype_int_v8hi_v8hi
16511 = build_function_type_list (integer_type_node,
16512 integer_type_node, V8HI_type_node,
16513 V8HI_type_node, NULL_TREE);
16514 tree int_ftype_int_v16qi_v16qi
16515 = build_function_type_list (integer_type_node,
16516 integer_type_node, V16QI_type_node,
16517 V16QI_type_node, NULL_TREE);
16518 tree int_ftype_int_v4sf_v4sf
16519 = build_function_type_list (integer_type_node,
16520 integer_type_node, V4SF_type_node,
16521 V4SF_type_node, NULL_TREE);
16522 tree int_ftype_int_v2df_v2df
16523 = build_function_type_list (integer_type_node,
16524 integer_type_node, V2DF_type_node,
16525 V2DF_type_node, NULL_TREE);
16526 tree v2di_ftype_v2di
16527 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16528 tree v4si_ftype_v4si
16529 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16530 tree v8hi_ftype_v8hi
16531 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16532 tree v16qi_ftype_v16qi
16533 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16534 tree v4sf_ftype_v4sf
16535 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16536 tree v2df_ftype_v2df
16537 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16538 tree void_ftype_pcvoid_int_int
16539 = build_function_type_list (void_type_node,
16540 pcvoid_type_node, integer_type_node,
16541 integer_type_node, NULL_TREE);
16543 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16544 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16545 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16546 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16547 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16548 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16549 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16550 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16551 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16552 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16553 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16554 ALTIVEC_BUILTIN_LVXL_V2DF);
16555 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16556 ALTIVEC_BUILTIN_LVXL_V2DI);
16557 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16558 ALTIVEC_BUILTIN_LVXL_V4SF);
16559 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16560 ALTIVEC_BUILTIN_LVXL_V4SI);
16561 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16562 ALTIVEC_BUILTIN_LVXL_V8HI);
16563 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16564 ALTIVEC_BUILTIN_LVXL_V16QI);
16565 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16566 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16567 ALTIVEC_BUILTIN_LVX_V2DF);
16568 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16569 ALTIVEC_BUILTIN_LVX_V2DI);
16570 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16571 ALTIVEC_BUILTIN_LVX_V4SF);
16572 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16573 ALTIVEC_BUILTIN_LVX_V4SI);
16574 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16575 ALTIVEC_BUILTIN_LVX_V8HI);
16576 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16577 ALTIVEC_BUILTIN_LVX_V16QI);
16578 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16579 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16580 ALTIVEC_BUILTIN_STVX_V2DF);
16581 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16582 ALTIVEC_BUILTIN_STVX_V2DI);
16583 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16584 ALTIVEC_BUILTIN_STVX_V4SF);
16585 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16586 ALTIVEC_BUILTIN_STVX_V4SI);
16587 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16588 ALTIVEC_BUILTIN_STVX_V8HI);
16589 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16590 ALTIVEC_BUILTIN_STVX_V16QI);
16591 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16592 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16593 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16594 ALTIVEC_BUILTIN_STVXL_V2DF);
16595 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16596 ALTIVEC_BUILTIN_STVXL_V2DI);
16597 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16598 ALTIVEC_BUILTIN_STVXL_V4SF);
16599 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16600 ALTIVEC_BUILTIN_STVXL_V4SI);
16601 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16602 ALTIVEC_BUILTIN_STVXL_V8HI);
16603 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16604 ALTIVEC_BUILTIN_STVXL_V16QI);
16605 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16606 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16607 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16608 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16609 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16610 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16611 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16612 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16613 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16614 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16615 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16616 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16617 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16618 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16619 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16620 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16622 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16623 VSX_BUILTIN_LXVD2X_V2DF);
16624 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16625 VSX_BUILTIN_LXVD2X_V2DI);
16626 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16627 VSX_BUILTIN_LXVW4X_V4SF);
16628 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16629 VSX_BUILTIN_LXVW4X_V4SI);
16630 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16631 VSX_BUILTIN_LXVW4X_V8HI);
16632 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16633 VSX_BUILTIN_LXVW4X_V16QI);
16634 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16635 VSX_BUILTIN_STXVD2X_V2DF);
16636 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16637 VSX_BUILTIN_STXVD2X_V2DI);
16638 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16639 VSX_BUILTIN_STXVW4X_V4SF);
16640 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16641 VSX_BUILTIN_STXVW4X_V4SI);
16642 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16643 VSX_BUILTIN_STXVW4X_V8HI);
16644 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16645 VSX_BUILTIN_STXVW4X_V16QI);
16647 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16648 VSX_BUILTIN_LD_ELEMREV_V2DF);
16649 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16650 VSX_BUILTIN_LD_ELEMREV_V2DI);
16651 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16652 VSX_BUILTIN_LD_ELEMREV_V4SF);
16653 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16654 VSX_BUILTIN_LD_ELEMREV_V4SI);
16655 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16656 VSX_BUILTIN_ST_ELEMREV_V2DF);
16657 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16658 VSX_BUILTIN_ST_ELEMREV_V2DI);
16659 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16660 VSX_BUILTIN_ST_ELEMREV_V4SF);
16661 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16662 VSX_BUILTIN_ST_ELEMREV_V4SI);
16664 if (TARGET_P9_VECTOR)
16666 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16667 VSX_BUILTIN_LD_ELEMREV_V8HI);
16668 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16669 VSX_BUILTIN_LD_ELEMREV_V16QI);
16670 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
16671 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
16672 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
16673 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
16676 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16677 VSX_BUILTIN_VEC_LD);
16678 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16679 VSX_BUILTIN_VEC_ST);
16680 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16681 VSX_BUILTIN_VEC_XL);
16682 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16683 VSX_BUILTIN_VEC_XST);
16685 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16686 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16687 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16689 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16690 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16691 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16692 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16693 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16694 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16695 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16696 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16697 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16698 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16699 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16700 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16702 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16703 ALTIVEC_BUILTIN_VEC_ADDE);
16704 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16705 ALTIVEC_BUILTIN_VEC_ADDEC);
16706 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16707 ALTIVEC_BUILTIN_VEC_CMPNE);
16708 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16709 ALTIVEC_BUILTIN_VEC_MUL);
16711 /* Cell builtins. */
16712 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16713 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16714 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16715 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16717 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16718 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16719 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16720 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16722 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16723 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16724 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16725 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16727 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16728 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16729 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16730 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16732 /* Add the DST variants. */
16733 d = bdesc_dst;
16734 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16735 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16737 /* Initialize the predicates. */
16738 d = bdesc_altivec_preds;
16739 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16741 machine_mode mode1;
16742 tree type;
16744 if (rs6000_overloaded_builtin_p (d->code))
16745 mode1 = VOIDmode;
16746 else
16747 mode1 = insn_data[d->icode].operand[1].mode;
16749 switch (mode1)
16751 case VOIDmode:
16752 type = int_ftype_int_opaque_opaque;
16753 break;
16754 case V2DImode:
16755 type = int_ftype_int_v2di_v2di;
16756 break;
16757 case V4SImode:
16758 type = int_ftype_int_v4si_v4si;
16759 break;
16760 case V8HImode:
16761 type = int_ftype_int_v8hi_v8hi;
16762 break;
16763 case V16QImode:
16764 type = int_ftype_int_v16qi_v16qi;
16765 break;
16766 case V4SFmode:
16767 type = int_ftype_int_v4sf_v4sf;
16768 break;
16769 case V2DFmode:
16770 type = int_ftype_int_v2df_v2df;
16771 break;
16772 default:
16773 gcc_unreachable ();
16776 def_builtin (d->name, type, d->code);
16779 /* Initialize the abs* operators. */
16780 d = bdesc_abs;
16781 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16783 machine_mode mode0;
16784 tree type;
16786 mode0 = insn_data[d->icode].operand[0].mode;
16788 switch (mode0)
16790 case V2DImode:
16791 type = v2di_ftype_v2di;
16792 break;
16793 case V4SImode:
16794 type = v4si_ftype_v4si;
16795 break;
16796 case V8HImode:
16797 type = v8hi_ftype_v8hi;
16798 break;
16799 case V16QImode:
16800 type = v16qi_ftype_v16qi;
16801 break;
16802 case V4SFmode:
16803 type = v4sf_ftype_v4sf;
16804 break;
16805 case V2DFmode:
16806 type = v2df_ftype_v2df;
16807 break;
16808 default:
16809 gcc_unreachable ();
16812 def_builtin (d->name, type, d->code);
16815 /* Initialize target builtin that implements
16816 targetm.vectorize.builtin_mask_for_load. */
16818 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
16819 v16qi_ftype_long_pcvoid,
16820 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
16821 BUILT_IN_MD, NULL, NULL_TREE);
16822 TREE_READONLY (decl) = 1;
16823 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
16824 altivec_builtin_mask_for_load = decl;
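  /* (Editor's note, illustrative: the vectorizer calls this hook when it
     uses the load-realignment scheme; the builtin expands to an lvsl of
     the address, yielding the permute mask that merges two aligned loads
     into one misaligned vector value.) */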
16826 /* Access to the vec_init patterns. */
16827 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
16828 integer_type_node, integer_type_node,
16829 integer_type_node, NULL_TREE);
16830 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
16832 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
16833 short_integer_type_node,
16834 short_integer_type_node,
16835 short_integer_type_node,
16836 short_integer_type_node,
16837 short_integer_type_node,
16838 short_integer_type_node,
16839 short_integer_type_node, NULL_TREE);
16840 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
16842 ftype = build_function_type_list (V16QI_type_node, char_type_node,
16843 char_type_node, char_type_node,
16844 char_type_node, char_type_node,
16845 char_type_node, char_type_node,
16846 char_type_node, char_type_node,
16847 char_type_node, char_type_node,
16848 char_type_node, char_type_node,
16849 char_type_node, char_type_node,
16850 char_type_node, NULL_TREE);
16851 def_builtin ("__builtin_vec_init_v16qi", ftype,
16852 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
16854 ftype = build_function_type_list (V4SF_type_node, float_type_node,
16855 float_type_node, float_type_node,
16856 float_type_node, NULL_TREE);
16857 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
16859 /* VSX builtins. */
16860 ftype = build_function_type_list (V2DF_type_node, double_type_node,
16861 double_type_node, NULL_TREE);
16862 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
16864 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
16865 intDI_type_node, NULL_TREE);
16866 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
16868 /* Access to the vec_set patterns. */
16869 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
16870 intSI_type_node,
16871 integer_type_node, NULL_TREE);
16872 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
16874 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16875 intHI_type_node,
16876 integer_type_node, NULL_TREE);
16877 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
16879 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
16880 intQI_type_node,
16881 integer_type_node, NULL_TREE);
16882 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
16884 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
16885 float_type_node,
16886 integer_type_node, NULL_TREE);
16887 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
16889 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
16890 double_type_node,
16891 integer_type_node, NULL_TREE);
16892 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
16894 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
16895 intDI_type_node,
16896 integer_type_node, NULL_TREE);
16897 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
16899 /* Access to the vec_extract patterns. */
16900 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
16901 integer_type_node, NULL_TREE);
16902 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
16904 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
16905 integer_type_node, NULL_TREE);
16906 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
16908 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
16909 integer_type_node, NULL_TREE);
16910 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
16912 ftype = build_function_type_list (float_type_node, V4SF_type_node,
16913 integer_type_node, NULL_TREE);
16914 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
16916 ftype = build_function_type_list (double_type_node, V2DF_type_node,
16917 integer_type_node, NULL_TREE);
16918 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
16920 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
16921 integer_type_node, NULL_TREE);
16922 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
16925 if (V1TI_type_node)
16927 tree v1ti_ftype_long_pcvoid
16928 = build_function_type_list (V1TI_type_node,
16929 long_integer_type_node, pcvoid_type_node,
16930 NULL_TREE);
16931 tree void_ftype_v1ti_long_pvoid
16932 = build_function_type_list (void_type_node,
16933 V1TI_type_node, long_integer_type_node,
16934 pvoid_type_node, NULL_TREE);
16935 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
16936 VSX_BUILTIN_LXVD2X_V1TI);
16937 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
16938 VSX_BUILTIN_STXVD2X_V1TI);
16939 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
16940 				    NULL_TREE);
16941 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
16942 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
16943 intTI_type_node,
16944 integer_type_node, NULL_TREE);
16945 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
16946 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
16947 integer_type_node, NULL_TREE);
16948 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
16953 static void
16954 htm_init_builtins (void)
16956 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16957 const struct builtin_description *d;
16958 size_t i;
16960 d = bdesc_htm;
16961 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
16963 tree op[MAX_HTM_OPERANDS], type;
16964 HOST_WIDE_INT mask = d->mask;
16965 unsigned attr = rs6000_builtin_info[d->code].attr;
16966 bool void_func = (attr & RS6000_BTC_VOID);
16967 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
16968 int nopnds = 0;
16969 tree gpr_type_node;
16970 tree rettype;
16971 tree argtype;
16973 if (TARGET_32BIT && TARGET_POWERPC64)
16974 gpr_type_node = long_long_unsigned_type_node;
16975 else
16976 gpr_type_node = long_unsigned_type_node;
16978 if (attr & RS6000_BTC_SPR)
16980 rettype = gpr_type_node;
16981 argtype = gpr_type_node;
16983 else if (d->code == HTM_BUILTIN_TABORTDC
16984 || d->code == HTM_BUILTIN_TABORTDCI)
16986 rettype = unsigned_type_node;
16987 argtype = gpr_type_node;
16989 else
16991 rettype = unsigned_type_node;
16992 argtype = unsigned_type_node;
16995 if ((mask & builtin_mask) != mask)
16997 if (TARGET_DEBUG_BUILTIN)
16998 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
16999 continue;
17002 if (d->name == 0)
17004 if (TARGET_DEBUG_BUILTIN)
17005 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17006 (long unsigned) i);
17007 continue;
17010 op[nopnds++] = (void_func) ? void_type_node : rettype;
17012 if (attr_args == RS6000_BTC_UNARY)
17013 op[nopnds++] = argtype;
17014 else if (attr_args == RS6000_BTC_BINARY)
17016 op[nopnds++] = argtype;
17017 op[nopnds++] = argtype;
17019 else if (attr_args == RS6000_BTC_TERNARY)
17021 op[nopnds++] = argtype;
17022 op[nopnds++] = argtype;
17023 op[nopnds++] = argtype;
17026 switch (nopnds)
17028 case 1:
17029 type = build_function_type_list (op[0], NULL_TREE);
17030 break;
17031 case 2:
17032 type = build_function_type_list (op[0], op[1], NULL_TREE);
17033 break;
17034 case 3:
17035 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17036 break;
17037 case 4:
17038 type = build_function_type_list (op[0], op[1], op[2], op[3],
17039 NULL_TREE);
17040 break;
17041 default:
17042 gcc_unreachable ();
17045 def_builtin (d->name, type, d->code);
17049 /* Hash function for builtin functions with up to 3 arguments and a return
17050 type. */
17051 hashval_t
17052 builtin_hasher::hash (builtin_hash_struct *bh)
17054 unsigned ret = 0;
17055 int i;
17057 for (i = 0; i < 4; i++)
17059 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17060 ret = (ret * 2) + bh->uns_p[i];
17063 return ret;
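/* Worked example (editor's addition): each iteration computes
   ret = (ret * MAX_MACHINE_MODE + mode[i]) * 2 + uns_p[i], i.e. a
   mixed-radix encoding of the four (mode, unsignedness) pairs, so distinct
   signatures only collide once the value wraps hashval_t; any such
   collision is resolved exactly by builtin_hasher::equal below.  */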
17066 /* Compare builtin hash entries H1 and H2 for equivalence. */
17067 bool
17068 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17070 return ((p1->mode[0] == p2->mode[0])
17071 && (p1->mode[1] == p2->mode[1])
17072 && (p1->mode[2] == p2->mode[2])
17073 && (p1->mode[3] == p2->mode[3])
17074 && (p1->uns_p[0] == p2->uns_p[0])
17075 && (p1->uns_p[1] == p2->uns_p[1])
17076 && (p1->uns_p[2] == p2->uns_p[2])
17077 && (p1->uns_p[3] == p2->uns_p[3]));
17080 /* Map types for builtin functions with an explicit return type and up to 3
17081    arguments.  Functions with fewer than 3 arguments pass VOIDmode for the
17082    unused argument slots. */
17083 static tree
17084 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17085 machine_mode mode_arg1, machine_mode mode_arg2,
17086 enum rs6000_builtins builtin, const char *name)
17088 struct builtin_hash_struct h;
17089 struct builtin_hash_struct *h2;
17090 int num_args = 3;
17091 int i;
17092 tree ret_type = NULL_TREE;
17093 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17095 /* Create builtin_hash_table. */
17096 if (builtin_hash_table == NULL)
17097 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17099 h.type = NULL_TREE;
17100 h.mode[0] = mode_ret;
17101 h.mode[1] = mode_arg0;
17102 h.mode[2] = mode_arg1;
17103 h.mode[3] = mode_arg2;
17104 h.uns_p[0] = 0;
17105 h.uns_p[1] = 0;
17106 h.uns_p[2] = 0;
17107 h.uns_p[3] = 0;
17109 /* If the builtin is a type that produces unsigned results or takes unsigned
17110 arguments, and it is returned as a decl for the vectorizer (such as
17111 widening multiplies, permute), make sure the arguments and return value
17112 are type correct. */
17113 switch (builtin)
17115 /* unsigned 1 argument functions. */
17116 case CRYPTO_BUILTIN_VSBOX:
17117 case P8V_BUILTIN_VGBBD:
17118 case MISC_BUILTIN_CDTBCD:
17119 case MISC_BUILTIN_CBCDTD:
17120 h.uns_p[0] = 1;
17121 h.uns_p[1] = 1;
17122 break;
17124 /* unsigned 2 argument functions. */
17125 case ALTIVEC_BUILTIN_VMULEUB_UNS:
17126 case ALTIVEC_BUILTIN_VMULEUH_UNS:
17127 case ALTIVEC_BUILTIN_VMULOUB_UNS:
17128 case ALTIVEC_BUILTIN_VMULOUH_UNS:
17129 case CRYPTO_BUILTIN_VCIPHER:
17130 case CRYPTO_BUILTIN_VCIPHERLAST:
17131 case CRYPTO_BUILTIN_VNCIPHER:
17132 case CRYPTO_BUILTIN_VNCIPHERLAST:
17133 case CRYPTO_BUILTIN_VPMSUMB:
17134 case CRYPTO_BUILTIN_VPMSUMH:
17135 case CRYPTO_BUILTIN_VPMSUMW:
17136 case CRYPTO_BUILTIN_VPMSUMD:
17137 case CRYPTO_BUILTIN_VPMSUM:
17138 case MISC_BUILTIN_ADDG6S:
17139 case MISC_BUILTIN_DIVWEU:
17140 case MISC_BUILTIN_DIVWEUO:
17141 case MISC_BUILTIN_DIVDEU:
17142 case MISC_BUILTIN_DIVDEUO:
17143 h.uns_p[0] = 1;
17144 h.uns_p[1] = 1;
17145 h.uns_p[2] = 1;
17146 break;
17148 /* unsigned 3 argument functions. */
17149 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17150 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17151 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17152 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17153 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17154 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17155 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17156 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17157 case VSX_BUILTIN_VPERM_16QI_UNS:
17158 case VSX_BUILTIN_VPERM_8HI_UNS:
17159 case VSX_BUILTIN_VPERM_4SI_UNS:
17160 case VSX_BUILTIN_VPERM_2DI_UNS:
17161 case VSX_BUILTIN_XXSEL_16QI_UNS:
17162 case VSX_BUILTIN_XXSEL_8HI_UNS:
17163 case VSX_BUILTIN_XXSEL_4SI_UNS:
17164 case VSX_BUILTIN_XXSEL_2DI_UNS:
17165 case CRYPTO_BUILTIN_VPERMXOR:
17166 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17167 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17168 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17169 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17170 case CRYPTO_BUILTIN_VSHASIGMAW:
17171 case CRYPTO_BUILTIN_VSHASIGMAD:
17172 case CRYPTO_BUILTIN_VSHASIGMA:
17173 h.uns_p[0] = 1;
17174 h.uns_p[1] = 1;
17175 h.uns_p[2] = 1;
17176 h.uns_p[3] = 1;
17177 break;
17179 /* signed permute functions with unsigned char mask. */
17180 case ALTIVEC_BUILTIN_VPERM_16QI:
17181 case ALTIVEC_BUILTIN_VPERM_8HI:
17182 case ALTIVEC_BUILTIN_VPERM_4SI:
17183 case ALTIVEC_BUILTIN_VPERM_4SF:
17184 case ALTIVEC_BUILTIN_VPERM_2DI:
17185 case ALTIVEC_BUILTIN_VPERM_2DF:
17186 case VSX_BUILTIN_VPERM_16QI:
17187 case VSX_BUILTIN_VPERM_8HI:
17188 case VSX_BUILTIN_VPERM_4SI:
17189 case VSX_BUILTIN_VPERM_4SF:
17190 case VSX_BUILTIN_VPERM_2DI:
17191 case VSX_BUILTIN_VPERM_2DF:
17192 h.uns_p[3] = 1;
17193 break;
17195 /* unsigned args, signed return. */
17196 case VSX_BUILTIN_XVCVUXDDP_UNS:
17197 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17198 h.uns_p[1] = 1;
17199 break;
17201 /* signed args, unsigned return. */
17202 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17203 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17204 case MISC_BUILTIN_UNPACK_TD:
17205 case MISC_BUILTIN_UNPACK_V1TI:
17206 h.uns_p[0] = 1;
17207 break;
17209 /* unsigned arguments for 128-bit pack instructions. */
17210 case MISC_BUILTIN_PACK_TD:
17211 case MISC_BUILTIN_PACK_V1TI:
17212 h.uns_p[1] = 1;
17213 h.uns_p[2] = 1;
17214 break;
17216 default:
17217 break;
17220 /* Figure out how many args are present. */
17221 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17222 num_args--;
17224 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17225 if (!ret_type && h.uns_p[0])
17226 ret_type = builtin_mode_to_type[h.mode[0]][0];
17228 if (!ret_type)
17229 fatal_error (input_location,
17230 "internal error: builtin function %s had an unexpected "
17231 "return type %s", name, GET_MODE_NAME (h.mode[0]));
17233 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17234 arg_type[i] = NULL_TREE;
17236 for (i = 0; i < num_args; i++)
17238 int m = (int) h.mode[i+1];
17239 int uns_p = h.uns_p[i+1];
17241 arg_type[i] = builtin_mode_to_type[m][uns_p];
17242 if (!arg_type[i] && uns_p)
17243 arg_type[i] = builtin_mode_to_type[m][0];
17245 if (!arg_type[i])
17246 fatal_error (input_location,
17247 "internal error: builtin function %s, argument %d "
17248 "had unexpected argument type %s", name, i,
17249 GET_MODE_NAME (m));
17252 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17253 if (*found == NULL)
17255 h2 = ggc_alloc<builtin_hash_struct> ();
17256 *h2 = h;
17257 *found = h2;
17259 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17260 arg_type[2], NULL_TREE);
17263 return (*found)->type;
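/* Example call (editor's addition), mirroring the uses earlier in this
   file:

     tree t = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
                                     RS6000_BUILTIN_RECIP,
                                     "__builtin_recipdiv");

   returns the type double (*) (double, double); the hash table above
   ensures that builtins with identical mode/signedness signatures share a
   single function type node.  */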
17266 static void
17267 rs6000_common_init_builtins (void)
17269 const struct builtin_description *d;
17270 size_t i;
17272 tree opaque_ftype_opaque = NULL_TREE;
17273 tree opaque_ftype_opaque_opaque = NULL_TREE;
17274 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17275 tree v2si_ftype = NULL_TREE;
17276 tree v2si_ftype_qi = NULL_TREE;
17277 tree v2si_ftype_v2si_qi = NULL_TREE;
17278 tree v2si_ftype_int_qi = NULL_TREE;
17279 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17281 if (!TARGET_PAIRED_FLOAT)
17283 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
17284 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
17287   /* Paired and SPE builtins are only available if the compiler is built with
17288      the corresponding options, so create those builtins only when the matching
17289      compiler option is enabled.  Create Altivec and VSX builtins on machines
17290      with at least the general purpose extensions (970 and newer) to allow the
17291      use of the target attribute. */
17293 if (TARGET_EXTRA_BUILTINS)
17294 builtin_mask |= RS6000_BTM_COMMON;
17296 /* Add the ternary operators. */
17297 d = bdesc_3arg;
17298 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17300 tree type;
17301 HOST_WIDE_INT mask = d->mask;
17303 if ((mask & builtin_mask) != mask)
17305 if (TARGET_DEBUG_BUILTIN)
17306 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17307 continue;
17310 if (rs6000_overloaded_builtin_p (d->code))
17312 if (! (type = opaque_ftype_opaque_opaque_opaque))
17313 type = opaque_ftype_opaque_opaque_opaque
17314 = build_function_type_list (opaque_V4SI_type_node,
17315 opaque_V4SI_type_node,
17316 opaque_V4SI_type_node,
17317 opaque_V4SI_type_node,
17318 NULL_TREE);
17320 else
17322 enum insn_code icode = d->icode;
17323 if (d->name == 0)
17325 if (TARGET_DEBUG_BUILTIN)
17326 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17327 (long unsigned)i);
17329 continue;
17332 if (icode == CODE_FOR_nothing)
17334 if (TARGET_DEBUG_BUILTIN)
17335 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17336 d->name);
17338 continue;
17341 type = builtin_function_type (insn_data[icode].operand[0].mode,
17342 insn_data[icode].operand[1].mode,
17343 insn_data[icode].operand[2].mode,
17344 insn_data[icode].operand[3].mode,
17345 d->code, d->name);
17348 def_builtin (d->name, type, d->code);
17351 /* Add the binary operators. */
17352 d = bdesc_2arg;
17353 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17355 machine_mode mode0, mode1, mode2;
17356 tree type;
17357 HOST_WIDE_INT mask = d->mask;
17359 if ((mask & builtin_mask) != mask)
17361 if (TARGET_DEBUG_BUILTIN)
17362 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17363 continue;
17366 if (rs6000_overloaded_builtin_p (d->code))
17368 if (! (type = opaque_ftype_opaque_opaque))
17369 type = opaque_ftype_opaque_opaque
17370 = build_function_type_list (opaque_V4SI_type_node,
17371 opaque_V4SI_type_node,
17372 opaque_V4SI_type_node,
17373 NULL_TREE);
17375 else
17377 enum insn_code icode = d->icode;
17378 if (d->name == 0)
17380 if (TARGET_DEBUG_BUILTIN)
17381 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17382 (long unsigned)i);
17384 continue;
17387 if (icode == CODE_FOR_nothing)
17389 if (TARGET_DEBUG_BUILTIN)
17390 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17391 d->name);
17393 continue;
17396 mode0 = insn_data[icode].operand[0].mode;
17397 mode1 = insn_data[icode].operand[1].mode;
17398 mode2 = insn_data[icode].operand[2].mode;
17400 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
17402 if (! (type = v2si_ftype_v2si_qi))
17403 type = v2si_ftype_v2si_qi
17404 = build_function_type_list (opaque_V2SI_type_node,
17405 opaque_V2SI_type_node,
17406 char_type_node,
17407 NULL_TREE);
17410 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
17411 && mode2 == QImode)
17413 if (! (type = v2si_ftype_int_qi))
17414 type = v2si_ftype_int_qi
17415 = build_function_type_list (opaque_V2SI_type_node,
17416 integer_type_node,
17417 char_type_node,
17418 NULL_TREE);
17421 else
17422 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17423 d->code, d->name);
17426 def_builtin (d->name, type, d->code);
17429 /* Add the simple unary operators. */
17430 d = bdesc_1arg;
17431 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17433 machine_mode mode0, mode1;
17434 tree type;
17435 HOST_WIDE_INT mask = d->mask;
17437 if ((mask & builtin_mask) != mask)
17439 if (TARGET_DEBUG_BUILTIN)
17440 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17441 continue;
17444 if (rs6000_overloaded_builtin_p (d->code))
17446 if (! (type = opaque_ftype_opaque))
17447 type = opaque_ftype_opaque
17448 = build_function_type_list (opaque_V4SI_type_node,
17449 opaque_V4SI_type_node,
17450 NULL_TREE);
17452 else
17454 enum insn_code icode = d->icode;
17455 if (d->name == 0)
17457 if (TARGET_DEBUG_BUILTIN)
17458 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
17459 (long unsigned)i);
17461 continue;
17464 if (icode == CODE_FOR_nothing)
17466 if (TARGET_DEBUG_BUILTIN)
17467 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17468 d->name);
17470 continue;
17473 mode0 = insn_data[icode].operand[0].mode;
17474 mode1 = insn_data[icode].operand[1].mode;
17476 if (mode0 == V2SImode && mode1 == QImode)
17478 if (! (type = v2si_ftype_qi))
17479 type = v2si_ftype_qi
17480 = build_function_type_list (opaque_V2SI_type_node,
17481 char_type_node,
17482 NULL_TREE);
17485 else
17486 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17487 d->code, d->name);
17490 def_builtin (d->name, type, d->code);
17493 /* Add the simple no-argument operators. */
17494 d = bdesc_0arg;
17495 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17497 machine_mode mode0;
17498 tree type;
17499 HOST_WIDE_INT mask = d->mask;
17501 if ((mask & builtin_mask) != mask)
17503 if (TARGET_DEBUG_BUILTIN)
17504 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17505 continue;
17507 if (rs6000_overloaded_builtin_p (d->code))
17509 if (!opaque_ftype_opaque)
17510 opaque_ftype_opaque
17511 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17512 type = opaque_ftype_opaque;
17514 else
17516 enum insn_code icode = d->icode;
17517 if (d->name == 0)
17519 if (TARGET_DEBUG_BUILTIN)
17520 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17521 (long unsigned) i);
17522 continue;
17524 if (icode == CODE_FOR_nothing)
17526 if (TARGET_DEBUG_BUILTIN)
17527 fprintf (stderr,
17528 "rs6000_builtin, skip no-argument %s (no code)\n",
17529 d->name);
17530 continue;
17532 mode0 = insn_data[icode].operand[0].mode;
17533 if (mode0 == V2SImode)
17535 	  /* Code for SPE.  */
17536 if (! (type = v2si_ftype))
17538 v2si_ftype
17539 = build_function_type_list (opaque_V2SI_type_node,
17540 NULL_TREE);
17541 type = v2si_ftype;
17544 else
17545 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17546 d->code, d->name);
17548 def_builtin (d->name, type, d->code);
17552 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
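/* (Editor's note, illustrative: "IBM" 128-bit floating point here is the
   double-double format, where the value is the sum of a pair of 64-bit
   doubles, so each __gcc_q* routine below operates on such a pair.)  */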
17553 static void
17554 init_float128_ibm (machine_mode mode)
17556 if (!TARGET_XL_COMPAT)
17558 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17559 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17560 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17561 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17563 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
17565 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17566 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17567 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17568 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17569 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17570 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17571 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17573 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17574 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17575 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17576 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17577 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17578 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17579 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17580 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17583 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
17584 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17586 else
17588 set_optab_libfunc (add_optab, mode, "_xlqadd");
17589 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17590 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17591 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17594 /* Add various conversions for IFmode to use the traditional TFmode
17595 names. */
17596 if (mode == IFmode)
17598 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
17599 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
17600 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
17601 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
17602 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
17603 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
17605 if (TARGET_POWERPC64)
17607 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17608 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17609 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17610 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17615 /* Set up IEEE 128-bit floating point routines. Use different names if the
17616 arguments can be passed in a vector register. The historical PowerPC
17617 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17618 continue to use that if we aren't using vector registers to pass IEEE
17619 128-bit floating point. */
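/* (Editor's note: the "kf" infix in __addkf3 and friends names KFmode, the
   IEEE 128-bit type; libgcc is expected to provide these entry points on
   float128-capable targets.)  */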
17621 static void
17622 init_float128_ieee (machine_mode mode)
17624 if (FLOAT128_VECTOR_P (mode))
17626 set_optab_libfunc (add_optab, mode, "__addkf3");
17627 set_optab_libfunc (sub_optab, mode, "__subkf3");
17628 set_optab_libfunc (neg_optab, mode, "__negkf2");
17629 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17630 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17631 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17632       set_optab_libfunc (abs_optab, mode, "__abskf2");
17634 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17635 set_optab_libfunc (ne_optab, mode, "__nekf2");
17636 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17637 set_optab_libfunc (ge_optab, mode, "__gekf2");
17638 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17639 set_optab_libfunc (le_optab, mode, "__lekf2");
17640 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17642 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17643 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17644 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17645 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17647 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
17648 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17649 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
17651 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
17652 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17653 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
17655 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
17656 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
17657 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
17658 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
17659 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
17660 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
17662 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17663 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17664 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17665 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17667 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17668 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17669 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17670 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17672 if (TARGET_POWERPC64)
17674 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17675 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17676 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17677 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17681 else
17683 set_optab_libfunc (add_optab, mode, "_q_add");
17684 set_optab_libfunc (sub_optab, mode, "_q_sub");
17685 set_optab_libfunc (neg_optab, mode, "_q_neg");
17686 set_optab_libfunc (smul_optab, mode, "_q_mul");
17687 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17688 if (TARGET_PPC_GPOPT)
17689 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17691 set_optab_libfunc (eq_optab, mode, "_q_feq");
17692 set_optab_libfunc (ne_optab, mode, "_q_fne");
17693 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17694 set_optab_libfunc (ge_optab, mode, "_q_fge");
17695 set_optab_libfunc (lt_optab, mode, "_q_flt");
17696 set_optab_libfunc (le_optab, mode, "_q_fle");
17698 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17699 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17700 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17701 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17702 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17703 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17704 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17705 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17709 static void
17710 rs6000_init_libfuncs (void)
17712 /* __float128 support. */
17713 if (TARGET_FLOAT128)
17715 init_float128_ibm (IFmode);
17716 init_float128_ieee (KFmode);
17719 /* AIX/Darwin/64-bit Linux quad floating point routines. */
17720 if (TARGET_LONG_DOUBLE_128)
17722 if (!TARGET_IEEEQUAD)
17723 init_float128_ibm (TFmode);
17725 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
17726 else
17727 init_float128_ieee (TFmode);
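/* Illustrative sketch, not in-tree code: the effect of the registrations
   above is that ordinary C arithmetic on the 128-bit long double type is
   lowered to the named library routines.  On a target using the IBM
   double-double format, for instance, the addition and comparison below
   would become calls to __gcc_qadd and __gcc_qge rather than inline FP
   instructions.  */

long double
sketch_double_double_use (long double a, long double b)
{
  long double sum = a + b;	/* add_optab -> __gcc_qadd */
  return (sum >= a) ? sum : b;	/* ge_optab  -> __gcc_qge  */
}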
17732 /* Expand a block clear operation, and return 1 if successful. Return 0
17733 if we should let the compiler generate normal code.
17735 operands[0] is the destination
17736 operands[1] is the length
17737 operands[3] is the alignment */
17739 int
17740 expand_block_clear (rtx operands[])
17742 rtx orig_dest = operands[0];
17743 rtx bytes_rtx = operands[1];
17744 rtx align_rtx = operands[3];
17745 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
17746 HOST_WIDE_INT align;
17747 HOST_WIDE_INT bytes;
17748 int offset;
17749 int clear_bytes;
17750 int clear_step;
17752 /* If this is not a fixed size clear, just call memset */
17753 if (! constp)
17754 return 0;
17756 /* This must be a fixed size alignment */
17757 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
17758 align = INTVAL (align_rtx) * BITS_PER_UNIT;
17760 /* Anything to clear? */
17761 bytes = INTVAL (bytes_rtx);
17762 if (bytes <= 0)
17763 return 1;
17765 /* Use the builtin memset after a point, to avoid huge code bloat.
17766 When optimize_size, avoid any significant code bloat; calling
17767 memset is about 4 instructions, so allow for one instruction to
17768 load zero and three to do clearing. */
17769 if (TARGET_ALTIVEC && align >= 128)
17770 clear_step = 16;
17771 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
17772 clear_step = 8;
17773 else if (TARGET_SPE && align >= 64)
17774 clear_step = 8;
17775 else
17776 clear_step = 4;
17778 if (optimize_size && bytes > 3 * clear_step)
17779 return 0;
17780 if (! optimize_size && bytes > 8 * clear_step)
17781 return 0;
17783 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
17785 machine_mode mode = BLKmode;
17786 rtx dest;
17788 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
17790 clear_bytes = 16;
17791 mode = V4SImode;
17793 else if (bytes >= 8 && TARGET_SPE && align >= 64)
17795 clear_bytes = 8;
17796 mode = V2SImode;
17798 else if (bytes >= 8 && TARGET_POWERPC64
17799 && (align >= 64 || !STRICT_ALIGNMENT))
17801 clear_bytes = 8;
17802 mode = DImode;
17803 if (offset == 0 && align < 64)
17805 rtx addr;
17807 /* If the address form is reg+offset with offset not a
17808 multiple of four, reload into reg indirect form here
17809 rather than waiting for reload. This way we get one
17810 reload, not one per store. */
17811 addr = XEXP (orig_dest, 0);
17812 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
17813 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17814 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
17816 addr = copy_addr_to_reg (addr);
17817 orig_dest = replace_equiv_address (orig_dest, addr);
17821 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
17822 { /* clear 4 bytes */
17823 clear_bytes = 4;
17824 mode = SImode;
17826 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
17827 { /* clear 2 bytes */
17828 clear_bytes = 2;
17829 mode = HImode;
17831 else /* clear 1 byte at a time */
17833 clear_bytes = 1;
17834 mode = QImode;
17837 dest = adjust_address (orig_dest, mode, offset);
17839 emit_move_insn (dest, CONST0_RTX (mode));
17842 return 1;
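/* Illustrative sketch, not in-tree code: the two size checks above give
   an inline-clear budget of 3 * clear_step bytes under -Os and
   8 * clear_step bytes otherwise, so with AltiVec (clear_step == 16) at
   most 48 or 128 bytes are cleared inline before deferring to memset.  */

static int
sketch_max_inline_clear_bytes (int clear_step, int optimize_size)
{
  return (optimize_size ? 3 : 8) * clear_step;
}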
17846 /* Expand a block move operation, and return 1 if successful. Return 0
17847 if we should let the compiler generate normal code.
17849 operands[0] is the destination
17850 operands[1] is the source
17851 operands[2] is the length
17852 operands[3] is the alignment */
17854 #define MAX_MOVE_REG 4
17856 int
17857 expand_block_move (rtx operands[])
17859 rtx orig_dest = operands[0];
17860 rtx orig_src = operands[1];
17861 rtx bytes_rtx = operands[2];
17862 rtx align_rtx = operands[3];
17863 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
17864 int align;
17865 int bytes;
17866 int offset;
17867 int move_bytes;
17868 rtx stores[MAX_MOVE_REG];
17869 int num_reg = 0;
17871 /* If this is not a fixed size move, just call memcpy */
17872 if (! constp)
17873 return 0;
17875 /* This must be a fixed size alignment */
17876 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
17877 align = INTVAL (align_rtx) * BITS_PER_UNIT;
17879 /* Anything to move? */
17880 bytes = INTVAL (bytes_rtx);
17881 if (bytes <= 0)
17882 return 1;
17884 if (bytes > rs6000_block_move_inline_limit)
17885 return 0;
17887 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
17889 union {
17890 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
17891 rtx (*mov) (rtx, rtx);
17892 } gen_func;
17893 machine_mode mode = BLKmode;
17894 rtx src, dest;
17896 /* Altivec first, since it will be faster than a string move
17897 when it applies, and usually not significantly larger. */
17898 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
17900 move_bytes = 16;
17901 mode = V4SImode;
17902 gen_func.mov = gen_movv4si;
17904 else if (TARGET_SPE && bytes >= 8 && align >= 64)
17906 move_bytes = 8;
17907 mode = V2SImode;
17908 gen_func.mov = gen_movv2si;
17910 else if (TARGET_STRING
17911 && bytes > 24 /* move up to 32 bytes at a time */
17912 && ! fixed_regs[5]
17913 && ! fixed_regs[6]
17914 && ! fixed_regs[7]
17915 && ! fixed_regs[8]
17916 && ! fixed_regs[9]
17917 && ! fixed_regs[10]
17918 && ! fixed_regs[11]
17919 && ! fixed_regs[12])
17921 move_bytes = (bytes > 32) ? 32 : bytes;
17922 gen_func.movmemsi = gen_movmemsi_8reg;
17924 else if (TARGET_STRING
17925 && bytes > 16 /* move up to 24 bytes at a time */
17926 && ! fixed_regs[5]
17927 && ! fixed_regs[6]
17928 && ! fixed_regs[7]
17929 && ! fixed_regs[8]
17930 && ! fixed_regs[9]
17931 && ! fixed_regs[10])
17933 move_bytes = (bytes > 24) ? 24 : bytes;
17934 gen_func.movmemsi = gen_movmemsi_6reg;
17936 else if (TARGET_STRING
17937 && bytes > 8 /* move up to 16 bytes at a time */
17938 && ! fixed_regs[5]
17939 && ! fixed_regs[6]
17940 && ! fixed_regs[7]
17941 && ! fixed_regs[8])
17943 move_bytes = (bytes > 16) ? 16 : bytes;
17944 gen_func.movmemsi = gen_movmemsi_4reg;
17946 else if (bytes >= 8 && TARGET_POWERPC64
17947 && (align >= 64 || !STRICT_ALIGNMENT))
17949 move_bytes = 8;
17950 mode = DImode;
17951 gen_func.mov = gen_movdi;
17952 if (offset == 0 && align < 64)
17954 rtx addr;
17956 /* If the address form is reg+offset with offset not a
17957 multiple of four, reload into reg indirect form here
17958 rather than waiting for reload. This way we get one
17959 reload, not one per load and/or store. */
17960 addr = XEXP (orig_dest, 0);
17961 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
17962 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17963 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
17965 addr = copy_addr_to_reg (addr);
17966 orig_dest = replace_equiv_address (orig_dest, addr);
17968 addr = XEXP (orig_src, 0);
17969 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
17970 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17971 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
17973 addr = copy_addr_to_reg (addr);
17974 orig_src = replace_equiv_address (orig_src, addr);
17978 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
17979 { /* move up to 8 bytes at a time */
17980 move_bytes = (bytes > 8) ? 8 : bytes;
17981 gen_func.movmemsi = gen_movmemsi_2reg;
17983 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
17984 { /* move 4 bytes */
17985 move_bytes = 4;
17986 mode = SImode;
17987 gen_func.mov = gen_movsi;
17989 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
17990 { /* move 2 bytes */
17991 move_bytes = 2;
17992 mode = HImode;
17993 gen_func.mov = gen_movhi;
17995 else if (TARGET_STRING && bytes > 1)
17996 { /* move up to 4 bytes at a time */
17997 move_bytes = (bytes > 4) ? 4 : bytes;
17998 gen_func.movmemsi = gen_movmemsi_1reg;
18000 else /* move 1 byte at a time */
18002 move_bytes = 1;
18003 mode = QImode;
18004 gen_func.mov = gen_movqi;
18007 src = adjust_address (orig_src, mode, offset);
18008 dest = adjust_address (orig_dest, mode, offset);
18010 if (mode != BLKmode)
18012 rtx tmp_reg = gen_reg_rtx (mode);
18014 emit_insn ((*gen_func.mov) (tmp_reg, src));
18015 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
18018 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
18020 int i;
18021 for (i = 0; i < num_reg; i++)
18022 emit_insn (stores[i]);
18023 num_reg = 0;
18026 if (mode == BLKmode)
18028 /* Move the address into scratch registers. The movmemsi
18029 patterns require zero offset. */
18030 if (!REG_P (XEXP (src, 0)))
18032 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
18033 src = replace_equiv_address (src, src_reg);
18035 set_mem_size (src, move_bytes);
18037 if (!REG_P (XEXP (dest, 0)))
18039 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
18040 dest = replace_equiv_address (dest, dest_reg);
18042 set_mem_size (dest, move_bytes);
18044 emit_insn ((*gen_func.movmemsi) (dest, src,
18045 GEN_INT (move_bytes & 31),
18046 align_rtx));
18050 return 1;
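/* Illustrative sketch, not in-tree code: the loop above queues up to
   MAX_MOVE_REG loads before flushing the matching stores, so the emitted
   sequence has the load/load/.../store/store shape of this plain C word
   copy.  */

static void
sketch_batched_word_copy (unsigned *dst, const unsigned *src,
			  unsigned long n)
{
  while (n >= 4)
    {
      /* Load a whole group into temporaries first...  */
      unsigned t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3];
      /* ...then store the group.  */
      dst[0] = t0, dst[1] = t1, dst[2] = t2, dst[3] = t3;
      src += 4, dst += 4, n -= 4;
    }
  while (n--)
    *dst++ = *src++;
}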
18054 /* Return a string to perform a load_multiple operation.
18055 operands[0] is the vector.
18056 operands[1] is the source address.
18057 operands[2] is the first destination register. */
18059 const char *
18060 rs6000_output_load_multiple (rtx operands[3])
18062 /* We have to handle the case where the pseudo used to contain the address
18063 is assigned to one of the output registers. */
18064 int i, j;
18065 int words = XVECLEN (operands[0], 0);
18066 rtx xop[10];
18068 if (XVECLEN (operands[0], 0) == 1)
18069 return "lwz %2,0(%1)";
18071 for (i = 0; i < words; i++)
18072 if (refers_to_regno_p (REGNO (operands[2]) + i, operands[1]))
18074 if (i == words-1)
18076 xop[0] = GEN_INT (4 * (words-1));
18077 xop[1] = operands[1];
18078 xop[2] = operands[2];
18079 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
18080 return "";
18082 else if (i == 0)
18084 xop[0] = GEN_INT (4 * (words-1));
18085 xop[1] = operands[1];
18086 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
18087 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
18088 return "";
18090 else
18092 for (j = 0; j < words; j++)
18093 if (j != i)
18095 xop[0] = GEN_INT (j * 4);
18096 xop[1] = operands[1];
18097 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
18098 output_asm_insn ("lwz %2,%0(%1)", xop);
18100 xop[0] = GEN_INT (i * 4);
18101 xop[1] = operands[1];
18102 output_asm_insn ("lwz %1,%0(%1)", xop);
18103 return "";
18107 return "lswi %2,%1,%N0";
18111 /* A validation routine: say whether CODE, a condition code, and MODE
18112 match. The other alternatives either don't make sense or should
18113 never be generated. */
18115 void
18116 validate_condition_mode (enum rtx_code code, machine_mode mode)
18118 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18119 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18120 && GET_MODE_CLASS (mode) == MODE_CC);
18122 /* These don't make sense. */
18123 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18124 || mode != CCUNSmode);
18126 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18127 || mode == CCUNSmode);
18129 gcc_assert (mode == CCFPmode
18130 || (code != ORDERED && code != UNORDERED
18131 && code != UNEQ && code != LTGT
18132 && code != UNGT && code != UNLT
18133 && code != UNGE && code != UNLE));
18135 /* These should never be generated except for
18136 flag_finite_math_only. */
18137 gcc_assert (mode != CCFPmode
18138 || flag_finite_math_only
18139 || (code != LE && code != GE
18140 && code != UNEQ && code != LTGT
18141 && code != UNGT && code != UNLT));
18143 /* These are invalid; the information is not there. */
18144 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18148 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18149 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18150 not zero, store there the bit offset (counted from the right) where
18151 the single stretch of 1 bits begins; and similarly for B, the bit
18152 offset where it ends. */
18154 bool
18155 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18157 unsigned HOST_WIDE_INT val = INTVAL (mask);
18158 unsigned HOST_WIDE_INT bit;
18159 int nb, ne;
18160 int n = GET_MODE_PRECISION (mode);
18162 if (mode != DImode && mode != SImode)
18163 return false;
18165 if (INTVAL (mask) >= 0)
18167 bit = val & -val;
18168 ne = exact_log2 (bit);
18169 nb = exact_log2 (val + bit);
18171 else if (val + 1 == 0)
18173 nb = n;
18174 ne = 0;
18176 else if (val & 1)
18178 val = ~val;
18179 bit = val & -val;
18180 nb = exact_log2 (bit);
18181 ne = exact_log2 (val + bit);
18183 else
18185 bit = val & -val;
18186 ne = exact_log2 (bit);
18187 if (val + bit == 0)
18188 nb = n;
18189 else
18190 nb = 0;
18193 nb--;
18195 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18196 return false;
18198 if (b)
18199 *b = nb;
18200 if (e)
18201 *e = ne;
18203 return true;
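/* Simplified standalone sketch, not in-tree code: the heart of the test
   above is the val & -val idiom, which isolates the lowest set bit.
   For a mask that does not wrap around, adding that bit fills the run
   of ones, so the mask is a single stretch of ones exactly when the sum
   is a power of two (or zero, when the run reaches the top bit).  */

static int
sketch_is_contiguous_ones (unsigned long long val)
{
  if (val == 0)
    return 0;
  unsigned long long low_bit = val & -val;	/* lowest set bit */
  unsigned long long sum = val + low_bit;	/* fills the run of ones */
  return (sum & (sum - 1)) == 0;		/* power of two, or zero */
}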
18206 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18207 or rldicr instruction, to implement an AND with it in mode MODE. */
18209 bool
18210 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18212 int nb, ne;
18214 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18215 return false;
18217 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18218 does not wrap. */
18219 if (mode == DImode)
18220 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18222 /* For SImode, rlwinm can do everything. */
18223 if (mode == SImode)
18224 return (nb < 32 && ne < 32);
18226 return false;
18229 /* Return the instruction template for an AND with mask in mode MODE, with
18230 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18232 const char *
18233 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18235 int nb, ne;
18237 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18238 gcc_unreachable ();
18240 if (mode == DImode && ne == 0)
18242 operands[3] = GEN_INT (63 - nb);
18243 if (dot)
18244 return "rldicl. %0,%1,0,%3";
18245 return "rldicl %0,%1,0,%3";
18248 if (mode == DImode && nb == 63)
18250 operands[3] = GEN_INT (63 - ne);
18251 if (dot)
18252 return "rldicr. %0,%1,0,%3";
18253 return "rldicr %0,%1,0,%3";
18256 if (nb < 32 && ne < 32)
18258 operands[3] = GEN_INT (31 - nb);
18259 operands[4] = GEN_INT (31 - ne);
18260 if (dot)
18261 return "rlwinm. %0,%1,0,%3,%4";
18262 return "rlwinm %0,%1,0,%3,%4";
18265 gcc_unreachable ();
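/* Worked example (not from the original file): an AND with 0xff in
   DImode is one stretch of ones with ne == 0 and nb == 7, so the first
   case above produces "rldicl %0,%1,0,56": rotate by zero and clear the
   56 high bits, keeping only the low byte.  */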
18268 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18269 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18270 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18272 bool
18273 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18275 int nb, ne;
18277 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18278 return false;
18280 int n = GET_MODE_PRECISION (mode);
18281 int sh = -1;
18283 if (CONST_INT_P (XEXP (shift, 1)))
18285 sh = INTVAL (XEXP (shift, 1));
18286 if (sh < 0 || sh >= n)
18287 return false;
18290 rtx_code code = GET_CODE (shift);
18292 /* Convert any shift by 0 to a rotate, to simplify below code. */
18293 if (sh == 0)
18294 code = ROTATE;
18296 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18297 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18298 code = ASHIFT;
18299 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18301 code = LSHIFTRT;
18302 sh = n - sh;
18305 /* DImode rotates need rld*. */
18306 if (mode == DImode && code == ROTATE)
18307 return (nb == 63 || ne == 0 || ne == sh);
18309 /* SImode rotates need rlw*. */
18310 if (mode == SImode && code == ROTATE)
18311 return (nb < 32 && ne < 32 && sh < 32);
18313 /* Wrap-around masks are only okay for rotates. */
18314 if (ne > nb)
18315 return false;
18317 /* Variable shifts are only okay for rotates. */
18318 if (sh < 0)
18319 return false;
18321 /* Don't allow ASHIFT if the mask is wrong for that. */
18322 if (code == ASHIFT && ne < sh)
18323 return false;
18325 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18326 if the mask is wrong for that. */
18327 if (nb < 32 && ne < 32 && sh < 32
18328 && !(code == LSHIFTRT && nb >= 32 - sh))
18329 return true;
18331 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18332 if the mask is wrong for that. */
18333 if (code == LSHIFTRT)
18334 sh = 64 - sh;
18335 if (nb == 63 || ne == 0 || ne == sh)
18336 return !(code == LSHIFTRT && nb >= sh);
18338 return false;
18341 /* Return the instruction template for a shift with mask in mode MODE, with
18342 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18344 const char *
18345 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18347 int nb, ne;
18349 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18350 gcc_unreachable ();
18352 if (mode == DImode && ne == 0)
18354 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18355 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18356 operands[3] = GEN_INT (63 - nb);
18357 if (dot)
18358 return "rld%I2cl. %0,%1,%2,%3";
18359 return "rld%I2cl %0,%1,%2,%3";
18362 if (mode == DImode && nb == 63)
18364 operands[3] = GEN_INT (63 - ne);
18365 if (dot)
18366 return "rld%I2cr. %0,%1,%2,%3";
18367 return "rld%I2cr %0,%1,%2,%3";
18370 if (mode == DImode
18371 && GET_CODE (operands[4]) != LSHIFTRT
18372 && CONST_INT_P (operands[2])
18373 && ne == INTVAL (operands[2]))
18375 operands[3] = GEN_INT (63 - nb);
18376 if (dot)
18377 return "rld%I2c. %0,%1,%2,%3";
18378 return "rld%I2c %0,%1,%2,%3";
18381 if (nb < 32 && ne < 32)
18383 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18384 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18385 operands[3] = GEN_INT (31 - nb);
18386 operands[4] = GEN_INT (31 - ne);
18387 /* This insn can also be a 64-bit rotate with mask that really makes
18388 it just a shift right (with mask); the %h below are to adjust for
18389 that situation (shift count is >= 32 in that case). */
18390 if (dot)
18391 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18392 return "rlw%I2nm %0,%1,%h2,%3,%4";
18395 gcc_unreachable ();
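/* Worked example (not from the original file): for (x >> 3) & 0x1f in
   SImode, the mask gives nb == 4 and ne == 0, the LSHIFTRT adjustment
   above turns the shift count into 32 - 3 == 29, and the template
   becomes "rlwinm %0,%1,29,27,31": a rotate right by 3 whose mask keeps
   only the low five bits.  */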
18398 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18399 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18400 ASHIFT, or LSHIFTRT) in mode MODE. */
18402 bool
18403 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18405 int nb, ne;
18407 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18408 return false;
18410 int n = GET_MODE_PRECISION (mode);
18412 int sh = INTVAL (XEXP (shift, 1));
18413 if (sh < 0 || sh >= n)
18414 return false;
18416 rtx_code code = GET_CODE (shift);
18418 /* Convert any shift by 0 to a rotate, to simplify below code. */
18419 if (sh == 0)
18420 code = ROTATE;
18422 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18423 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18424 code = ASHIFT;
18425 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18427 code = LSHIFTRT;
18428 sh = n - sh;
18431 /* DImode rotates need rldimi. */
18432 if (mode == DImode && code == ROTATE)
18433 return (ne == sh);
18435 /* SImode rotates need rlwimi. */
18436 if (mode == SImode && code == ROTATE)
18437 return (nb < 32 && ne < 32 && sh < 32);
18439 /* Wrap-around masks are only okay for rotates. */
18440 if (ne > nb)
18441 return false;
18443 /* Don't allow ASHIFT if the mask is wrong for that. */
18444 if (code == ASHIFT && ne < sh)
18445 return false;
18447 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18448 if the mask is wrong for that. */
18449 if (nb < 32 && ne < 32 && sh < 32
18450 && !(code == LSHIFTRT && nb >= 32 - sh))
18451 return true;
18453 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18454 if the mask is wrong for that. */
18455 if (code == LSHIFTRT)
18456 sh = 64 - sh;
18457 if (ne == sh)
18458 return !(code == LSHIFTRT && nb >= sh);
18460 return false;
18463 /* Return the instruction template for an insert with mask in mode MODE, with
18464 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18466 const char *
18467 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18469 int nb, ne;
18471 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18472 gcc_unreachable ();
18474 /* Prefer rldimi because rlwimi is cracked. */
18475 if (TARGET_POWERPC64
18476 && (!dot || mode == DImode)
18477 && GET_CODE (operands[4]) != LSHIFTRT
18478 && ne == INTVAL (operands[2]))
18480 operands[3] = GEN_INT (63 - nb);
18481 if (dot)
18482 return "rldimi. %0,%1,%2,%3";
18483 return "rldimi %0,%1,%2,%3";
18486 if (nb < 32 && ne < 32)
18488 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18489 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18490 operands[3] = GEN_INT (31 - nb);
18491 operands[4] = GEN_INT (31 - ne);
18492 if (dot)
18493 return "rlwimi. %0,%1,%2,%3,%4";
18494 return "rlwimi %0,%1,%2,%3,%4";
18497 gcc_unreachable ();
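/* Worked example (not from the original file): inserting a byte, as in
   (y & ~0xff00) | ((x << 8) & 0xff00) in SImode, gives ne == 8, nb == 15
   and a shift count of 8, so the rlwimi case above produces
   "rlwimi %0,%1,8,16,23", rotating x left by 8 and inserting bits 16..23
   (IBM bit numbering) into the destination.  */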
18500 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18501 using two machine instructions. */
18503 bool
18504 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18506 /* There are two kinds of AND we can handle with two insns:
18507 1) those we can do with two rl* insn;
18508 2) ori[s];xori[s].
18510 We do not handle that last case yet. */
18512 /* If there is just one stretch of ones, we can do it. */
18513 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18514 return true;
18516 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18517 one insn, we can do the whole thing with two. */
18518 unsigned HOST_WIDE_INT val = INTVAL (c);
18519 unsigned HOST_WIDE_INT bit1 = val & -val;
18520 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18521 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18522 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18523 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
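/* Standalone sketch with a worked value, not in-tree code: the bit
   juggling above fills the lowest hole of zeros in the constant.  With
   val = 0b1111001111:

     bit1 = val & -val		= 0b0000000001	lowest set bit
     bit2 = (val+bit1) & ~val	= 0b0000010000	lowest zero of the hole
     val1 = (val+bit1) & val	= 0b1111000000	the run above the hole
     bit3 = val1 & -val1	= 0b0001000000	first bit past the hole

   and val + bit3 - bit2 == 0b1111111111, i.e. every bit of the hole is
   now set.  */

static unsigned long long
sketch_fill_lowest_hole (unsigned long long val)
{
  unsigned long long bit1 = val & -val;
  unsigned long long bit2 = (val + bit1) & ~val;
  unsigned long long val1 = (val + bit1) & val;
  unsigned long long bit3 = val1 & -val1;
  return val + bit3 - bit2;
}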
18526 /* Emit a potentially record-form instruction, setting DST from SRC.
18527 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18528 signed comparison of DST with zero. If DOT is 1, the generated RTL
18529 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18530 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18531 a separate COMPARE. */
18533 static void
18534 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18536 if (dot == 0)
18538 emit_move_insn (dst, src);
18539 return;
18542 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18544 emit_move_insn (dst, src);
18545 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18546 return;
18549 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18550 if (dot == 1)
18552 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18553 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18555 else
18557 rtx set = gen_rtx_SET (dst, src);
18558 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18562 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18563 If EXPAND is true, split rotate-and-mask instructions we generate to
18564 their constituent parts as well (this is used during expand); if DOT
18565 is 1, make the last insn a record-form instruction clobbering the
18566 destination GPR and setting the CC reg (from operands[3]); if 2, set
18567 that GPR as well as the CC reg. */
18569 void
18570 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18572 gcc_assert (!(expand && dot));
18574 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18576 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18577 shift right. This generates better code than doing the masks without
18578 shifts, or shifting first right and then left. */
18579 int nb, ne;
18580 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18582 gcc_assert (mode == DImode);
18584 int shift = 63 - nb;
18585 if (expand)
18587 rtx tmp1 = gen_reg_rtx (DImode);
18588 rtx tmp2 = gen_reg_rtx (DImode);
18589 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18590 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18591 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18593 else
18595 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18596 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18597 emit_move_insn (operands[0], tmp);
18598 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18599 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18601 return;
18604 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18605 that does the rest. */
18606 unsigned HOST_WIDE_INT bit1 = val & -val;
18607 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18608 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18609 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18611 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18612 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18614 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18616 /* Two "no-rotate"-and-mask instructions, for SImode. */
18617 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18619 gcc_assert (mode == SImode);
18621 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18622 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18623 emit_move_insn (reg, tmp);
18624 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18625 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18626 return;
18629 gcc_assert (mode == DImode);
18631 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18632 insns; we have to do the first in SImode, because it wraps. */
18633 if (mask2 <= 0xffffffff
18634 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18636 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18637 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18638 GEN_INT (mask1));
18639 rtx reg_low = gen_lowpart (SImode, reg);
18640 emit_move_insn (reg_low, tmp);
18641 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18642 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18643 return;
18646 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18647 at the top end), rotate back and clear the other hole. */
18648 int right = exact_log2 (bit3);
18649 int left = 64 - right;
18651 /* Rotate the mask too. */
18652 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18654 if (expand)
18656 rtx tmp1 = gen_reg_rtx (DImode);
18657 rtx tmp2 = gen_reg_rtx (DImode);
18658 rtx tmp3 = gen_reg_rtx (DImode);
18659 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18660 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18661 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18662 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18664 else
18666 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18667 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18668 emit_move_insn (operands[0], tmp);
18669 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18670 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18671 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18675 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
18676 for lfq and stfq insns iff the registers are hard registers. */
18678 int
18679 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18681 /* We might have been passed a SUBREG. */
18682 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18683 return 0;
18685 /* We might have been passed non floating point registers. */
18686 if (!FP_REGNO_P (REGNO (reg1))
18687 || !FP_REGNO_P (REGNO (reg2)))
18688 return 0;
18690 return (REGNO (reg1) == REGNO (reg2) - 1);
18693 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18694 addr1 and addr2 must be in consecutive memory locations
18695 (addr2 == addr1 + 8). */
18697 int
18698 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18700 rtx addr1, addr2;
18701 unsigned int reg1, reg2;
18702 int offset1, offset2;
18704 /* The mems cannot be volatile. */
18705 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18706 return 0;
18708 addr1 = XEXP (mem1, 0);
18709 addr2 = XEXP (mem2, 0);
18711 /* Extract an offset (if used) from the first addr. */
18712 if (GET_CODE (addr1) == PLUS)
18714 /* If not a REG, return zero. */
18715 if (GET_CODE (XEXP (addr1, 0)) != REG)
18716 return 0;
18717 else
18719 reg1 = REGNO (XEXP (addr1, 0));
18720 /* The offset must be constant! */
18721 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18722 return 0;
18723 offset1 = INTVAL (XEXP (addr1, 1));
18726 else if (GET_CODE (addr1) != REG)
18727 return 0;
18728 else
18730 reg1 = REGNO (addr1);
18731 /* This was a simple (mem (reg)) expression. Offset is 0. */
18732 offset1 = 0;
18735 /* And now for the second addr. */
18736 if (GET_CODE (addr2) == PLUS)
18738 /* If not a REG, return zero. */
18739 if (GET_CODE (XEXP (addr2, 0)) != REG)
18740 return 0;
18741 else
18743 reg2 = REGNO (XEXP (addr2, 0));
18744 /* The offset must be constant. */
18745 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18746 return 0;
18747 offset2 = INTVAL (XEXP (addr2, 1));
18750 else if (GET_CODE (addr2) != REG)
18751 return 0;
18752 else
18754 reg2 = REGNO (addr2);
18755 /* This was a simple (mem (reg)) expression. Offset is 0. */
18756 offset2 = 0;
18759 /* Both of these must have the same base register. */
18760 if (reg1 != reg2)
18761 return 0;
18763 /* The offset for the second addr must be 8 more than the first addr. */
18764 if (offset2 != offset1 + 8)
18765 return 0;
18767 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18768 instructions. */
18769 return 1;
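/* Worked example (not from the original file): a pair such as

	lfd 14,8(3)
	lfd 15,16(3)

   satisfies both predicates above -- consecutive hard FPRs, the same
   base register, and a second offset exactly 8 more than the first --
   and is therefore a candidate for a single lfq; the register numbers
   and offsets are purely illustrative.  */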
18773 rtx
18774 rs6000_secondary_memory_needed_rtx (machine_mode mode)
18776 static bool eliminated = false;
18777 rtx ret;
18779 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
18780 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
18781 else
18783 rtx mem = cfun->machine->sdmode_stack_slot;
18784 gcc_assert (mem != NULL_RTX);
18786 if (!eliminated)
18788 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
18789 cfun->machine->sdmode_stack_slot = mem;
18790 eliminated = true;
18792 ret = mem;
18795 if (TARGET_DEBUG_ADDR)
18797 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
18798 GET_MODE_NAME (mode));
18799 if (!ret)
18800 fprintf (stderr, "\tNULL_RTX\n");
18801 else
18802 debug_rtx (ret);
18805 return ret;
18808 /* Return the mode to be used for memory when a secondary memory
18809 location is needed. For SDmode values we need to use DDmode, in
18810 all other cases we can use the same mode. */
18811 machine_mode
18812 rs6000_secondary_memory_needed_mode (machine_mode mode)
18814 if (lra_in_progress && mode == SDmode)
18815 return DDmode;
18816 return mode;
18819 static tree
18820 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
18822 /* Don't walk into types. */
18823 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
18825 *walk_subtrees = 0;
18826 return NULL_TREE;
18829 switch (TREE_CODE (*tp))
18831 case VAR_DECL:
18832 case PARM_DECL:
18833 case FIELD_DECL:
18834 case RESULT_DECL:
18835 case SSA_NAME:
18836 case REAL_CST:
18837 case MEM_REF:
18838 case VIEW_CONVERT_EXPR:
18839 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
18840 return *tp;
18841 break;
18842 default:
18843 break;
18846 return NULL_TREE;
18849 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18850 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18851 only work on the traditional altivec registers, note if an altivec register
18852 was chosen. */
18854 static enum rs6000_reg_type
18855 register_to_reg_type (rtx reg, bool *is_altivec)
18857 HOST_WIDE_INT regno;
18858 enum reg_class rclass;
18860 if (GET_CODE (reg) == SUBREG)
18861 reg = SUBREG_REG (reg);
18863 if (!REG_P (reg))
18864 return NO_REG_TYPE;
18866 regno = REGNO (reg);
18867 if (regno >= FIRST_PSEUDO_REGISTER)
18869 if (!lra_in_progress && !reload_in_progress && !reload_completed)
18870 return PSEUDO_REG_TYPE;
18872 regno = true_regnum (reg);
18873 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18874 return PSEUDO_REG_TYPE;
18877 gcc_assert (regno >= 0);
18879 if (is_altivec && ALTIVEC_REGNO_P (regno))
18880 *is_altivec = true;
18882 rclass = rs6000_regno_regclass[regno];
18883 return reg_class_to_reg_type[(int)rclass];
18886 /* Helper function to return the cost of adding a TOC entry address. */
18888 static inline int
18889 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18891 int ret;
18893 if (TARGET_CMODEL != CMODEL_SMALL)
18894 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18896 else
18897 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18899 return ret;
18902 /* Helper function for rs6000_secondary_reload to determine whether the memory
18903 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18904 needs reloading. Return negative if the memory is not handled by the memory
18905 helper functions (so a different reload method should be tried), 0 if no
18906 additional instructions are needed, and positive to give the extra cost for the
18907 memory. */
18909 static int
18910 rs6000_secondary_reload_memory (rtx addr,
18911 enum reg_class rclass,
18912 machine_mode mode)
18914 int extra_cost = 0;
18915 rtx reg, and_arg, plus_arg0, plus_arg1;
18916 addr_mask_type addr_mask;
18917 const char *type = NULL;
18918 const char *fail_msg = NULL;
18920 if (GPR_REG_CLASS_P (rclass))
18921 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18923 else if (rclass == FLOAT_REGS)
18924 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18926 else if (rclass == ALTIVEC_REGS)
18927 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18929 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18930 else if (rclass == VSX_REGS)
18931 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18932 & ~RELOAD_REG_AND_M16);
18934 /* If the register allocator hasn't made up its mind yet on the register
18935 class to use, settle on defaults to use. */
18936 else if (rclass == NO_REGS)
18938 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18939 & ~RELOAD_REG_AND_M16);
18941 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18942 addr_mask &= ~(RELOAD_REG_INDEXED
18943 | RELOAD_REG_PRE_INCDEC
18944 | RELOAD_REG_PRE_MODIFY);
18947 else
18948 addr_mask = 0;
18950 /* If the register isn't valid in this register class, just return now. */
18951 if ((addr_mask & RELOAD_REG_VALID) == 0)
18953 if (TARGET_DEBUG_ADDR)
18955 fprintf (stderr,
18956 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18957 "not valid in class\n",
18958 GET_MODE_NAME (mode), reg_class_names[rclass]);
18959 debug_rtx (addr);
18962 return -1;
18965 switch (GET_CODE (addr))
18967 /* Does the register class support auto update forms for this mode? We
18968 don't need a scratch register, since the powerpc only supports
18969 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18970 case PRE_INC:
18971 case PRE_DEC:
18972 reg = XEXP (addr, 0);
18973 if (!base_reg_operand (addr, GET_MODE (reg)))
18975 fail_msg = "no base register #1";
18976 extra_cost = -1;
18979 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18981 extra_cost = 1;
18982 type = "update";
18984 break;
18986 case PRE_MODIFY:
18987 reg = XEXP (addr, 0);
18988 plus_arg1 = XEXP (addr, 1);
18989 if (!base_reg_operand (reg, GET_MODE (reg))
18990 || GET_CODE (plus_arg1) != PLUS
18991 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18993 fail_msg = "bad PRE_MODIFY";
18994 extra_cost = -1;
18997 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18999 extra_cost = 1;
19000 type = "update";
19002 break;
19004 /* Do we need to simulate AND -16 to clear the bottom address bits used
19005 in VMX load/stores? Only allow the AND for vector sizes. */
19006 case AND:
19007 and_arg = XEXP (addr, 0);
19008 if (GET_MODE_SIZE (mode) != 16
19009 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19010 || INTVAL (XEXP (addr, 1)) != -16)
19012 fail_msg = "bad Altivec AND #1";
19013 extra_cost = -1;
19016 if (rclass != ALTIVEC_REGS)
19018 if (legitimate_indirect_address_p (and_arg, false))
19019 extra_cost = 1;
19021 else if (legitimate_indexed_address_p (and_arg, false))
19022 extra_cost = 2;
19024 else
19026 fail_msg = "bad Altivec AND #2";
19027 extra_cost = -1;
19030 type = "and";
19032 break;
19034 /* If this is an indirect address, make sure it is a base register. */
19035 case REG:
19036 case SUBREG:
19037 if (!legitimate_indirect_address_p (addr, false))
19039 extra_cost = 1;
19040 type = "move";
19042 break;
19044 /* If this is an indexed address, make sure the register class can handle
19045 indexed addresses for this mode. */
19046 case PLUS:
19047 plus_arg0 = XEXP (addr, 0);
19048 plus_arg1 = XEXP (addr, 1);
19050 /* (plus (plus (reg) (constant)) (constant)) is generated during
19051 push_reload processing, so handle it now. */
19052 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19054 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19056 extra_cost = 1;
19057 type = "offset";
19061 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19062 push_reload processing, so handle it now. */
19063 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19065 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19067 extra_cost = 1;
19068 type = "indexed #2";
19072 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19074 fail_msg = "no base register #2";
19075 extra_cost = -1;
19078 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19080 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19081 || !legitimate_indexed_address_p (addr, false))
19083 extra_cost = 1;
19084 type = "indexed";
19088 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19089 && CONST_INT_P (plus_arg1))
19091 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19093 extra_cost = 1;
19094 type = "vector d-form offset";
19098 /* Make sure the register class can handle offset addresses. */
19099 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19101 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19103 extra_cost = 1;
19104 type = "offset #2";
19108 else
19110 fail_msg = "bad PLUS";
19111 extra_cost = -1;
19114 break;
19116 case LO_SUM:
19117 /* Quad offsets are restricted and can't handle normal addresses. */
19118 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19120 extra_cost = -1;
19121 type = "vector d-form lo_sum";
19124 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19126 fail_msg = "bad LO_SUM";
19127 extra_cost = -1;
19130 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19132 extra_cost = 1;
19133 type = "lo_sum";
19135 break;
19137 /* Static addresses need to create a TOC entry. */
19138 case CONST:
19139 case SYMBOL_REF:
19140 case LABEL_REF:
19141 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19143 extra_cost = -1;
19144 type = "vector d-form lo_sum #2";
19147 else
19149 type = "address";
19150 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19152 break;
19154 /* TOC references look like offsettable memory. */
19155 case UNSPEC:
19156 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19158 fail_msg = "bad UNSPEC";
19159 extra_cost = -1;
19162 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19164 extra_cost = -1;
19165 type = "vector d-form lo_sum #3";
19168 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19170 extra_cost = 1;
19171 type = "toc reference";
19173 break;
19175 default:
19177 fail_msg = "bad address";
19178 extra_cost = -1;
19182 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19184 if (extra_cost < 0)
19185 fprintf (stderr,
19186 "rs6000_secondary_reload_memory error: mode = %s, "
19187 "class = %s, addr_mask = '%s', %s\n",
19188 GET_MODE_NAME (mode),
19189 reg_class_names[rclass],
19190 rs6000_debug_addr_mask (addr_mask, false),
19191 (fail_msg != NULL) ? fail_msg : "<bad address>");
19193 else
19194 fprintf (stderr,
19195 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19196 "addr_mask = '%s', extra cost = %d, %s\n",
19197 GET_MODE_NAME (mode),
19198 reg_class_names[rclass],
19199 rs6000_debug_addr_mask (addr_mask, false),
19200 extra_cost,
19201 (type) ? type : "<none>");
19203 debug_rtx (addr);
19206 return extra_cost;
19209 /* Helper function for rs6000_secondary_reload to return true if a move to a
19210 different register class is really a simple move. */
19212 static bool
19213 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19214 enum rs6000_reg_type from_type,
19215 machine_mode mode)
19217 int size;
19219 /* Add support for various direct moves available. In this function, we only
19220 look at cases where we don't need any extra registers, and one or more
19221 simple move insns are issued. At present, 32-bit integers are not allowed
19223 in FPR/VSX registers. Single precision binary floating point is not a simple
19223 move because we need to convert to the single precision memory layout.
19224 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19225 need special direct move handling, which we do not support yet. */
19226 size = GET_MODE_SIZE (mode);
19227 if (TARGET_DIRECT_MOVE
19228 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
19229 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19230 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19231 return true;
19233 else if (TARGET_DIRECT_MOVE_128 && size == 16 && mode != TDmode
19234 && ((to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19235 || (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)))
19236 return true;
19238 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19239 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19240 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19241 return true;
19243 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19244 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19245 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19246 return true;
19248 return false;
19251 /* Direct move helper function for rs6000_secondary_reload, handle all of the
19252 special direct moves that involve allocating an extra register, return the
19253 insn code of the helper function if there is such a function or
19254 CODE_FOR_nothing if not. */
19256 static bool
19257 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19258 enum rs6000_reg_type from_type,
19259 machine_mode mode,
19260 secondary_reload_info *sri,
19261 bool altivec_p)
19263 bool ret = false;
19264 enum insn_code icode = CODE_FOR_nothing;
19265 int cost = 0;
19266 int size = GET_MODE_SIZE (mode);
19268 if (TARGET_POWERPC64)
19270 if (size == 16)
19272 /* Handle moving 128-bit values from GPRs to VSX registers on
19273 ISA 2.07 (power8, power9) when running in 64-bit mode using
19274 XXPERMDI to glue the two 64-bit values back together. */
19275 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19277 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19278 icode = reg_addr[mode].reload_vsx_gpr;
19281 /* Handle moving 128-bit values from VSX registers to GPRs on
19282 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19283 bottom 64-bit value. */
19284 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19286 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19287 icode = reg_addr[mode].reload_gpr_vsx;
19291 else if (mode == SFmode)
19293 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19295 cost = 3; /* xscvdpspn, mfvsrd, and. */
19296 icode = reg_addr[mode].reload_gpr_vsx;
19299 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19301 cost = 2; /* mtvsrz, xscvspdpn. */
19302 icode = reg_addr[mode].reload_vsx_gpr;
19307 if (TARGET_POWERPC64 && size == 16)
19309 /* Handle moving 128-bit values from GPRs to VSX registers on
19310 ISA 2.07 when running in 64-bit mode using XXPERMDI to glue the two
19311 64-bit values back together. */
19312 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19314 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19315 icode = reg_addr[mode].reload_vsx_gpr;
19318 /* Handle moving 128-bit values from VSX registers to GPRs on
19319 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19320 bottom 64-bit value. */
19321 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19323 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19324 icode = reg_addr[mode].reload_gpr_vsx;
19328 else if (!TARGET_POWERPC64 && size == 8)
19330 /* Handle moving 64-bit values from GPRs to floating point registers on
19331 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19332 32-bit values back together. Altivec register classes must be handled
19333 specially since a different instruction is used, and the secondary
19334 reload support requires a single instruction class in the scratch
19335 register constraint. However, right now TFmode is not allowed in
19336 Altivec registers, so the pattern will never match. */
19337 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19339 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19340 icode = reg_addr[mode].reload_fpr_gpr;
19344 if (icode != CODE_FOR_nothing)
19346 ret = true;
19347 if (sri)
19349 sri->icode = icode;
19350 sri->extra_cost = cost;
19354 return ret;
19357 /* Return whether a move between two register classes can be done either
19358 directly (simple move) or via a pattern that uses a single extra temporary
19359 (using ISA 2.07's direct move in this case). */
19361 static bool
19362 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19363 enum rs6000_reg_type from_type,
19364 machine_mode mode,
19365 secondary_reload_info *sri,
19366 bool altivec_p)
19368 /* Fall back to load/store reloads if either type is not a register. */
19369 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19370 return false;
19372 /* If we haven't allocated registers yet, assume the move can be done for the
19373 standard register types. */
19374 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19375 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19376 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19377 return true;
19379 /* A move within the same set of registers is a simple move for non-specialized
19380 registers. */
19381 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19382 return true;
19384 /* Check whether a simple move can be done directly. */
19385 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19387 if (sri)
19389 sri->icode = CODE_FOR_nothing;
19390 sri->extra_cost = 0;
19392 return true;
19395 /* Now check if we can do it in a few steps. */
19396 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19397 altivec_p);
19400 /* Inform reload about cases where moving X with a mode MODE to a register in
19401 RCLASS requires an extra scratch or immediate register. Return the class
19402 needed for the immediate register.
19404 For VSX and Altivec, we may need a register to convert sp+offset into
19405 reg+sp.
19407 For misaligned 64-bit gpr loads and stores we need a register to
19408 convert an offset address to indirect. */
19410 static reg_class_t
19411 rs6000_secondary_reload (bool in_p,
19412 rtx x,
19413 reg_class_t rclass_i,
19414 machine_mode mode,
19415 secondary_reload_info *sri)
19417 enum reg_class rclass = (enum reg_class) rclass_i;
19418 reg_class_t ret = ALL_REGS;
19419 enum insn_code icode;
19420 bool default_p = false;
19421 bool done_p = false;
19423 /* Allow subreg of memory before/during reload. */
19424 bool memory_p = (MEM_P (x)
19425 || (!reload_completed && GET_CODE (x) == SUBREG
19426 && MEM_P (SUBREG_REG (x))));
19428 sri->icode = CODE_FOR_nothing;
19429 sri->t_icode = CODE_FOR_nothing;
19430 sri->extra_cost = 0;
19431 icode = ((in_p)
19432 ? reg_addr[mode].reload_load
19433 : reg_addr[mode].reload_store);
19435 if (REG_P (x) || register_operand (x, mode))
19437 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19438 bool altivec_p = (rclass == ALTIVEC_REGS);
19439 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19441 if (!in_p)
19443 enum rs6000_reg_type exchange = to_type;
19444 to_type = from_type;
19445 from_type = exchange;
19448 /* Can we do a direct move of some sort? */
19449 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19450 altivec_p))
19452 icode = (enum insn_code)sri->icode;
19453 default_p = false;
19454 done_p = true;
19455 ret = NO_REGS;
19459 /* Make sure 0.0 is not reloaded or forced into memory. */
19460 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19462 ret = NO_REGS;
19463 default_p = false;
19464 done_p = true;
19467 /* If this is a scalar floating point value and we want to load it into the
19468 traditional Altivec registers, do it via a move via a traditional floating
19469 point register, unless we have D-form addressing. Also make sure that
19470 non-zero constants use a FPR. */
19471 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19472 && !mode_supports_vmx_dform (mode)
19473 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19474 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19476 ret = FLOAT_REGS;
19477 default_p = false;
19478 done_p = true;
19481 /* Handle reload of load/stores if we have reload helper functions. */
19482 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19484 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19485 mode);
19487 if (extra_cost >= 0)
19489 done_p = true;
19490 ret = NO_REGS;
19491 if (extra_cost > 0)
19493 sri->extra_cost = extra_cost;
19494 sri->icode = icode;
19499 /* Handle unaligned loads and stores of integer registers. */
19500 if (!done_p && TARGET_POWERPC64
19501 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19502 && memory_p
19503 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19505 rtx addr = XEXP (x, 0);
19506 rtx off = address_offset (addr);
19508 if (off != NULL_RTX)
19510 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19511 unsigned HOST_WIDE_INT offset = INTVAL (off);
19513 /* We need a secondary reload when our legitimate_address_p
19514 says the address is good (as otherwise the entire address
19515 will be reloaded), and the offset is not a multiple of
19516 four or we have an address wrap. Address wrap will only
19517 occur for LO_SUMs since legitimate_offset_address_p
19518 rejects addresses for 16-byte mems that will wrap. */
19519 if (GET_CODE (addr) == LO_SUM
19520 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19521 && ((offset & 3) != 0
19522 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19523 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19524 && (offset & 3) != 0))
19526 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19527 if (in_p)
19528 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19529 : CODE_FOR_reload_di_load);
19530 else
19531 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19532 : CODE_FOR_reload_di_store);
19533 sri->extra_cost = 2;
19534 ret = NO_REGS;
19535 done_p = true;
19537 else
19538 default_p = true;
19540 else
19541 default_p = true;
19544 if (!done_p && !TARGET_POWERPC64
19545 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19546 && memory_p
19547 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19549 rtx addr = XEXP (x, 0);
19550 rtx off = address_offset (addr);
19552 if (off != NULL_RTX)
19554 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19555 unsigned HOST_WIDE_INT offset = INTVAL (off);
19557 /* We need a secondary reload when our legitimate_address_p
19558 says the address is good (as otherwise the entire address
19559 will be reloaded), and we have a wrap.
19561 legitimate_lo_sum_address_p allows LO_SUM addresses to
19562 have any offset so test for wrap in the low 16 bits.
19564 legitimate_offset_address_p checks for the range
19565 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19566 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19567 [0x7ff4,0x7fff] respectively, so test for the
19568 intersection of these ranges, [0x7ffc,0x7fff] and
19569 [0x7ff4,0x7ff7] respectively.
19571 Note that the address we see here may have been
19572 manipulated by legitimize_reload_address. */
19573 if (GET_CODE (addr) == LO_SUM
19574 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19575 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19577 if (in_p)
19578 sri->icode = CODE_FOR_reload_si_load;
19579 else
19580 sri->icode = CODE_FOR_reload_si_store;
19581 sri->extra_cost = 2;
19582 ret = NO_REGS;
19583 done_p = true;
19585 else
19586 default_p = true;
19588 else
19589 default_p = true;
19592 if (!done_p)
19593 default_p = true;
19595 if (default_p)
19596 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19598 gcc_assert (ret != ALL_REGS);
19600 if (TARGET_DEBUG_ADDR)
19602 fprintf (stderr,
19603 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19604 "mode = %s",
19605 reg_class_names[ret],
19606 in_p ? "true" : "false",
19607 reg_class_names[rclass],
19608 GET_MODE_NAME (mode));
19610 if (reload_completed)
19611 fputs (", after reload", stderr);
19613 if (!done_p)
19614 fputs (", done_p not set", stderr);
19616 if (default_p)
19617 fputs (", default secondary reload", stderr);
19619 if (sri->icode != CODE_FOR_nothing)
19620 fprintf (stderr, ", reload func = %s, extra cost = %d",
19621 insn_data[sri->icode].name, sri->extra_cost);
19623 else if (sri->extra_cost > 0)
19624 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19626 fputs ("\n", stderr);
19627 debug_rtx (x);
19630 return ret;
19633 /* Better tracing for rs6000_secondary_reload_inner. */
19635 static void
19636 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19637 bool store_p)
19639 rtx set, clobber;
19641 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19643 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19644 store_p ? "store" : "load");
19646 if (store_p)
19647 set = gen_rtx_SET (mem, reg);
19648 else
19649 set = gen_rtx_SET (reg, mem);
19651 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19652 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19655 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19656 ATTRIBUTE_NORETURN;
19658 static void
19659 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19660 bool store_p)
19662 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19663 gcc_unreachable ();
19666 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19667 reload helper functions. These were identified in
19668 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19669 reload, it calls the insns:
19670 reload_<RELOAD:mode>_<P:mptrsize>_store
19671 reload_<RELOAD:mode>_<P:mptrsize>_load
19673 which in turn calls this function, to do whatever is necessary to create
19674 valid addresses. */
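/* For example, reloading a V2DF value on a 64-bit target would use
   reload_v2df_di_store and reload_v2df_di_load; the P:mptrsize piece
   of the name is presumably "si" or "di" according to the pointer
   size.  */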
19676 void
19677 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19679 int regno = true_regnum (reg);
19680 machine_mode mode = GET_MODE (reg);
19681 addr_mask_type addr_mask;
19682 rtx addr;
19683 rtx new_addr;
19684 rtx op_reg, op0, op1;
19685 rtx and_op;
19686 rtx cc_clobber;
19687 rtvec rv;
19689 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19690 || !base_reg_operand (scratch, GET_MODE (scratch)))
19691 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19693 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19694 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19696 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19697 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19699 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19700 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19702 else
19703 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19705 /* Make sure the mode is valid in this register class. */
19706 if ((addr_mask & RELOAD_REG_VALID) == 0)
19707 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19709 if (TARGET_DEBUG_ADDR)
19710 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19712 new_addr = addr = XEXP (mem, 0);
19713 switch (GET_CODE (addr))
19715 /* Does the register class support auto update forms for this mode? If
19716 not, do the update now. We don't need a scratch register, since the
19717 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
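/* Sketch: (mem:V4SI (pre_inc (reg 9))) reloaded into a class with no
   update forms becomes an explicit update of the base register, i.e.
   "addi 9,9,16", followed by a plain (mem:V4SI (reg 9)) access
   (a PRE_DEC subtracts the mode size instead).  */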
19718 case PRE_INC:
19719 case PRE_DEC:
19720 op_reg = XEXP (addr, 0);
19721 if (!base_reg_operand (op_reg, Pmode))
19722 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19724 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19726 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_CODE (addr) == PRE_DEC ? -GET_MODE_SIZE (mode) : GET_MODE_SIZE (mode))));
19727 new_addr = op_reg;
19729 break;
19731 case PRE_MODIFY:
19732 op0 = XEXP (addr, 0);
19733 op1 = XEXP (addr, 1);
19734 if (!base_reg_operand (op0, Pmode)
19735 || GET_CODE (op1) != PLUS
19736 || !rtx_equal_p (op0, XEXP (op1, 0)))
19737 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19739 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19741 emit_insn (gen_rtx_SET (op0, op1));
19742 new_addr = op0;
19744 break;
19746 /* Do we need to simulate AND -16 to clear the bottom address bits used
19747 in VMX load/stores? */
19748 case AND:
19749 op0 = XEXP (addr, 0);
19750 op1 = XEXP (addr, 1);
19751 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19753 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19754 op_reg = op0;
19756 else if (GET_CODE (op0) == PLUS)
19758 emit_insn (gen_rtx_SET (scratch, op0));
19759 op_reg = scratch;
19762 else
19763 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19765 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19766 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19767 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19768 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19769 new_addr = scratch;
19771 break;
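/* Sketch of the rewrite for, e.g., (and (plus r9 r10) -16): the PLUS
   is first copied into the scratch register, then scratch = scratch
   & -16 is emitted inside a PARALLEL with a CCmode clobber so it can
   match the target's and-with-CR-clobber patterns.  */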
19773 /* If this is an indirect address, make sure it is a base register. */
19774 case REG:
19775 case SUBREG:
19776 if (!base_reg_operand (addr, GET_MODE (addr)))
19778 emit_insn (gen_rtx_SET (scratch, addr));
19779 new_addr = scratch;
19781 break;
19783 /* If this is an indexed address, make sure the register class can handle
19784 indexed addresses for this mode. */
19785 case PLUS:
19786 op0 = XEXP (addr, 0);
19787 op1 = XEXP (addr, 1);
19788 if (!base_reg_operand (op0, Pmode))
19789 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19791 else if (int_reg_operand (op1, Pmode))
19793 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19795 emit_insn (gen_rtx_SET (scratch, addr));
19796 new_addr = scratch;
19800 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
19802 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19803 || !quad_address_p (addr, mode, false))
19805 emit_insn (gen_rtx_SET (scratch, addr));
19806 new_addr = scratch;
19810 /* Make sure the register class can handle offset addresses. */
19811 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19813 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19815 emit_insn (gen_rtx_SET (scratch, addr));
19816 new_addr = scratch;
19820 else
19821 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19823 break;
19825 case LO_SUM:
19826 op0 = XEXP (addr, 0);
19827 op1 = XEXP (addr, 1);
19828 if (!base_reg_operand (op0, Pmode))
19829 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19831 else if (int_reg_operand (op1, Pmode))
19833 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19835 emit_insn (gen_rtx_SET (scratch, addr));
19836 new_addr = scratch;
19840 /* Quad offsets are restricted and can't handle normal addresses. */
19841 else if (mode_supports_vsx_dform_quad (mode))
19843 emit_insn (gen_rtx_SET (scratch, addr));
19844 new_addr = scratch;
19847 /* Make sure the register class can handle offset addresses. */
19848 else if (legitimate_lo_sum_address_p (mode, addr, false))
19850 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19852 emit_insn (gen_rtx_SET (scratch, addr));
19853 new_addr = scratch;
19857 else
19858 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19860 break;
19862 case SYMBOL_REF:
19863 case CONST:
19864 case LABEL_REF:
19865 rs6000_emit_move (scratch, addr, Pmode);
19866 new_addr = scratch;
19867 break;
19869 default:
19870 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19873 /* Adjust the address if it changed. */
19874 if (addr != new_addr)
19876 mem = replace_equiv_address_nv (mem, new_addr);
19877 if (TARGET_DEBUG_ADDR)
19878 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19881 /* Now create the move. */
19882 if (store_p)
19883 emit_insn (gen_rtx_SET (mem, reg));
19884 else
19885 emit_insn (gen_rtx_SET (reg, mem));
19887 return;
19890 /* Convert reloads involving 64-bit gprs and misaligned offset
19891 addressing, or multiple 32-bit gprs and offsets that are too large,
19892 to use indirect addressing. */
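/* Sketch: with -m32 -mpowerpc64, a reference such as
   (mem:DI (plus r9 (const_int 6))) cannot use the DS-form ld/std
   instructions (their displacement must be a multiple of 4), so the
   address is moved into the scratch register and the access becomes
   (mem:DI (reg scratch)).  */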
19894 void
19895 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19897 int regno = true_regnum (reg);
19898 enum reg_class rclass;
19899 rtx addr;
19900 rtx scratch_or_premodify = scratch;
19902 if (TARGET_DEBUG_ADDR)
19904 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19905 store_p ? "store" : "load");
19906 fprintf (stderr, "reg:\n");
19907 debug_rtx (reg);
19908 fprintf (stderr, "mem:\n");
19909 debug_rtx (mem);
19910 fprintf (stderr, "scratch:\n");
19911 debug_rtx (scratch);
19914 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
19915 gcc_assert (GET_CODE (mem) == MEM);
19916 rclass = REGNO_REG_CLASS (regno);
19917 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19918 addr = XEXP (mem, 0);
19920 if (GET_CODE (addr) == PRE_MODIFY)
19922 gcc_assert (REG_P (XEXP (addr, 0))
19923 && GET_CODE (XEXP (addr, 1)) == PLUS
19924 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19925 scratch_or_premodify = XEXP (addr, 0);
19926 if (!HARD_REGISTER_P (scratch_or_premodify))
19927 /* If we have a pseudo here then reload will have arranged
19928 to have it replaced, but only in the original insn.
19929 Use the replacement here too. */
19930 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
19932 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
19933 expressions from the original insn, without unsharing them.
19934 Any RTL that points into the original insn will of course
19935 have register replacements applied. That is why we don't
19936 need to look for replacements under the PLUS. */
19937 addr = XEXP (addr, 1);
19939 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19941 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19943 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19945 /* Now create the move. */
19946 if (store_p)
19947 emit_insn (gen_rtx_SET (mem, reg));
19948 else
19949 emit_insn (gen_rtx_SET (reg, mem));
19951 return;
19954 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
19955 this function has any SDmode references. If we are on a power7 or later, we
19956 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
19957 can load/store the value. */
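/* The slot below is allocated in DDmode (8 bytes) but accessed as
   SDmode, presumably because without LFIWZX/STFIWX the FPRs can only
   transfer a 4-byte SDmode value through memory using the 8-byte
   lfd/stfd instructions on the containing doubleword.  */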
19959 static void
19960 rs6000_alloc_sdmode_stack_slot (void)
19962 tree t;
19963 basic_block bb;
19964 gimple_stmt_iterator gsi;
19966 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
19967 /* We use a different approach for dealing with the secondary
19968 memory in LRA. */
19969 if (ira_use_lra_p)
19970 return;
19972 if (TARGET_NO_SDMODE_STACK)
19973 return;
19975 FOR_EACH_BB_FN (bb, cfun)
19976 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
19978 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
19979 if (ret)
19981 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
19982 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
19983 SDmode, 0);
19984 return;
19988 /* Check for any SDmode parameters of the function. */
19989 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
19991 if (TREE_TYPE (t) == error_mark_node)
19992 continue;
19994 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
19995 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
19997 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
19998 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
19999 SDmode, 0);
20000 return;
20005 static void
20006 rs6000_instantiate_decls (void)
20008 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
20009 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
20012 /* Given an rtx X being reloaded into a reg required to be
20013 in class CLASS, return the class of reg to actually use.
20014 In general this is just CLASS; but on some machines
20015 in some cases it is preferable to use a more restrictive class.
20017 On the RS/6000, we have to return NO_REGS when we want to reload a
20018 floating-point CONST_DOUBLE to force it to be copied to memory.
20020 We also don't want to reload integer values into floating-point
20021 registers if we can at all help it. In fact, this can
20022 cause reload to die, if it tries to generate a reload of CTR
20023 into a FP register and discovers it doesn't have the memory location
20024 required.
20026 ??? Would it be a good idea to have reload do the converse, that is
20027 try to reload floating modes into FP registers if possible?
20028 */
20030 static enum reg_class
20031 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20033 machine_mode mode = GET_MODE (x);
20034 bool is_constant = CONSTANT_P (x);
20036 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20037 reload class for it. */
20038 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20039 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20040 return NO_REGS;
20042 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20043 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20044 return NO_REGS;
20046 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20047 the reloading of address expressions using PLUS into floating point
20048 registers. */
20049 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20051 if (is_constant)
20053 /* Zero is always allowed in all VSX registers. */
20054 if (x == CONST0_RTX (mode))
20055 return rclass;
20057 /* If this is a vector constant that can be formed with a few Altivec
20058 instructions, we want altivec registers. */
20059 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20060 return ALTIVEC_REGS;
20062 /* Force constant to memory. */
20063 return NO_REGS;
20066 /* D-form addressing can easily reload the value. */
20067 if (mode_supports_vmx_dform (mode)
20068 || mode_supports_vsx_dform_quad (mode))
20069 return rclass;
20071 /* If this is a scalar floating point value and we don't have D-form
20072 addressing, prefer the traditional floating point registers so that we
20073 can use D-form (register+offset) addressing. */
20074 if (GET_MODE_SIZE (mode) < 16 && rclass == VSX_REGS)
20075 return FLOAT_REGS;
20077 /* Prefer the Altivec registers if Altivec is handling the vector
20078 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20079 loads. */
20080 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20081 || mode == V1TImode)
20082 return ALTIVEC_REGS;
20084 return rclass;
20087 if (is_constant || GET_CODE (x) == PLUS)
20089 if (reg_class_subset_p (GENERAL_REGS, rclass))
20090 return GENERAL_REGS;
20091 if (reg_class_subset_p (BASE_REGS, rclass))
20092 return BASE_REGS;
20093 return NO_REGS;
20096 /* If we haven't picked a register class, and the type is a vector or
20097 floating point type, prefer to use the VSX, FPR, or Altivec register
20098 classes. */
20099 if (rclass == NO_REGS)
20101 if (TARGET_VSX && VECTOR_MEM_VSX_OR_P8_VECTOR_P (mode))
20102 return VSX_REGS;
20104 if (TARGET_ALTIVEC && VECTOR_MEM_ALTIVEC_P (mode))
20105 return ALTIVEC_REGS;
20107 if (DECIMAL_FLOAT_MODE_P (mode))
20108 return TARGET_DFP ? FLOAT_REGS : NO_REGS;
20110 if (TARGET_FPRS && TARGET_HARD_FLOAT && FLOAT_MODE_P (mode)
20111 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20112 return FLOAT_REGS;
20115 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20116 return GENERAL_REGS;
20118 return rclass;
20121 /* Debug version of rs6000_preferred_reload_class. */
20122 static enum reg_class
20123 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20125 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20127 fprintf (stderr,
20128 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20129 "mode = %s, x:\n",
20130 reg_class_names[ret], reg_class_names[rclass],
20131 GET_MODE_NAME (GET_MODE (x)));
20132 debug_rtx (x);
20134 return ret;
20137 /* If we are copying between FP or AltiVec registers and anything else, we need
20138 a memory location. The exception is when we are targeting ppc64 and the
20139 move to/from fpr to gpr instructions are available. Also, under VSX, you
20140 can copy vector registers from the FP register set to the Altivec register
20141 set and vice versa. */
20143 static bool
20144 rs6000_secondary_memory_needed (enum reg_class from_class,
20145 enum reg_class to_class,
20146 machine_mode mode)
20148 enum rs6000_reg_type from_type, to_type;
20149 bool altivec_p = ((from_class == ALTIVEC_REGS)
20150 || (to_class == ALTIVEC_REGS));
20152 /* If a simple/direct move is available, we don't need secondary memory */
20153 from_type = reg_class_to_reg_type[(int)from_class];
20154 to_type = reg_class_to_reg_type[(int)to_class];
20156 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20157 (secondary_reload_info *)0, altivec_p))
20158 return false;
20160 /* If we have a floating point or vector register class, we need to use
20161 memory to transfer the data. */
20162 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20163 return true;
20165 return false;
20168 /* Debug version of rs6000_secondary_memory_needed. */
20169 static bool
20170 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
20171 enum reg_class to_class,
20172 machine_mode mode)
20174 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
20176 fprintf (stderr,
20177 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20178 "to_class = %s, mode = %s\n",
20179 ret ? "true" : "false",
20180 reg_class_names[from_class],
20181 reg_class_names[to_class],
20182 GET_MODE_NAME (mode));
20184 return ret;
20187 /* Return the register class of a scratch register needed to copy IN into
20188 or out of a register in RCLASS in MODE. If it can be done directly,
20189 NO_REGS is returned. */
20191 static enum reg_class
20192 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20193 rtx in)
20195 int regno;
20197 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20198 #if TARGET_MACHO
20199 && MACHOPIC_INDIRECT
20200 #endif
20203 /* We cannot copy a symbolic operand directly into anything
20204 other than BASE_REGS for TARGET_ELF. So indicate that a
20205 register from BASE_REGS is needed as an intermediate
20206 register.
20208 On Darwin, pic addresses require a load from memory, which
20209 needs a base register. */
20210 if (rclass != BASE_REGS
20211 && (GET_CODE (in) == SYMBOL_REF
20212 || GET_CODE (in) == HIGH
20213 || GET_CODE (in) == LABEL_REF
20214 || GET_CODE (in) == CONST))
20215 return BASE_REGS;
20218 if (GET_CODE (in) == REG)
20220 regno = REGNO (in);
20221 if (regno >= FIRST_PSEUDO_REGISTER)
20223 regno = true_regnum (in);
20224 if (regno >= FIRST_PSEUDO_REGISTER)
20225 regno = -1;
20228 else if (GET_CODE (in) == SUBREG)
20230 regno = true_regnum (in);
20231 if (regno >= FIRST_PSEUDO_REGISTER)
20232 regno = -1;
20234 else
20235 regno = -1;
20237 /* If we have VSX register moves, prefer moving scalar values between
20238 Altivec registers and GPRs by going via an FPR (and then via memory)
20239 instead of reloading the secondary memory address for Altivec moves. */
20240 if (TARGET_VSX
20241 && GET_MODE_SIZE (mode) < 16
20242 && !mode_supports_vmx_dform (mode)
20243 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20244 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20245 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20246 && (regno >= 0 && INT_REGNO_P (regno)))))
20247 return FLOAT_REGS;
20249 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20250 into anything. */
20251 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20252 || (regno >= 0 && INT_REGNO_P (regno)))
20253 return NO_REGS;
20255 /* Constants, memory, and VSX registers can go into VSX registers (both the
20256 traditional floating point and the altivec registers). */
20257 if (rclass == VSX_REGS
20258 && (regno == -1 || VSX_REGNO_P (regno)))
20259 return NO_REGS;
20261 /* Constants, memory, and FP registers can go into FP registers. */
20262 if ((regno == -1 || FP_REGNO_P (regno))
20263 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20264 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20266 /* Memory, and AltiVec registers can go into AltiVec registers. */
20267 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20268 && rclass == ALTIVEC_REGS)
20269 return NO_REGS;
20271 /* We can copy among the CR registers. */
20272 if ((rclass == CR_REGS || rclass == CR0_REGS)
20273 && regno >= 0 && CR_REGNO_P (regno))
20274 return NO_REGS;
20276 /* Otherwise, we need GENERAL_REGS. */
20277 return GENERAL_REGS;
20280 /* Debug version of rs6000_secondary_reload_class. */
20281 static enum reg_class
20282 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20283 machine_mode mode, rtx in)
20285 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20286 fprintf (stderr,
20287 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20288 "mode = %s, input rtx:\n",
20289 reg_class_names[ret], reg_class_names[rclass],
20290 GET_MODE_NAME (mode));
20291 debug_rtx (in);
20293 return ret;
20296 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
20298 static bool
20299 rs6000_cannot_change_mode_class (machine_mode from,
20300 machine_mode to,
20301 enum reg_class rclass)
20303 unsigned from_size = GET_MODE_SIZE (from);
20304 unsigned to_size = GET_MODE_SIZE (to);
20306 if (from_size != to_size)
20308 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20310 if (reg_classes_intersect_p (xclass, rclass))
20312 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
20313 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
20314 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20315 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20317 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20318 single register under VSX because the scalar part of the register
20319 is in the upper 64-bits, and not the lower 64-bits. Types like
20320 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20321 IEEE floating point can't overlap, and neither can small
20322 values. */
20324 if (to_float128_vector_p && from_float128_vector_p)
20325 return false;
20327 else if (to_float128_vector_p || from_float128_vector_p)
20328 return true;
20330 /* TDmode in floating-mode registers must always go into a register
20331 pair with the most significant word in the even-numbered register
20332 to match ISA requirements. In little-endian mode, this does not
20333 match subreg numbering, so we cannot allow subregs. */
20334 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20335 return true;
20337 if (from_size < 8 || to_size < 8)
20338 return true;
20340 if (from_size == 8 && (8 * to_nregs) != to_size)
20341 return true;
20343 if (to_size == 8 && (8 * from_nregs) != from_size)
20344 return true;
20346 return false;
20348 else
20349 return false;
20352 if (TARGET_E500_DOUBLE
20353 && ((((to) == DFmode) + ((from) == DFmode)) == 1
20354 || (((to) == TFmode) + ((from) == TFmode)) == 1
20355 || (((to) == IFmode) + ((from) == IFmode)) == 1
20356 || (((to) == KFmode) + ((from) == KFmode)) == 1
20357 || (((to) == DDmode) + ((from) == DDmode)) == 1
20358 || (((to) == TDmode) + ((from) == TDmode)) == 1
20359 || (((to) == DImode) + ((from) == DImode)) == 1))
20360 return true;
20362 /* Since the VSX register set includes traditional floating point registers
20363 and altivec registers, just check for the size being different instead of
20364 trying to check whether the modes are vector modes. Otherwise it won't
20365 allow, say, DF and DI to change classes. For types like TFmode and TDmode
20366 that take 2 64-bit registers, rather than a single 128-bit register, don't
20367 allow subregs of those types to other 128 bit types. */
20368 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20370 unsigned num_regs = (from_size + 15) / 16;
20371 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
20372 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
20373 return true;
20375 return (from_size != 8 && from_size != 16);
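/* In other words, for equal-sized modes in VSX registers only 8- and
   16-byte modes may change into each other: DImode <-> DFmode is
   allowed, while a 4-byte SImode <-> SFmode subreg is rejected.  */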
20378 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20379 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20380 return true;
20382 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
20383 && reg_classes_intersect_p (GENERAL_REGS, rclass))
20384 return true;
20386 return false;
20389 /* Debug version of rs6000_cannot_change_mode_class. */
20390 static bool
20391 rs6000_debug_cannot_change_mode_class (machine_mode from,
20392 machine_mode to,
20393 enum reg_class rclass)
20395 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
20397 fprintf (stderr,
20398 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20399 "to = %s, rclass = %s\n",
20400 ret ? "true" : "false",
20401 GET_MODE_NAME (from), GET_MODE_NAME (to),
20402 reg_class_names[rclass]);
20404 return ret;
20407 /* Return a string to do a move operation of 128 bits of data. */
20409 const char *
20410 rs6000_output_move_128bit (rtx operands[])
20412 rtx dest = operands[0];
20413 rtx src = operands[1];
20414 machine_mode mode = GET_MODE (dest);
20415 int dest_regno;
20416 int src_regno;
20417 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20418 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20420 if (REG_P (dest))
20422 dest_regno = REGNO (dest);
20423 dest_gpr_p = INT_REGNO_P (dest_regno);
20424 dest_fp_p = FP_REGNO_P (dest_regno);
20425 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20426 dest_vsx_p = dest_fp_p | dest_vmx_p;
20428 else
20430 dest_regno = -1;
20431 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20434 if (REG_P (src))
20436 src_regno = REGNO (src);
20437 src_gpr_p = INT_REGNO_P (src_regno);
20438 src_fp_p = FP_REGNO_P (src_regno);
20439 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20440 src_vsx_p = src_fp_p | src_vmx_p;
20442 else
20444 src_regno = -1;
20445 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20448 /* Register moves. */
20449 if (dest_regno >= 0 && src_regno >= 0)
20451 if (dest_gpr_p)
20453 if (src_gpr_p)
20454 return "#";
20456 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20457 return (WORDS_BIG_ENDIAN
20458 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20459 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20461 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20462 return "#";
20465 else if (TARGET_VSX && dest_vsx_p)
20467 if (src_vsx_p)
20468 return "xxlor %x0,%x1,%x1";
20470 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20471 return (WORDS_BIG_ENDIAN
20472 ? "mtvsrdd %x0,%1,%L1"
20473 : "mtvsrdd %x0,%L1,%1");
20475 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20476 return "#";
20479 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20480 return "vor %0,%1,%1";
20482 else if (dest_fp_p && src_fp_p)
20483 return "#";
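/* Returning "#", here and above, tells the output machinery that no
   single instruction does the move; the insn is instead split after
   reload (see rs6000_split_128bit_ok_p below) into smaller moves.  */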
20486 /* Loads. */
20487 else if (dest_regno >= 0 && MEM_P (src))
20489 if (dest_gpr_p)
20491 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20492 return "lq %0,%1";
20493 else
20494 return "#";
20497 else if (TARGET_ALTIVEC && dest_vmx_p
20498 && altivec_indexed_or_indirect_operand (src, mode))
20499 return "lvx %0,%y1";
20501 else if (TARGET_VSX && dest_vsx_p)
20503 if (mode_supports_vsx_dform_quad (mode)
20504 && quad_address_p (XEXP (src, 0), mode, true))
20505 return "lxv %x0,%1";
20507 else if (TARGET_P9_VECTOR)
20508 return "lxvx %x0,%y1";
20510 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20511 return "lxvw4x %x0,%y1";
20513 else
20514 return "lxvd2x %x0,%y1";
20517 else if (TARGET_ALTIVEC && dest_vmx_p)
20518 return "lvx %0,%y1";
20520 else if (dest_fp_p)
20521 return "#";
20524 /* Stores. */
20525 else if (src_regno >= 0 && MEM_P (dest))
20527 if (src_gpr_p)
20529 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20530 return "stq %1,%0";
20531 else
20532 return "#";
20535 else if (TARGET_ALTIVEC && src_vmx_p
20536 && altivec_indexed_or_indirect_operand (dest, mode))
20537 return "stvx %1,%y0";
20539 else if (TARGET_VSX && src_vsx_p)
20541 if (mode_supports_vsx_dform_quad (mode)
20542 && quad_address_p (XEXP (dest, 0), mode, true))
20543 return "stxv %x1,%0";
20545 else if (TARGET_P9_VECTOR)
20546 return "stxvx %x1,%y0";
20548 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20549 return "stxvw4x %x1,%y0";
20551 else
20552 return "stxvd2x %x1,%y0";
20555 else if (TARGET_ALTIVEC && src_vmx_p)
20556 return "stvx %1,%y0";
20558 else if (src_fp_p)
20559 return "#";
20562 /* Constants. */
20563 else if (dest_regno >= 0
20564 && (GET_CODE (src) == CONST_INT
20565 || GET_CODE (src) == CONST_WIDE_INT
20566 || GET_CODE (src) == CONST_DOUBLE
20567 || GET_CODE (src) == CONST_VECTOR))
20569 if (dest_gpr_p)
20570 return "#";
20572 else if ((dest_vmx_p && TARGET_ALTIVEC)
20573 || (dest_vsx_p && TARGET_VSX))
20574 return output_vec_const_move (operands);
20577 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20580 /* Validate a 128-bit move. */
20581 bool
20582 rs6000_move_128bit_ok_p (rtx operands[])
20584 machine_mode mode = GET_MODE (operands[0]);
20585 return (gpc_reg_operand (operands[0], mode)
20586 || gpc_reg_operand (operands[1], mode));
20589 /* Return true if a 128-bit move needs to be split. */
20590 bool
20591 rs6000_split_128bit_ok_p (rtx operands[])
20593 if (!reload_completed)
20594 return false;
20596 if (!gpr_or_gpr_p (operands[0], operands[1]))
20597 return false;
20599 if (quad_load_store_p (operands[0], operands[1]))
20600 return false;
20602 return true;
20606 /* Given a comparison operation, return the bit number in CCR to test. We
20607 know this is a valid comparison.
20609 SCC_P is 1 if this is for an scc. That means that %D will have been
20610 used instead of %C, so the bits will be in different places.
20612 Return -1 if OP isn't a valid comparison for some reason. */
20614 int
20615 ccr_bit (rtx op, int scc_p)
20617 enum rtx_code code = GET_CODE (op);
20618 machine_mode cc_mode;
20619 int cc_regnum;
20620 int base_bit;
20621 rtx reg;
20623 if (!COMPARISON_P (op))
20624 return -1;
20626 reg = XEXP (op, 0);
20628 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20630 cc_mode = GET_MODE (reg);
20631 cc_regnum = REGNO (reg);
20632 base_bit = 4 * (cc_regnum - CR0_REGNO);
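/* Each CR field holds four bits, in order LT, GT, EQ and
   SO/UNORDERED, so for example the EQ bit of cr2 is CCR bit
   4 * 2 + 2 == 10.  */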
20634 validate_condition_mode (code, cc_mode);
20636 /* When generating a sCOND operation, only positive conditions are
20637 allowed. */
20638 gcc_assert (!scc_p
20639 || code == EQ || code == GT || code == LT || code == UNORDERED
20640 || code == GTU || code == LTU);
20642 switch (code)
20644 case NE:
20645 return scc_p ? base_bit + 3 : base_bit + 2;
20646 case EQ:
20647 return base_bit + 2;
20648 case GT: case GTU: case UNLE:
20649 return base_bit + 1;
20650 case LT: case LTU: case UNGE:
20651 return base_bit;
20652 case ORDERED: case UNORDERED:
20653 return base_bit + 3;
20655 case GE: case GEU:
20656 /* If scc, we will have done a cror to put the bit in the
20657 unordered position. So test that bit. For integer, this is ! LT
20658 unless this is an scc insn. */
20659 return scc_p ? base_bit + 3 : base_bit;
20661 case LE: case LEU:
20662 return scc_p ? base_bit + 3 : base_bit + 1;
20664 default:
20665 gcc_unreachable ();
20669 /* Return the GOT register. */
20671 rtx
20672 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20674 /* The second flow pass currently (June 1999) can't update
20675 regs_ever_live without disturbing other parts of the compiler, so
20676 update it here to make the prolog/epilogue code happy. */
20677 if (!can_create_pseudo_p ()
20678 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20679 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20681 crtl->uses_pic_offset_table = 1;
20683 return pic_offset_table_rtx;
20686 static rs6000_stack_t stack_info;
20688 /* Function to init struct machine_function.
20689 This will be called, via a pointer variable,
20690 from push_function_context. */
20692 static struct machine_function *
20693 rs6000_init_machine_status (void)
20695 stack_info.reload_completed = 0;
20696 return ggc_cleared_alloc<machine_function> ();
20699 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20701 /* Write out a function code label. */
20703 void
20704 rs6000_output_function_entry (FILE *file, const char *fname)
20706 if (fname[0] != '.')
20708 switch (DEFAULT_ABI)
20710 default:
20711 gcc_unreachable ();
20713 case ABI_AIX:
20714 if (DOT_SYMBOLS)
20715 putc ('.', file);
20716 else
20717 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20718 break;
20720 case ABI_ELFv2:
20721 case ABI_V4:
20722 case ABI_DARWIN:
20723 break;
20727 RS6000_OUTPUT_BASENAME (file, fname);
20730 /* Print an operand. Recognize special options, documented below. */
20732 #if TARGET_ELF
20733 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20734 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20735 #else
20736 #define SMALL_DATA_RELOC "sda21"
20737 #define SMALL_DATA_REG 0
20738 #endif
20740 void
20741 print_operand (FILE *file, rtx x, int code)
20743 int i;
20744 unsigned HOST_WIDE_INT uval;
20746 switch (code)
20748 /* %a is output_address. */
20750 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20751 output_operand. */
20753 case 'D':
20754 /* Like 'J' but get to the GT bit only. */
20755 gcc_assert (REG_P (x));
20757 /* Bit 1 is GT bit. */
20758 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20760 /* Add one for shift count in rlinm for scc. */
20761 fprintf (file, "%d", i + 1);
20762 return;
20764 case 'e':
20765 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20766 if (! INT_P (x))
20768 output_operand_lossage ("invalid %%e value");
20769 return;
20772 uval = INTVAL (x);
20773 if ((uval & 0xffff) == 0 && uval != 0)
20774 putc ('s', file);
20775 return;
20777 case 'E':
20778 /* X is a CR register. Print the number of the EQ bit of the CR */
20779 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20780 output_operand_lossage ("invalid %%E value");
20781 else
20782 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20783 return;
20785 case 'f':
20786 /* X is a CR register. Print the shift count needed to move it
20787 to the high-order four bits. */
20788 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20789 output_operand_lossage ("invalid %%f value");
20790 else
20791 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20792 return;
20794 case 'F':
20795 /* Similar, but print the count for the rotate in the opposite
20796 direction. */
20797 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20798 output_operand_lossage ("invalid %%F value");
20799 else
20800 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20801 return;
20803 case 'G':
20804 /* X is a constant integer. If it is negative, print "m",
20805 otherwise print "z". This is to make an aze or ame insn. */
20806 if (GET_CODE (x) != CONST_INT)
20807 output_operand_lossage ("invalid %%G value");
20808 else if (INTVAL (x) >= 0)
20809 putc ('z', file);
20810 else
20811 putc ('m', file);
20812 return;
20814 case 'h':
20815 /* If constant, output low-order five bits. Otherwise, write
20816 normally. */
20817 if (INT_P (x))
20818 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20819 else
20820 print_operand (file, x, 0);
20821 return;
20823 case 'H':
20824 /* If constant, output low-order six bits. Otherwise, write
20825 normally. */
20826 if (INT_P (x))
20827 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20828 else
20829 print_operand (file, x, 0);
20830 return;
20832 case 'I':
20833 /* Print `i' if this is a constant, else nothing. */
20834 if (INT_P (x))
20835 putc ('i', file);
20836 return;
20838 case 'j':
20839 /* Write the bit number in CCR for jump. */
20840 i = ccr_bit (x, 0);
20841 if (i == -1)
20842 output_operand_lossage ("invalid %%j code");
20843 else
20844 fprintf (file, "%d", i);
20845 return;
20847 case 'J':
20848 /* Similar, but add one for shift count in rlinm for scc and pass
20849 scc flag to `ccr_bit'. */
20850 i = ccr_bit (x, 1);
20851 if (i == -1)
20852 output_operand_lossage ("invalid %%J code");
20853 else
20854 /* If we want bit 31, write a shift count of zero, not 32. */
20855 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20856 return;
20858 case 'k':
20859 /* X must be a constant. Write the 1's complement of the
20860 constant. */
20861 if (! INT_P (x))
20862 output_operand_lossage ("invalid %%k value");
20863 else
20864 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20865 return;
20867 case 'K':
20868 /* X must be a symbolic constant on ELF. Write an
20869 expression suitable for an 'addi' that adds in the low 16
20870 bits of the MEM. */
20871 if (GET_CODE (x) == CONST)
20873 if (GET_CODE (XEXP (x, 0)) != PLUS
20874 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20875 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20876 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20877 output_operand_lossage ("invalid %%K value");
20879 print_operand_address (file, x);
20880 fputs ("@l", file);
20881 return;
20883 /* %l is output_asm_label. */
20885 case 'L':
20886 /* Write second word of DImode or DFmode reference. Works on register
20887 or non-indexed memory only. */
20888 if (REG_P (x))
20889 fputs (reg_names[REGNO (x) + 1], file);
20890 else if (MEM_P (x))
20892 machine_mode mode = GET_MODE (x);
20893 /* Handle possible auto-increment. Since it is pre-increment and
20894 we have already done it, we can just use an offset of word. */
20895 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20896 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20897 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20898 UNITS_PER_WORD));
20899 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20900 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20901 UNITS_PER_WORD));
20902 else
20903 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20904 UNITS_PER_WORD),
20905 0));
20907 if (small_data_operand (x, GET_MODE (x)))
20908 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20909 reg_names[SMALL_DATA_REG]);
20911 return;
20913 case 'N':
20914 /* Write the number of elements in the vector times 4. */
20915 if (GET_CODE (x) != PARALLEL)
20916 output_operand_lossage ("invalid %%N value");
20917 else
20918 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20919 return;
20921 case 'O':
20922 /* Similar, but subtract 1 first. */
20923 if (GET_CODE (x) != PARALLEL)
20924 output_operand_lossage ("invalid %%O value");
20925 else
20926 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20927 return;
20929 case 'p':
20930 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20931 if (! INT_P (x)
20932 || INTVAL (x) < 0
20933 || (i = exact_log2 (INTVAL (x))) < 0)
20934 output_operand_lossage ("invalid %%p value");
20935 else
20936 fprintf (file, "%d", i);
20937 return;
20939 case 'P':
20940 /* The operand must be an indirect memory reference. The result
20941 is the register name. */
20942 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20943 || REGNO (XEXP (x, 0)) >= 32)
20944 output_operand_lossage ("invalid %%P value");
20945 else
20946 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20947 return;
20949 case 'q':
20950 /* This outputs the logical code corresponding to a boolean
20951 expression. The expression may have one or both operands
20952 negated (if one, only the first one). For condition register
20953 logical operations, it will also treat the negated
20954 CR codes as NOTs, but not handle NOTs of them. */
20956 const char *const *t = 0;
20957 const char *s;
20958 enum rtx_code code = GET_CODE (x);
20959 static const char * const tbl[3][3] = {
20960 { "and", "andc", "nor" },
20961 { "or", "orc", "nand" },
20962 { "xor", "eqv", "xor" } };
20964 if (code == AND)
20965 t = tbl[0];
20966 else if (code == IOR)
20967 t = tbl[1];
20968 else if (code == XOR)
20969 t = tbl[2];
20970 else
20971 output_operand_lossage ("invalid %%q value");
20973 if (GET_CODE (XEXP (x, 0)) != NOT)
20974 s = t[0];
20975 else
20977 if (GET_CODE (XEXP (x, 1)) == NOT)
20978 s = t[2];
20979 else
20980 s = t[1];
20983 fputs (s, file);
20985 return;
20987 case 'Q':
20988 if (! TARGET_MFCRF)
20989 return;
20990 fputc (',', file);
20991 /* FALLTHRU */
20993 case 'R':
20994 /* X is a CR register. Print the mask for `mtcrf'. */
20995 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20996 output_operand_lossage ("invalid %%R value");
20997 else
20998 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20999 return;
21001 case 's':
21002 /* Low 5 bits of 32 - value */
21003 if (! INT_P (x))
21004 output_operand_lossage ("invalid %%s value");
21005 else
21006 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21007 return;
21009 case 't':
21010 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21011 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21013 /* Bit 3 is OV bit. */
21014 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21016 /* If we want bit 31, write a shift count of zero, not 32. */
21017 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21018 return;
21020 case 'T':
21021 /* Print the symbolic name of a branch target register. */
21022 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21023 && REGNO (x) != CTR_REGNO))
21024 output_operand_lossage ("invalid %%T value");
21025 else if (REGNO (x) == LR_REGNO)
21026 fputs ("lr", file);
21027 else
21028 fputs ("ctr", file);
21029 return;
21031 case 'u':
21032 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21033 for use in unsigned operand. */
21034 if (! INT_P (x))
21036 output_operand_lossage ("invalid %%u value");
21037 return;
21040 uval = INTVAL (x);
21041 if ((uval & 0xffff) == 0)
21042 uval >>= 16;
21044 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21045 return;
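/* E.g. a constant of 0x12340000 prints as 0x1234 while 0x5678 prints
   as 0x5678; whichever halfword is non-zero is emitted.  */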
21047 case 'v':
21048 /* High-order 16 bits of constant for use in signed operand. */
21049 if (! INT_P (x))
21050 output_operand_lossage ("invalid %%v value");
21051 else
21052 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21053 (INTVAL (x) >> 16) & 0xffff);
21054 return;
21056 case 'U':
21057 /* Print `u' if this has an auto-increment or auto-decrement. */
21058 if (MEM_P (x)
21059 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21060 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21061 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21062 putc ('u', file);
21063 return;
21065 case 'V':
21066 /* Print the trap code for this operand. */
21067 switch (GET_CODE (x))
21069 case EQ:
21070 fputs ("eq", file); /* 4 */
21071 break;
21072 case NE:
21073 fputs ("ne", file); /* 24 */
21074 break;
21075 case LT:
21076 fputs ("lt", file); /* 16 */
21077 break;
21078 case LE:
21079 fputs ("le", file); /* 20 */
21080 break;
21081 case GT:
21082 fputs ("gt", file); /* 8 */
21083 break;
21084 case GE:
21085 fputs ("ge", file); /* 12 */
21086 break;
21087 case LTU:
21088 fputs ("llt", file); /* 2 */
21089 break;
21090 case LEU:
21091 fputs ("lle", file); /* 6 */
21092 break;
21093 case GTU:
21094 fputs ("lgt", file); /* 1 */
21095 break;
21096 case GEU:
21097 fputs ("lge", file); /* 5 */
21098 break;
21099 default:
21100 gcc_unreachable ();
21102 break;
21104 case 'w':
21105 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21106 normally. */
21107 if (INT_P (x))
21108 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21109 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21110 else
21111 print_operand (file, x, 0);
21112 return;
21114 case 'x':
21115 /* X is a FPR or Altivec register used in a VSX context. */
21116 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21117 output_operand_lossage ("invalid %%x value");
21118 else
21120 int reg = REGNO (x);
21121 int vsx_reg = (FP_REGNO_P (reg)
21122 ? reg - 32
21123 : reg - FIRST_ALTIVEC_REGNO + 32);
21125 #ifdef TARGET_REGNAMES
21126 if (TARGET_REGNAMES)
21127 fprintf (file, "%%vs%d", vsx_reg);
21128 else
21129 #endif
21130 fprintf (file, "%d", vsx_reg);
21132 return;
21134 case 'X':
21135 if (MEM_P (x)
21136 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21137 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21138 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21139 putc ('x', file);
21140 return;
21142 case 'Y':
21143 /* Like 'L', for third word of TImode/PTImode */
21144 if (REG_P (x))
21145 fputs (reg_names[REGNO (x) + 2], file);
21146 else if (MEM_P (x))
21148 machine_mode mode = GET_MODE (x);
21149 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21150 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21151 output_address (mode, plus_constant (Pmode,
21152 XEXP (XEXP (x, 0), 0), 8));
21153 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21154 output_address (mode, plus_constant (Pmode,
21155 XEXP (XEXP (x, 0), 0), 8));
21156 else
21157 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21158 if (small_data_operand (x, GET_MODE (x)))
21159 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21160 reg_names[SMALL_DATA_REG]);
21162 return;
21164 case 'z':
21165 /* X is a SYMBOL_REF. Write out the name preceded by a
21166 period and without any trailing data in brackets. Used for function
21167 names. If we are configured for System V (or the embedded ABI) on
21168 the PowerPC, do not emit the period, since those systems do not use
21169 TOCs and the like. */
21170 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21172 /* For macho, check to see if we need a stub. */
21173 if (TARGET_MACHO)
21175 const char *name = XSTR (x, 0);
21176 #if TARGET_MACHO
21177 if (darwin_emit_branch_islands
21178 && MACHOPIC_INDIRECT
21179 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21180 name = machopic_indirection_name (x, /*stub_p=*/true);
21181 #endif
21182 assemble_name (file, name);
21184 else if (!DOT_SYMBOLS)
21185 assemble_name (file, XSTR (x, 0));
21186 else
21187 rs6000_output_function_entry (file, XSTR (x, 0));
21188 return;
21190 case 'Z':
21191 /* Like 'L', for last word of TImode/PTImode. */
21192 if (REG_P (x))
21193 fputs (reg_names[REGNO (x) + 3], file);
21194 else if (MEM_P (x))
21196 machine_mode mode = GET_MODE (x);
21197 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21198 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21199 output_address (mode, plus_constant (Pmode,
21200 XEXP (XEXP (x, 0), 0), 12));
21201 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21202 output_address (mode, plus_constant (Pmode,
21203 XEXP (XEXP (x, 0), 0), 12));
21204 else
21205 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21206 if (small_data_operand (x, GET_MODE (x)))
21207 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21208 reg_names[SMALL_DATA_REG]);
21210 return;
21212 /* Print AltiVec or SPE memory operand. */
21213 case 'y':
21215 rtx tmp;
21217 gcc_assert (MEM_P (x));
21219 tmp = XEXP (x, 0);
21221 /* Ugly hack because %y is overloaded. */
21222 if ((TARGET_SPE || TARGET_E500_DOUBLE)
21223 && (GET_MODE_SIZE (GET_MODE (x)) == 8
21224 || FLOAT128_2REG_P (GET_MODE (x))
21225 || GET_MODE (x) == TImode
21226 || GET_MODE (x) == PTImode))
21228 /* Handle [reg]. */
21229 if (REG_P (tmp))
21231 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
21232 break;
21234 /* Handle [reg+UIMM]. */
21235 else if (GET_CODE (tmp) == PLUS &&
21236 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
21238 int x;
21240 gcc_assert (REG_P (XEXP (tmp, 0)));
21242 x = INTVAL (XEXP (tmp, 1));
21243 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
21244 break;
21247 /* Fall through. Must be [reg+reg]. */
21249 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21250 && GET_CODE (tmp) == AND
21251 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21252 && INTVAL (XEXP (tmp, 1)) == -16)
21253 tmp = XEXP (tmp, 0);
21254 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21255 && GET_CODE (tmp) == PRE_MODIFY)
21256 tmp = XEXP (tmp, 1);
21257 if (REG_P (tmp))
21258 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21259 else
21261 if (GET_CODE (tmp) != PLUS
21262 || !REG_P (XEXP (tmp, 0))
21263 || !REG_P (XEXP (tmp, 1)))
21265 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21266 break;
21269 if (REGNO (XEXP (tmp, 0)) == 0)
21270 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21271 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21272 else
21273 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21274 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21276 break;
21279 case 0:
21280 if (REG_P (x))
21281 fprintf (file, "%s", reg_names[REGNO (x)]);
21282 else if (MEM_P (x))
21284 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21285 know the width from the mode. */
21286 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21287 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21288 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21289 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21290 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21291 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21292 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21293 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21294 else
21295 output_address (GET_MODE (x), XEXP (x, 0));
21297 else
21299 if (toc_relative_expr_p (x, false))
21300 /* This hack along with a corresponding hack in
21301 rs6000_output_addr_const_extra arranges to output addends
21302 where the assembler expects to find them. eg.
21303 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21304 without this hack would be output as "x@toc+4". We
21305 want "x+4@toc". */
21306 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
21307 else
21308 output_addr_const (file, x);
21310 return;
21312 case '&':
21313 if (const char *name = get_some_local_dynamic_name ())
21314 assemble_name (file, name);
21315 else
21316 output_operand_lossage ("'%%&' used without any "
21317 "local dynamic TLS references");
21318 return;
21320 default:
21321 output_operand_lossage ("invalid %%xn code");
21325 /* Print the address of an operand. */
21327 void
21328 print_operand_address (FILE *file, rtx x)
21330 if (REG_P (x))
21331 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21332 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21333 || GET_CODE (x) == LABEL_REF)
21335 output_addr_const (file, x);
21336 if (small_data_operand (x, GET_MODE (x)))
21337 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21338 reg_names[SMALL_DATA_REG]);
21339 else
21340 gcc_assert (!TARGET_TOC);
21342 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21343 && REG_P (XEXP (x, 1)))
21345 if (REGNO (XEXP (x, 0)) == 0)
21346 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21347 reg_names[ REGNO (XEXP (x, 0)) ]);
21348 else
21349 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21350 reg_names[ REGNO (XEXP (x, 1)) ]);
21352 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21353 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21354 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21355 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21356 #if TARGET_MACHO
21357 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21358 && CONSTANT_P (XEXP (x, 1)))
21360 fprintf (file, "lo16(");
21361 output_addr_const (file, XEXP (x, 1));
21362 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21364 #endif
21365 #if TARGET_ELF
21366 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21367 && CONSTANT_P (XEXP (x, 1)))
21369 output_addr_const (file, XEXP (x, 1));
21370 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21372 #endif
21373 else if (toc_relative_expr_p (x, false))
21375 /* This hack along with a corresponding hack in
21376 rs6000_output_addr_const_extra arranges to output addends
21377 where the assembler expects to find them. eg.
21378 (lo_sum (reg 9)
21379 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21380 without this hack would be output as "x@toc+8@l(9)". We
21381 want "x+8@toc@l(9)". */
21382 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
21383 if (GET_CODE (x) == LO_SUM)
21384 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21385 else
21386 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
21388 else
21389 gcc_unreachable ();
21392 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
21394 static bool
21395 rs6000_output_addr_const_extra (FILE *file, rtx x)
21397 if (GET_CODE (x) == UNSPEC)
21398 switch (XINT (x, 1))
21400 case UNSPEC_TOCREL:
21401 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21402 && REG_P (XVECEXP (x, 0, 1))
21403 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21404 output_addr_const (file, XVECEXP (x, 0, 0));
21405 if (x == tocrel_base && tocrel_offset != const0_rtx)
21407 if (INTVAL (tocrel_offset) >= 0)
21408 fprintf (file, "+");
21409 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
21411 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21413 putc ('-', file);
21414 assemble_name (file, toc_label_name);
21415 need_toc_init = 1;
21417 else if (TARGET_ELF)
21418 fputs ("@toc", file);
21419 return true;
21421 #if TARGET_MACHO
21422 case UNSPEC_MACHOPIC_OFFSET:
21423 output_addr_const (file, XVECEXP (x, 0, 0));
21424 putc ('-', file);
21425 machopic_output_function_base_name (file);
21426 return true;
21427 #endif
21429 return false;
21432 /* Target hook for assembling integer objects. The PowerPC version has
21433 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21434 is defined. It also needs to handle DI-mode objects on 64-bit
21435 targets. */
21437 static bool
21438 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21440 #ifdef RELOCATABLE_NEEDS_FIXUP
21441 /* Special handling for SI values. */
21442 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21444 static int recurse = 0;
21446 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21447 the .fixup section. Since the TOC section is already relocated, we
21448 don't need to mark it here. We used to skip the text section, but it
21449 should never be valid for relocated addresses to be placed in the text
21450 section. */
21451 if (DEFAULT_ABI == ABI_V4
21452 && (TARGET_RELOCATABLE || flag_pic > 1)
21453 && in_section != toc_section
21454 && !recurse
21455 && !CONST_SCALAR_INT_P (x)
21456 && CONSTANT_P (x))
21458 char buf[256];
21460 recurse = 1;
21461 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21462 fixuplabelno++;
21463 ASM_OUTPUT_LABEL (asm_out_file, buf);
21464 fprintf (asm_out_file, "\t.long\t(");
21465 output_addr_const (asm_out_file, x);
21466 fprintf (asm_out_file, ")@fixup\n");
21467 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21468 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21469 fprintf (asm_out_file, "\t.long\t");
21470 assemble_name (asm_out_file, buf);
21471 fprintf (asm_out_file, "\n\t.previous\n");
21472 recurse = 0;
21473 return true;
21475 /* Remove initial .'s to turn a -mcall-aixdesc function
21476 address into the address of the descriptor, not the function
21477 itself. */
21478 else if (GET_CODE (x) == SYMBOL_REF
21479 && XSTR (x, 0)[0] == '.'
21480 && DEFAULT_ABI == ABI_AIX)
21482 const char *name = XSTR (x, 0);
21483 while (*name == '.')
21484 name++;
21486 fprintf (asm_out_file, "\t.long\t%s\n", name);
21487 return true;
21490 #endif /* RELOCATABLE_NEEDS_FIXUP */
21491 return default_assemble_integer (x, size, aligned_p);
21494 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21495 /* Emit an assembler directive to set symbol visibility for DECL to
21496 VISIBILITY_TYPE. */
21498 static void
21499 rs6000_assemble_visibility (tree decl, int vis)
21501 if (TARGET_XCOFF)
21502 return;
21504 /* Functions need to have their entry point symbol visibility set as
21505 well as their descriptor symbol visibility. */
21506 if (DEFAULT_ABI == ABI_AIX
21507 && DOT_SYMBOLS
21508 && TREE_CODE (decl) == FUNCTION_DECL)
21510 static const char * const visibility_types[] = {
21511 NULL, "internal", "hidden", "protected"
21514 const char *name, *type;
21516 name = ((* targetm.strip_name_encoding)
21517 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21518 type = visibility_types[vis];
21520 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21521 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21523 else
21524 default_assemble_visibility (decl, vis);
21526 #endif
21528 enum rtx_code
21529 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21531 /* Reversal of FP compares takes care -- an ordered compare
21532 becomes an unordered compare and vice versa. */
21533 if (mode == CCFPmode
21534 && (!flag_finite_math_only
21535 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21536 || code == UNEQ || code == LTGT))
21537 return reverse_condition_maybe_unordered (code);
21538 else
21539 return reverse_condition (code);
21542 /* Generate a compare for CODE. Return a brand-new rtx that
21543 represents the result of the compare. */
21545 static rtx
21546 rs6000_generate_compare (rtx cmp, machine_mode mode)
21548 machine_mode comp_mode;
21549 rtx compare_result;
21550 enum rtx_code code = GET_CODE (cmp);
21551 rtx op0 = XEXP (cmp, 0);
21552 rtx op1 = XEXP (cmp, 1);
21554 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21555 comp_mode = CCmode;
21556 else if (FLOAT_MODE_P (mode))
21557 comp_mode = CCFPmode;
21558 else if (code == GTU || code == LTU
21559 || code == GEU || code == LEU)
21560 comp_mode = CCUNSmode;
21561 else if ((code == EQ || code == NE)
21562 && unsigned_reg_p (op0)
21563 && (unsigned_reg_p (op1)
21564 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21565 /* These are unsigned values; perhaps there will be a later
21566 ordering compare that can be shared with this one. */
21567 comp_mode = CCUNSmode;
21568 else
21569 comp_mode = CCmode;
21571 /* If we have an unsigned compare, make sure we don't have a signed value as
21572 an immediate. */
21573 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21574 && INTVAL (op1) < 0)
21576 op0 = copy_rtx_if_shared (op0);
21577 op1 = force_reg (GET_MODE (op0), op1);
21578 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21581 /* First, the compare. */
21582 compare_result = gen_reg_rtx (comp_mode);
21584 /* E500 FP compare instructions on the GPRs. Yuck! */
21585 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
21586 && FLOAT_MODE_P (mode))
21588 rtx cmp, or_result, compare_result2;
21589 machine_mode op_mode = GET_MODE (op0);
21590 bool reverse_p;
21592 if (op_mode == VOIDmode)
21593 op_mode = GET_MODE (op1);
21595 /* First reverse the condition codes that aren't directly supported. */
21596 switch (code)
21598 case NE:
21599 case UNLT:
21600 case UNLE:
21601 case UNGT:
21602 case UNGE:
21603 code = reverse_condition_maybe_unordered (code);
21604 reverse_p = true;
21605 break;
21607 case EQ:
21608 case LT:
21609 case LE:
21610 case GT:
21611 case GE:
21612 reverse_p = false;
21613 break;
21615 default:
21616 gcc_unreachable ();
21619 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
21620 This explains the following mess. */
21622 switch (code)
21624 case EQ:
21625 switch (op_mode)
21627 case SFmode:
21628 cmp = (flag_finite_math_only && !flag_trapping_math)
21629 ? gen_tstsfeq_gpr (compare_result, op0, op1)
21630 : gen_cmpsfeq_gpr (compare_result, op0, op1);
21631 break;
21633 case DFmode:
21634 cmp = (flag_finite_math_only && !flag_trapping_math)
21635 ? gen_tstdfeq_gpr (compare_result, op0, op1)
21636 : gen_cmpdfeq_gpr (compare_result, op0, op1);
21637 break;
21639 case TFmode:
21640 case IFmode:
21641 case KFmode:
21642 cmp = (flag_finite_math_only && !flag_trapping_math)
21643 ? gen_tsttfeq_gpr (compare_result, op0, op1)
21644 : gen_cmptfeq_gpr (compare_result, op0, op1);
21645 break;
21647 default:
21648 gcc_unreachable ();
21650 break;
21652 case GT:
21653 case GE:
21654 switch (op_mode)
21656 case SFmode:
21657 cmp = (flag_finite_math_only && !flag_trapping_math)
21658 ? gen_tstsfgt_gpr (compare_result, op0, op1)
21659 : gen_cmpsfgt_gpr (compare_result, op0, op1);
21660 break;
21662 case DFmode:
21663 cmp = (flag_finite_math_only && !flag_trapping_math)
21664 ? gen_tstdfgt_gpr (compare_result, op0, op1)
21665 : gen_cmpdfgt_gpr (compare_result, op0, op1);
21666 break;
21668 case TFmode:
21669 case IFmode:
21670 case KFmode:
21671 cmp = (flag_finite_math_only && !flag_trapping_math)
21672 ? gen_tsttfgt_gpr (compare_result, op0, op1)
21673 : gen_cmptfgt_gpr (compare_result, op0, op1);
21674 break;
21676 default:
21677 gcc_unreachable ();
21679 break;
21681 case LT:
21682 case LE:
21683 switch (op_mode)
21685 case SFmode:
21686 cmp = (flag_finite_math_only && !flag_trapping_math)
21687 ? gen_tstsflt_gpr (compare_result, op0, op1)
21688 : gen_cmpsflt_gpr (compare_result, op0, op1);
21689 break;
21691 case DFmode:
21692 cmp = (flag_finite_math_only && !flag_trapping_math)
21693 ? gen_tstdflt_gpr (compare_result, op0, op1)
21694 : gen_cmpdflt_gpr (compare_result, op0, op1);
21695 break;
21697 case TFmode:
21698 case IFmode:
21699 case KFmode:
21700 cmp = (flag_finite_math_only && !flag_trapping_math)
21701 ? gen_tsttflt_gpr (compare_result, op0, op1)
21702 : gen_cmptflt_gpr (compare_result, op0, op1);
21703 break;
21705 default:
21706 gcc_unreachable ();
21708 break;
21710 default:
21711 gcc_unreachable ();
21714 /* Synthesize LE and GE from LT/GT || EQ. */
21715 if (code == LE || code == GE)
21717 emit_insn (cmp);
21719 compare_result2 = gen_reg_rtx (CCFPmode);
21721 /* Do the EQ. */
21722 switch (op_mode)
21724 case SFmode:
21725 cmp = (flag_finite_math_only && !flag_trapping_math)
21726 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
21727 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
21728 break;
21730 case DFmode:
21731 cmp = (flag_finite_math_only && !flag_trapping_math)
21732 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
21733 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
21734 break;
21736 case TFmode:
21737 case IFmode:
21738 case KFmode:
21739 cmp = (flag_finite_math_only && !flag_trapping_math)
21740 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
21741 : gen_cmptfeq_gpr (compare_result2, op0, op1);
21742 break;
21744 default:
21745 gcc_unreachable ();
21748 emit_insn (cmp);
21750 /* OR them together. */
21751 or_result = gen_reg_rtx (CCFPmode);
21752 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
21753 compare_result2);
21754 compare_result = or_result;
21757 code = reverse_p ? NE : EQ;
21759 emit_insn (cmp);
21762 /* IEEE 128-bit support in VSX registers when we do not have hardware
21763 support. */
21764 else if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21766 rtx libfunc = NULL_RTX;
21767 bool check_nan = false;
21768 rtx dest;
21770 switch (code)
21772 case EQ:
21773 case NE:
21774 libfunc = optab_libfunc (eq_optab, mode);
21775 break;
21777 case GT:
21778 case GE:
21779 libfunc = optab_libfunc (ge_optab, mode);
21780 break;
21782 case LT:
21783 case LE:
21784 libfunc = optab_libfunc (le_optab, mode);
21785 break;
21787 case UNORDERED:
21788 case ORDERED:
21789 libfunc = optab_libfunc (unord_optab, mode);
21790 code = (code == UNORDERED) ? NE : EQ;
21791 break;
21793 case UNGE:
21794 case UNGT:
21795 check_nan = true;
21796 libfunc = optab_libfunc (ge_optab, mode);
21797 code = (code == UNGE) ? GE : GT;
21798 break;
21800 case UNLE:
21801 case UNLT:
21802 check_nan = true;
21803 libfunc = optab_libfunc (le_optab, mode);
21804 code = (code == UNLE) ? LE : LT;
21805 break;
21807 case UNEQ:
21808 case LTGT:
21809 check_nan = true;
21810 libfunc = optab_libfunc (eq_optab, mode);
21811 code = (code == UNEQ) ? EQ : NE;
21812 break;
21814 default:
21815 gcc_unreachable ();
21818 gcc_assert (libfunc);
21820 if (!check_nan)
21821 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21822 SImode, 2, op0, mode, op1, mode);
21824 /* The library signals an exception for signalling NaNs, so we need to
21825 handle isgreater, etc. by first checking isordered. */
21826 else
21828 rtx ne_rtx, normal_dest, unord_dest;
21829 rtx unord_func = optab_libfunc (unord_optab, mode);
21830 rtx join_label = gen_label_rtx ();
21831 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21832 rtx unord_cmp = gen_reg_rtx (comp_mode);
21835 /* Test for either value being a NaN. */
21836 gcc_assert (unord_func);
21837 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21838 SImode, 2, op0, mode, op1,
21839 mode);
21841 /* Set value (1) if either value is a NaN, and jump to the join
21842 label. */
21843 dest = gen_reg_rtx (SImode);
21844 emit_move_insn (dest, const1_rtx);
21845 emit_insn (gen_rtx_SET (unord_cmp,
21846 gen_rtx_COMPARE (comp_mode, unord_dest,
21847 const0_rtx)));
21849 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21850 emit_jump_insn (gen_rtx_SET (pc_rtx,
21851 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21852 join_ref,
21853 pc_rtx)));
21855 /* Do the normal comparison, knowing that the values are not
21856 NaNs. */
21857 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21858 SImode, 2, op0, mode, op1,
21859 mode);
21861 emit_insn (gen_cstoresi4 (dest,
21862 gen_rtx_fmt_ee (code, SImode, normal_dest,
21863 const0_rtx),
21864 normal_dest, const0_rtx));
21866 /* Join NaN and non-NaN paths. Compare dest against 0. */
21867 emit_label (join_label);
21868 code = NE;
21871 emit_insn (gen_rtx_SET (compare_result,
21872 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21875 else
21877 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21878 CLOBBERs to match cmptf_internal2 pattern. */
21879 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21880 && FLOAT128_IBM_P (GET_MODE (op0))
21881 && TARGET_HARD_FLOAT && TARGET_FPRS)
21882 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21883 gen_rtvec (10,
21884 gen_rtx_SET (compare_result,
21885 gen_rtx_COMPARE (comp_mode, op0, op1)),
21886 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21887 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21888 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21889 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21890 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21891 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21892 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21893 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21894 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21895 else if (GET_CODE (op1) == UNSPEC
21896 && XINT (op1, 1) == UNSPEC_SP_TEST)
21898 rtx op1b = XVECEXP (op1, 0, 0);
21899 comp_mode = CCEQmode;
21900 compare_result = gen_reg_rtx (CCEQmode);
21901 if (TARGET_64BIT)
21902 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21903 else
21904 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21906 else
21907 emit_insn (gen_rtx_SET (compare_result,
21908 gen_rtx_COMPARE (comp_mode, op0, op1)));
21911 /* Some kinds of FP comparisons need an OR operation;
21912 under flag_finite_math_only we don't bother. */
21913 if (FLOAT_MODE_P (mode)
21914 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21915 && !flag_finite_math_only
21916 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
21917 && (code == LE || code == GE
21918 || code == UNEQ || code == LTGT
21919 || code == UNGT || code == UNLT))
21921 enum rtx_code or1, or2;
21922 rtx or1_rtx, or2_rtx, compare2_rtx;
21923 rtx or_result = gen_reg_rtx (CCEQmode);
21925 switch (code)
21927 case LE: or1 = LT; or2 = EQ; break;
21928 case GE: or1 = GT; or2 = EQ; break;
21929 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21930 case LTGT: or1 = LT; or2 = GT; break;
21931 case UNGT: or1 = UNORDERED; or2 = GT; break;
21932 case UNLT: or1 = UNORDERED; or2 = LT; break;
21933 default: gcc_unreachable ();
21935 validate_condition_mode (or1, comp_mode);
21936 validate_condition_mode (or2, comp_mode);
21937 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21938 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21939 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21940 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21941 const_true_rtx);
21942 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21944 compare_result = or_result;
21945 code = EQ;
21948 validate_condition_mode (code, GET_MODE (compare_result));
21950 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
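/* As an illustration of the CCEQ block above: an FP "a <= b" is not a
   single CR bit, so it is rebuilt as (LT (cc, 0)) | (EQ (cc, 0))
   compared against true, which the machine description can emit as a
   cror of the two CR bits followed by a branch on the combined bit.  */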
21954 /* Return the diagnostic message string if the binary operation OP is
21955 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21957 static const char*
21958 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21959 const_tree type1,
21960 const_tree type2)
21962 enum machine_mode mode1 = TYPE_MODE (type1);
21963 enum machine_mode mode2 = TYPE_MODE (type2);
21965 /* For complex modes, use the inner type. */
21966 if (COMPLEX_MODE_P (mode1))
21967 mode1 = GET_MODE_INNER (mode1);
21969 if (COMPLEX_MODE_P (mode2))
21970 mode2 = GET_MODE_INNER (mode2);
21972 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21973 double to intermix unless -mfloat128-convert. */
21974 if (mode1 == mode2)
21975 return NULL;
21977 if (!TARGET_FLOAT128_CVT)
21979 if ((mode1 == KFmode && mode2 == IFmode)
21980 || (mode1 == IFmode && mode2 == KFmode))
21981 return N_("__float128 and __ibm128 cannot be used in the same "
21982 "expression");
21984 if (TARGET_IEEEQUAD
21985 && ((mode1 == IFmode && mode2 == TFmode)
21986 || (mode1 == TFmode && mode2 == IFmode)))
21987 return N_("__ibm128 and long double cannot be used in the same "
21988 "expression");
21990 if (!TARGET_IEEEQUAD
21991 && ((mode1 == KFmode && mode2 == TFmode)
21992 || (mode1 == TFmode && mode2 == KFmode)))
21993 return N_("__float128 and long double cannot be used in the same "
21994 "expression");
21997 return NULL;
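/* A minimal example of what this rejects (assuming -mfloat128 and no
   -mfloat128-convert):

	__float128 f;
	__ibm128 g;
	... f + g ...   -> error: __float128 and __ibm128 cannot be
			   used in the same expression  */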
22001 /* Expand floating point conversion to/from __float128 and __ibm128. */
22003 void
22004 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22006 machine_mode dest_mode = GET_MODE (dest);
22007 machine_mode src_mode = GET_MODE (src);
22008 convert_optab cvt = unknown_optab;
22009 bool do_move = false;
22010 rtx libfunc = NULL_RTX;
22011 rtx dest2;
22012 typedef rtx (*rtx_2func_t) (rtx, rtx);
22013 rtx_2func_t hw_convert = (rtx_2func_t)0;
22014 size_t kf_or_tf;
22016 struct hw_conv_t {
22017 rtx_2func_t from_df;
22018 rtx_2func_t from_sf;
22019 rtx_2func_t from_si_sign;
22020 rtx_2func_t from_si_uns;
22021 rtx_2func_t from_di_sign;
22022 rtx_2func_t from_di_uns;
22023 rtx_2func_t to_df;
22024 rtx_2func_t to_sf;
22025 rtx_2func_t to_si_sign;
22026 rtx_2func_t to_si_uns;
22027 rtx_2func_t to_di_sign;
22028 rtx_2func_t to_di_uns;
22029 } hw_conversions[2] = {
22030 /* conversions to/from KFmode */
22032 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22033 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22034 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22035 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22036 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22037 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22038 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22039 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22040 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22041 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22042 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22043 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22046 /* conversions to/from TFmode */
22048 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22049 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22050 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22051 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22052 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22053 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22054 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22055 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22056 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22057 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22058 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22059 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22063 if (dest_mode == src_mode)
22064 gcc_unreachable ();
22066 /* Eliminate memory operations. */
22067 if (MEM_P (src))
22068 src = force_reg (src_mode, src);
22070 if (MEM_P (dest))
22072 rtx tmp = gen_reg_rtx (dest_mode);
22073 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22074 rs6000_emit_move (dest, tmp, dest_mode);
22075 return;
22078 /* Convert to IEEE 128-bit floating point. */
22079 if (FLOAT128_IEEE_P (dest_mode))
22081 if (dest_mode == KFmode)
22082 kf_or_tf = 0;
22083 else if (dest_mode == TFmode)
22084 kf_or_tf = 1;
22085 else
22086 gcc_unreachable ();
22088 switch (src_mode)
22090 case DFmode:
22091 cvt = sext_optab;
22092 hw_convert = hw_conversions[kf_or_tf].from_df;
22093 break;
22095 case SFmode:
22096 cvt = sext_optab;
22097 hw_convert = hw_conversions[kf_or_tf].from_sf;
22098 break;
22100 case KFmode:
22101 case IFmode:
22102 case TFmode:
22103 if (FLOAT128_IBM_P (src_mode))
22104 cvt = sext_optab;
22105 else
22106 do_move = true;
22107 break;
22109 case SImode:
22110 if (unsigned_p)
22112 cvt = ufloat_optab;
22113 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22115 else
22117 cvt = sfloat_optab;
22118 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22120 break;
22122 case DImode:
22123 if (unsigned_p)
22125 cvt = ufloat_optab;
22126 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22128 else
22130 cvt = sfloat_optab;
22131 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22133 break;
22135 default:
22136 gcc_unreachable ();
22140 /* Convert from IEEE 128-bit floating point. */
22141 else if (FLOAT128_IEEE_P (src_mode))
22143 if (src_mode == KFmode)
22144 kf_or_tf = 0;
22145 else if (src_mode == TFmode)
22146 kf_or_tf = 1;
22147 else
22148 gcc_unreachable ();
22150 switch (dest_mode)
22152 case DFmode:
22153 cvt = trunc_optab;
22154 hw_convert = hw_conversions[kf_or_tf].to_df;
22155 break;
22157 case SFmode:
22158 cvt = trunc_optab;
22159 hw_convert = hw_conversions[kf_or_tf].to_sf;
22160 break;
22162 case KFmode:
22163 case IFmode:
22164 case TFmode:
22165 if (FLOAT128_IBM_P (dest_mode))
22166 cvt = trunc_optab;
22167 else
22168 do_move = true;
22169 break;
22171 case SImode:
22172 if (unsigned_p)
22174 cvt = ufix_optab;
22175 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22177 else
22179 cvt = sfix_optab;
22180 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22182 break;
22184 case DImode:
22185 if (unsigned_p)
22187 cvt = ufix_optab;
22188 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22190 else
22192 cvt = sfix_optab;
22193 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22195 break;
22197 default:
22198 gcc_unreachable ();
22202 /* Both IBM format. */
22203 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22204 do_move = true;
22206 else
22207 gcc_unreachable ();
22209 /* Handle conversion between TFmode/KFmode. */
22210 if (do_move)
22211 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22213 /* Handle conversion if we have hardware support. */
22214 else if (TARGET_FLOAT128_HW && hw_convert)
22215 emit_insn ((hw_convert) (dest, src));
22217 /* Call an external function to do the conversion. */
22218 else if (cvt != unknown_optab)
22220 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22221 gcc_assert (libfunc != NULL_RTX);
22223 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode, 1, src,
22224 src_mode);
22226 gcc_assert (dest2 != NULL_RTX);
22227 if (!rtx_equal_p (dest, dest2))
22228 emit_move_insn (dest, dest2);
22231 else
22232 gcc_unreachable ();
22234 return;
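/* For instance, a signed DImode -> KFmode conversion takes the
   sfloat_optab path: with ISA 3.0 hardware support it emits
   gen_float_kfdi2_hw, and otherwise it calls the libgcc routine
   returned by convert_optab_libfunc (likely __floatdikf; the name is
   given here only for illustration).  */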
22237 /* Split a conversion from __float128 to an integer type into separate insns.
22238 OPERANDS points to the destination, source, and V2DI temporary
22239 register. CODE is either FIX or UNSIGNED_FIX. */
22241 void
22242 convert_float128_to_int (rtx *operands, enum rtx_code code)
22244 rtx dest = operands[0];
22245 rtx src = operands[1];
22246 rtx tmp = operands[2];
22247 rtx cvt;
22248 rtvec cvt_vec;
22249 rtx cvt_unspec;
22250 rtvec move_vec;
22251 rtx move_unspec;
22253 if (GET_CODE (tmp) == SCRATCH)
22254 tmp = gen_reg_rtx (V2DImode);
22256 if (MEM_P (dest))
22257 dest = rs6000_address_for_fpconvert (dest);
22259 /* Generate the actual convert insn of the form:
22260 (set (tmp) (unspec:V2DI [(fix:SI (reg:KF))] UNSPEC_IEEE128_CONVERT)). */
22261 cvt = gen_rtx_fmt_e (code, GET_MODE (dest), src);
22262 cvt_vec = gen_rtvec (1, cvt);
22263 cvt_unspec = gen_rtx_UNSPEC (V2DImode, cvt_vec, UNSPEC_IEEE128_CONVERT);
22264 emit_insn (gen_rtx_SET (tmp, cvt_unspec));
22266 /* Generate the move insn of the form:
22267 (set (dest:SI) (unspec:SI [(tmp:V2DI)] UNSPEC_IEEE128_MOVE)). */
22268 move_vec = gen_rtvec (1, tmp);
22269 move_unspec = gen_rtx_UNSPEC (GET_MODE (dest), move_vec, UNSPEC_IEEE128_MOVE);
22270 emit_insn (gen_rtx_SET (dest, move_unspec));
22273 /* Split a conversion from an integer type to __float128 into separate insns.
22274 OPERANDS points to the destination, source, and V2DI temporary
22275 register. CODE is either FLOAT or UNSIGNED_FLOAT. */
22277 void
22278 convert_int_to_float128 (rtx *operands, enum rtx_code code)
22280 rtx dest = operands[0];
22281 rtx src = operands[1];
22282 rtx tmp = operands[2];
22283 rtx cvt;
22284 rtvec cvt_vec;
22285 rtx cvt_unspec;
22286 rtvec move_vec;
22287 rtx move_unspec;
22288 rtx unsigned_flag;
22290 if (GET_CODE (tmp) == SCRATCH)
22291 tmp = gen_reg_rtx (V2DImode);
22293 if (MEM_P (src))
22294 src = rs6000_address_for_fpconvert (src);
22296 /* Generate the move of the integer into the Altivec register of the form:
22297 (set (tmp:V2DI) (unspec:V2DI [(src:SI)
22298 (const_int 0)] UNSPEC_IEEE128_MOVE)).
22301 or: (set (tmp:V2DI) (unspec:V2DI [(src:DI)] UNSPEC_IEEE128_MOVE)). */
22303 if (GET_MODE (src) == SImode)
22305 unsigned_flag = (code == UNSIGNED_FLOAT) ? const1_rtx : const0_rtx;
22306 move_vec = gen_rtvec (2, src, unsigned_flag);
22308 else
22309 move_vec = gen_rtvec (1, src);
22311 move_unspec = gen_rtx_UNSPEC (V2DImode, move_vec, UNSPEC_IEEE128_MOVE);
22312 emit_insn (gen_rtx_SET (tmp, move_unspec));
22314 /* Generate the actual convert insn of the form:
22315 (set (dest:KF) (float:KF (unspec:DI [(tmp:V2DI)]
22316 UNSPEC_IEEE128_CONVERT))). */
22317 cvt_vec = gen_rtvec (1, tmp);
22318 cvt_unspec = gen_rtx_UNSPEC (DImode, cvt_vec, UNSPEC_IEEE128_CONVERT);
22319 cvt = gen_rtx_fmt_e (code, GET_MODE (dest), cvt_unspec);
22320 emit_insn (gen_rtx_SET (dest, cvt));
22324 /* Emit the RTL for an sISEL pattern. */
22326 void
22327 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
22329 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
22332 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22333 can be used as that dest register. Return the dest register. */
22336 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22338 if (op2 == const0_rtx)
22339 return op1;
22341 if (GET_CODE (scratch) == SCRATCH)
22342 scratch = gen_reg_rtx (mode);
22344 if (logical_operand (op2, mode))
22345 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22346 else
22347 emit_insn (gen_rtx_SET (scratch,
22348 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22350 return scratch;
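/* Sketch of the transformation: for "r == 13" this emits
   scratch = r ^ 13, since 13 is a logical_operand, while for a large
   constant such as 0x12345678 it emits scratch = r + (-0x12345678);
   either way scratch is zero exactly when the operands are equal.  */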
22353 void
22354 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22356 rtx condition_rtx;
22357 machine_mode op_mode;
22358 enum rtx_code cond_code;
22359 rtx result = operands[0];
22361 condition_rtx = rs6000_generate_compare (operands[1], mode);
22362 cond_code = GET_CODE (condition_rtx);
22364 if (FLOAT_MODE_P (mode)
22365 && !TARGET_FPRS && TARGET_HARD_FLOAT)
22367 rtx t;
22369 PUT_MODE (condition_rtx, SImode);
22370 t = XEXP (condition_rtx, 0);
22372 gcc_assert (cond_code == NE || cond_code == EQ);
22374 if (cond_code == NE)
22375 emit_insn (gen_e500_flip_gt_bit (t, t));
22377 emit_insn (gen_move_from_CR_gt_bit (result, t));
22378 return;
22381 if (cond_code == NE
22382 || cond_code == GE || cond_code == LE
22383 || cond_code == GEU || cond_code == LEU
22384 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22386 rtx not_result = gen_reg_rtx (CCEQmode);
22387 rtx not_op, rev_cond_rtx;
22388 machine_mode cc_mode;
22390 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22392 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22393 SImode, XEXP (condition_rtx, 0), const0_rtx);
22394 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22395 emit_insn (gen_rtx_SET (not_result, not_op));
22396 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22399 op_mode = GET_MODE (XEXP (operands[1], 0));
22400 if (op_mode == VOIDmode)
22401 op_mode = GET_MODE (XEXP (operands[1], 1));
22403 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22405 PUT_MODE (condition_rtx, DImode);
22406 convert_move (result, condition_rtx, 0);
22408 else
22410 PUT_MODE (condition_rtx, SImode);
22411 emit_insn (gen_rtx_SET (result, condition_rtx));
22415 /* Emit a branch of kind CODE to location LOC. */
22417 void
22418 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22420 rtx condition_rtx, loc_ref;
22422 condition_rtx = rs6000_generate_compare (operands[0], mode);
22423 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22424 emit_jump_insn (gen_rtx_SET (pc_rtx,
22425 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22426 loc_ref, pc_rtx)));
22429 /* Return the string to output a conditional branch to LABEL, which is
22430 the operand template of the label, or NULL if the branch is really a
22431 conditional return.
22433 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22434 condition code register and its mode specifies what kind of
22435 comparison we made.
22437 REVERSED is nonzero if we should reverse the sense of the comparison.
22439 INSN is the insn. */
22441 char *
22442 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22444 static char string[64];
22445 enum rtx_code code = GET_CODE (op);
22446 rtx cc_reg = XEXP (op, 0);
22447 machine_mode mode = GET_MODE (cc_reg);
22448 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22449 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22450 int really_reversed = reversed ^ need_longbranch;
22451 char *s = string;
22452 const char *ccode;
22453 const char *pred;
22454 rtx note;
22456 validate_condition_mode (code, mode);
22458 /* Work out which way this really branches. We could use
22459 reverse_condition_maybe_unordered here always but this
22460 makes the resulting assembler clearer. */
22461 if (really_reversed)
22463 /* Reversal of FP compares requires care -- an ordered compare
22464 becomes an unordered compare and vice versa. */
22465 if (mode == CCFPmode)
22466 code = reverse_condition_maybe_unordered (code);
22467 else
22468 code = reverse_condition (code);
22471 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
22473 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
22474 to the GT bit. */
22475 switch (code)
22477 case EQ:
22478 /* Opposite of GT. */
22479 code = GT;
22480 break;
22482 case NE:
22483 code = UNLE;
22484 break;
22486 default:
22487 gcc_unreachable ();
22491 switch (code)
22493 /* Not all of these are actually distinct opcodes, but
22494 we distinguish them for clarity of the resulting assembler. */
22495 case NE: case LTGT:
22496 ccode = "ne"; break;
22497 case EQ: case UNEQ:
22498 ccode = "eq"; break;
22499 case GE: case GEU:
22500 ccode = "ge"; break;
22501 case GT: case GTU: case UNGT:
22502 ccode = "gt"; break;
22503 case LE: case LEU:
22504 ccode = "le"; break;
22505 case LT: case LTU: case UNLT:
22506 ccode = "lt"; break;
22507 case UNORDERED: ccode = "un"; break;
22508 case ORDERED: ccode = "nu"; break;
22509 case UNGE: ccode = "nl"; break;
22510 case UNLE: ccode = "ng"; break;
22511 default:
22512 gcc_unreachable ();
22515 /* Maybe we have a guess as to how likely the branch is. */
22516 pred = "";
22517 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22518 if (note != NULL_RTX)
22520 /* PROB is the difference from 50%. */
22521 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
22523 /* Only hint for highly probable/improbable branches on newer cpus when
22524 we have real profile data, as static prediction overrides processor
22525 dynamic prediction. For older cpus we may as well always hint, but
22526 assume not taken for branches that are very close to 50% as a
22527 mispredicted taken branch is more expensive than a
22528 mispredicted not-taken branch. */
22529 if (rs6000_always_hint
22530 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22531 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22532 && br_prob_note_reliable_p (note)))
22534 if (abs (prob) > REG_BR_PROB_BASE / 20
22535 && ((prob > 0) ^ need_longbranch))
22536 pred = "+";
22537 else
22538 pred = "-";
22542 if (label == NULL)
22543 s += sprintf (s, "b%slr%s ", ccode, pred);
22544 else
22545 s += sprintf (s, "b%s%s ", ccode, pred);
22547 /* We need to escape any '%' characters in the reg_names string.
22548 Assume they'd only be the first character.... */
22549 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22550 *s++ = '%';
22551 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22553 if (label != NULL)
22555 /* If the branch distance was too far, we may have to use an
22556 unconditional branch to go the distance. */
22557 if (need_longbranch)
22558 s += sprintf (s, ",$+8\n\tb %s", label);
22559 else
22560 s += sprintf (s, ",%s", label);
22563 return string;
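/* Example outputs (CR field, hint and label are illustrative, using
   the default numeric register names): a likely-taken equality branch
   on cr0 prints "beq+ 0,.L5".  If the target is out of range for the
   conditional-branch displacement, the sense is inverted and the
   routine prints "bne 0,$+8" followed by "b .L5".  */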
22566 /* Return the string to flip the GT bit on a CR. */
22567 char *
22568 output_e500_flip_gt_bit (rtx dst, rtx src)
22570 static char string[64];
22571 int a, b;
22573 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
22574 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
22576 /* GT bit. */
22577 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
22578 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
22580 sprintf (string, "crnot %d,%d", a, b);
22581 return string;
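/* Worked example: flipping the GT bit of cr5 into cr6 gives
   a = 4*6 + 1 = 25 and b = 4*5 + 1 = 21, so the routine returns
   "crnot 25,21".  */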
22584 /* Return insn for VSX or Altivec comparisons. */
22586 static rtx
22587 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22589 rtx mask;
22590 machine_mode mode = GET_MODE (op0);
22592 switch (code)
22594 default:
22595 break;
22597 case GE:
22598 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22599 return NULL_RTX;
22601 case EQ:
22602 case GT:
22603 case GTU:
22604 case ORDERED:
22605 case UNORDERED:
22606 case UNEQ:
22607 case LTGT:
22608 mask = gen_reg_rtx (mode);
22609 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22610 return mask;
22613 return NULL_RTX;
22616 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22617 DMODE is expected destination mode. This is a recursive function. */
22619 static rtx
22620 rs6000_emit_vector_compare (enum rtx_code rcode,
22621 rtx op0, rtx op1,
22622 machine_mode dmode)
22624 rtx mask;
22625 bool swap_operands = false;
22626 bool try_again = false;
22628 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22629 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22631 /* See if the comparison works as is. */
22632 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22633 if (mask)
22634 return mask;
22636 switch (rcode)
22638 case LT:
22639 rcode = GT;
22640 swap_operands = true;
22641 try_again = true;
22642 break;
22643 case LTU:
22644 rcode = GTU;
22645 swap_operands = true;
22646 try_again = true;
22647 break;
22648 case NE:
22649 case UNLE:
22650 case UNLT:
22651 case UNGE:
22652 case UNGT:
22653 /* Invert condition and try again.
22654 e.g., A != B becomes ~(A==B). */
22656 enum rtx_code rev_code;
22657 enum insn_code nor_code;
22658 rtx mask2;
22660 rev_code = reverse_condition_maybe_unordered (rcode);
22661 if (rev_code == UNKNOWN)
22662 return NULL_RTX;
22664 nor_code = optab_handler (one_cmpl_optab, dmode);
22665 if (nor_code == CODE_FOR_nothing)
22666 return NULL_RTX;
22668 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22669 if (!mask2)
22670 return NULL_RTX;
22672 mask = gen_reg_rtx (dmode);
22673 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22674 return mask;
22676 break;
22677 case GE:
22678 case GEU:
22679 case LE:
22680 case LEU:
22681 /* Try GT/GTU/LT/LTU OR EQ */
22683 rtx c_rtx, eq_rtx;
22684 enum insn_code ior_code;
22685 enum rtx_code new_code;
22687 switch (rcode)
22689 case GE:
22690 new_code = GT;
22691 break;
22693 case GEU:
22694 new_code = GTU;
22695 break;
22697 case LE:
22698 new_code = LT;
22699 break;
22701 case LEU:
22702 new_code = LTU;
22703 break;
22705 default:
22706 gcc_unreachable ();
22709 ior_code = optab_handler (ior_optab, dmode);
22710 if (ior_code == CODE_FOR_nothing)
22711 return NULL_RTX;
22713 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22714 if (!c_rtx)
22715 return NULL_RTX;
22717 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22718 if (!eq_rtx)
22719 return NULL_RTX;
22721 mask = gen_reg_rtx (dmode);
22722 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22723 return mask;
22725 break;
22726 default:
22727 return NULL_RTX;
22730 if (try_again)
22732 if (swap_operands)
22733 std::swap (op0, op1);
22735 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22736 if (mask)
22737 return mask;
22740 /* You only get two chances. */
22741 return NULL_RTX;
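/* Synthesis examples: "a < b" is retried as "b > a" with the operands
   swapped; "a != b" is computed as ~(a == b) via one_cmpl_optab; and
   "a <= b" is computed as (a < b) | (a == b) via ior_optab.  */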
22744 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22745 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22746 operands for the relation operation COND. */
22749 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22750 rtx cond, rtx cc_op0, rtx cc_op1)
22752 machine_mode dest_mode = GET_MODE (dest);
22753 machine_mode mask_mode = GET_MODE (cc_op0);
22754 enum rtx_code rcode = GET_CODE (cond);
22755 machine_mode cc_mode = CCmode;
22756 rtx mask;
22757 rtx cond2;
22758 rtx tmp;
22759 bool invert_move = false;
22761 if (VECTOR_UNIT_NONE_P (dest_mode))
22762 return 0;
22764 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22765 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22767 switch (rcode)
22769 /* Swap operands if we can, and fall back to doing the operation as
22770 specified, and doing a NOR to invert the test. */
22771 case NE:
22772 case UNLE:
22773 case UNLT:
22774 case UNGE:
22775 case UNGT:
22776 /* Invert condition and try again.
22777 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22778 invert_move = true;
22779 rcode = reverse_condition_maybe_unordered (rcode);
22780 if (rcode == UNKNOWN)
22781 return 0;
22782 break;
22784 /* Mark unsigned tests with CCUNSmode. */
22785 case GTU:
22786 case GEU:
22787 case LTU:
22788 case LEU:
22789 cc_mode = CCUNSmode;
22790 break;
22792 default:
22793 break;
22796 /* Get the vector mask for the given relational operations. */
22797 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22799 if (!mask)
22800 return 0;
22802 if (invert_move)
22804 tmp = op_true;
22805 op_true = op_false;
22806 op_false = tmp;
22809 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22810 CONST0_RTX (dest_mode));
22811 emit_insn (gen_rtx_SET (dest,
22812 gen_rtx_IF_THEN_ELSE (dest_mode,
22813 cond2,
22814 op_true,
22815 op_false)));
22816 return 1;
22819 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22820 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of the last
22821 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22822 hardware has no such operation. */
22824 static int
22825 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22827 enum rtx_code code = GET_CODE (op);
22828 rtx op0 = XEXP (op, 0);
22829 rtx op1 = XEXP (op, 1);
22830 machine_mode compare_mode = GET_MODE (op0);
22831 machine_mode result_mode = GET_MODE (dest);
22832 bool max_p = false;
22834 if (result_mode != compare_mode)
22835 return 0;
22837 if (code == GE || code == GT)
22838 max_p = true;
22839 else if (code == LE || code == LT)
22840 max_p = false;
22841 else
22842 return 0;
22844 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22847 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22848 max_p = !max_p;
22850 else
22851 return 0;
22853 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22854 return 1;
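/* For example, "a >= b ? a : b" maps to SMAX and is emitted as a
   single xsmaxcdp, while "a >= b ? b : a" flips max_p and becomes
   xsmincdp.  */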
22857 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22858 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22859 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22860 zero/false. Return 0 if the hardware has no such operation. */
22862 static int
22863 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22865 enum rtx_code code = GET_CODE (op);
22866 rtx op0 = XEXP (op, 0);
22867 rtx op1 = XEXP (op, 1);
22868 machine_mode result_mode = GET_MODE (dest);
22869 rtx compare_rtx;
22870 rtx cmove_rtx;
22871 rtx clobber_rtx;
22873 if (!can_create_pseudo_p ())
22874 return 0;
22876 switch (code)
22878 case EQ:
22879 case GE:
22880 case GT:
22881 break;
22883 case NE:
22884 case LT:
22885 case LE:
22886 code = swap_condition (code);
22887 std::swap (op0, op1);
22888 break;
22890 default:
22891 return 0;
22894 /* Generate: [(parallel [(set (dest)
22895 (if_then_else (op (cmp1) (cmp2))
22896 (true)
22897 (false)))
22898 (clobber (scratch))])]. */
22900 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22901 cmove_rtx = gen_rtx_SET (dest,
22902 gen_rtx_IF_THEN_ELSE (result_mode,
22903 compare_rtx,
22904 true_cond,
22905 false_cond));
22907 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22908 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22909 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22911 return 1;
22914 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22915 operands of the last comparison is nonzero/true, FALSE_COND if it
22916 is zero/false. Return 0 if the hardware has no such operation. */
22919 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22921 enum rtx_code code = GET_CODE (op);
22922 rtx op0 = XEXP (op, 0);
22923 rtx op1 = XEXP (op, 1);
22924 machine_mode compare_mode = GET_MODE (op0);
22925 machine_mode result_mode = GET_MODE (dest);
22926 rtx temp;
22927 bool is_against_zero;
22929 /* These modes should always match. */
22930 if (GET_MODE (op1) != compare_mode
22931 /* In the isel case however, we can use a compare immediate, so
22932 op1 may be a small constant. */
22933 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22934 return 0;
22935 if (GET_MODE (true_cond) != result_mode)
22936 return 0;
22937 if (GET_MODE (false_cond) != result_mode)
22938 return 0;
22940 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22941 if (TARGET_P9_MINMAX
22942 && (compare_mode == SFmode || compare_mode == DFmode)
22943 && (result_mode == SFmode || result_mode == DFmode))
22945 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22946 return 1;
22948 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22949 return 1;
22952 /* Don't allow using floating point comparisons for integer results for
22953 now. */
22954 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22955 return 0;
22957 /* First, work out if the hardware can do this at all, or
22958 if it's too slow.... */
22959 if (!FLOAT_MODE_P (compare_mode))
22961 if (TARGET_ISEL)
22962 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22963 return 0;
22965 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
22966 && SCALAR_FLOAT_MODE_P (compare_mode))
22967 return 0;
22969 is_against_zero = op1 == CONST0_RTX (compare_mode);
22971 /* A floating-point subtract might overflow, underflow, or produce
22972 an inexact result, thus changing the floating-point flags, so it
22973 can't be generated if we care about that. It's safe if one side
22974 of the construct is zero, since then no subtract will be
22975 generated. */
22976 if (SCALAR_FLOAT_MODE_P (compare_mode)
22977 && flag_trapping_math && ! is_against_zero)
22978 return 0;
22980 /* Eliminate half of the comparisons by switching operands, this
22981 makes the remaining code simpler. */
22982 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22983 || code == LTGT || code == LT || code == UNLE)
22985 code = reverse_condition_maybe_unordered (code);
22986 temp = true_cond;
22987 true_cond = false_cond;
22988 false_cond = temp;
22991 /* UNEQ and LTGT take four instructions for a comparison with zero;
22992 it'll probably be faster to use a branch here too. */
22993 if (code == UNEQ && HONOR_NANS (compare_mode))
22994 return 0;
22996 /* We're going to try to implement comparisons by performing
22997 a subtract, then comparing against zero. Unfortunately,
22998 Inf - Inf is NaN which is not zero, and so if we don't
22999 know that the operand is finite and the comparison
23000 would treat EQ different to UNORDERED, we can't do it. */
23001 if (HONOR_INFINITIES (compare_mode)
23002 && code != GT && code != UNGE
23003 && (GET_CODE (op1) != CONST_DOUBLE
23004 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23005 /* Constructs of the form (a OP b ? a : b) are safe. */
23006 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23007 || (! rtx_equal_p (op0, true_cond)
23008 && ! rtx_equal_p (op1, true_cond))))
23009 return 0;
23011 /* At this point we know we can use fsel. */
23013 /* Reduce the comparison to a comparison against zero. */
23014 if (! is_against_zero)
23016 temp = gen_reg_rtx (compare_mode);
23017 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23018 op0 = temp;
23019 op1 = CONST0_RTX (compare_mode);
23022 /* If we don't care about NaNs we can reduce some of the comparisons
23023 down to faster ones. */
23024 if (! HONOR_NANS (compare_mode))
23025 switch (code)
23027 case GT:
23028 code = LE;
23029 temp = true_cond;
23030 true_cond = false_cond;
23031 false_cond = temp;
23032 break;
23033 case UNGE:
23034 code = GE;
23035 break;
23036 case UNEQ:
23037 code = EQ;
23038 break;
23039 default:
23040 break;
23043 /* Now, reduce everything down to a GE. */
23044 switch (code)
23046 case GE:
23047 break;
23049 case LE:
23050 temp = gen_reg_rtx (compare_mode);
23051 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23052 op0 = temp;
23053 break;
23055 case ORDERED:
23056 temp = gen_reg_rtx (compare_mode);
23057 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23058 op0 = temp;
23059 break;
23061 case EQ:
23062 temp = gen_reg_rtx (compare_mode);
23063 emit_insn (gen_rtx_SET (temp,
23064 gen_rtx_NEG (compare_mode,
23065 gen_rtx_ABS (compare_mode, op0))));
23066 op0 = temp;
23067 break;
23069 case UNGE:
23070 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23071 temp = gen_reg_rtx (result_mode);
23072 emit_insn (gen_rtx_SET (temp,
23073 gen_rtx_IF_THEN_ELSE (result_mode,
23074 gen_rtx_GE (VOIDmode,
23075 op0, op1),
23076 true_cond, false_cond)));
23077 false_cond = true_cond;
23078 true_cond = temp;
23080 temp = gen_reg_rtx (compare_mode);
23081 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23082 op0 = temp;
23083 break;
23085 case GT:
23086 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23087 temp = gen_reg_rtx (result_mode);
23088 emit_insn (gen_rtx_SET (temp,
23089 gen_rtx_IF_THEN_ELSE (result_mode,
23090 gen_rtx_GE (VOIDmode,
23091 op0, op1),
23092 true_cond, false_cond)));
23093 true_cond = false_cond;
23094 false_cond = temp;
23096 temp = gen_reg_rtx (compare_mode);
23097 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23098 op0 = temp;
23099 break;
23101 default:
23102 gcc_unreachable ();
23105 emit_insn (gen_rtx_SET (dest,
23106 gen_rtx_IF_THEN_ELSE (result_mode,
23107 gen_rtx_GE (VOIDmode,
23108 op0, op1),
23109 true_cond, false_cond)));
23110 return 1;
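/* Reduction example for the fsel path: "a <= 0.0 ? x : y" is first
   rewritten as a GE test on the negated operand, i.e. t = -a followed
   by "t >= 0.0 ? x : y", which matches the single if_then_else-on-GE
   form emitted at the end.  */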
23113 /* Same as above, but for ints (isel). */
23115 static int
23116 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23118 rtx condition_rtx, cr;
23119 machine_mode mode = GET_MODE (dest);
23120 enum rtx_code cond_code;
23121 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23122 bool signedp;
23124 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23125 return 0;
23127 /* We still have to do the compare, because isel doesn't do a
23128 compare, it just looks at the CRx bits set by a previous compare
23129 instruction. */
23130 condition_rtx = rs6000_generate_compare (op, mode);
23131 cond_code = GET_CODE (condition_rtx);
23132 cr = XEXP (condition_rtx, 0);
23133 signedp = GET_MODE (cr) == CCmode;
23135 isel_func = (mode == SImode
23136 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23137 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23139 switch (cond_code)
23141 case LT: case GT: case LTU: case GTU: case EQ:
23142 /* isel handles these directly. */
23143 break;
23145 default:
23146 /* We need to swap the sense of the comparison. */
23148 std::swap (false_cond, true_cond);
23149 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23151 break;
23154 false_cond = force_reg (mode, false_cond);
23155 if (true_cond != const0_rtx)
23156 true_cond = force_reg (mode, true_cond);
23158 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23160 return 1;
23163 const char *
23164 output_isel (rtx *operands)
23166 enum rtx_code code;
23168 code = GET_CODE (operands[1]);
23170 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
23172 gcc_assert (GET_CODE (operands[2]) == REG
23173 && GET_CODE (operands[3]) == REG);
23174 PUT_CODE (operands[1], reverse_condition (code));
23175 return "isel %0,%3,%2,%j1";
23178 return "isel %0,%2,%3,%j1";
23181 void
23182 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23184 machine_mode mode = GET_MODE (op0);
23185 enum rtx_code c;
23186 rtx target;
23188 /* VSX/altivec have direct min/max insns. */
23189 if ((code == SMAX || code == SMIN)
23190 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23191 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23193 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23194 return;
23197 if (code == SMAX || code == SMIN)
23198 c = GE;
23199 else
23200 c = GEU;
23202 if (code == SMAX || code == UMAX)
23203 target = emit_conditional_move (dest, c, op0, op1, mode,
23204 op0, op1, mode, 0);
23205 else
23206 target = emit_conditional_move (dest, c, op0, op1, mode,
23207 op1, op0, mode, 0);
23208 gcc_assert (target);
23209 if (target != dest)
23210 emit_move_insn (dest, target);
23213 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23214 the value to come from memory, or to be already loaded into a GPR. */
23216 void
23217 rs6000_split_signbit (rtx dest, rtx src)
23219 machine_mode d_mode = GET_MODE (dest);
23220 machine_mode s_mode = GET_MODE (src);
23221 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23222 rtx shift_reg = dest_di;
23224 gcc_assert (REG_P (dest));
23225 gcc_assert (REG_P (src) || MEM_P (src));
23226 gcc_assert (s_mode == KFmode || s_mode == TFmode);
23228 if (MEM_P (src))
23230 rtx mem = (WORDS_BIG_ENDIAN
23231 ? adjust_address (src, DImode, 0)
23232 : adjust_address (src, DImode, 8));
23233 emit_insn (gen_rtx_SET (dest_di, mem));
23236 else
23238 unsigned int r = REGNO (src);
23240 /* If this is a VSX register, generate the special mfvsrd instruction
23241 to get it in a GPR. Until we support SF and DF modes, that will
23242 always be true. */
23243 gcc_assert (VSX_REGNO_P (r));
23245 if (s_mode == KFmode)
23246 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23247 else
23248 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23251 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23252 return;
23255 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23256 COND is true. Mark the jump as unlikely to be taken. */
23258 static void
23259 emit_unlikely_jump (rtx cond, rtx label)
23261 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
23262 rtx x;
23264 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23265 x = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23266 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
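/* With REG_BR_PROB_BASE of 10000 the note value works out to 99,
   i.e. the branch is annotated as taken roughly 1% of the time.  */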
23269 /* A subroutine of the atomic operation splitters. Emit a load-locked
23270 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23271 the zero_extend operation. */
23273 static void
23274 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23276 rtx (*fn) (rtx, rtx) = NULL;
23278 switch (mode)
23280 case QImode:
23281 fn = gen_load_lockedqi;
23282 break;
23283 case HImode:
23284 fn = gen_load_lockedhi;
23285 break;
23286 case SImode:
23287 if (GET_MODE (mem) == QImode)
23288 fn = gen_load_lockedqi_si;
23289 else if (GET_MODE (mem) == HImode)
23290 fn = gen_load_lockedhi_si;
23291 else
23292 fn = gen_load_lockedsi;
23293 break;
23294 case DImode:
23295 fn = gen_load_lockeddi;
23296 break;
23297 case TImode:
23298 fn = gen_load_lockedti;
23299 break;
23300 default:
23301 gcc_unreachable ();
23303 emit_insn (fn (reg, mem));
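/* The generators above correspond to the PowerPC load-and-reserve
   instructions: lbarx/lharx for QI/HImode, lwarx for SImode, ldarx
   for DImode and lqarx for TImode (instruction names given for
   orientation; the patterns themselves live in sync.md).  */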
23306 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23307 instruction in MODE. */
23309 static void
23310 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23312 rtx (*fn) (rtx, rtx, rtx) = NULL;
23314 switch (mode)
23316 case QImode:
23317 fn = gen_store_conditionalqi;
23318 break;
23319 case HImode:
23320 fn = gen_store_conditionalhi;
23321 break;
23322 case SImode:
23323 fn = gen_store_conditionalsi;
23324 break;
23325 case DImode:
23326 fn = gen_store_conditionaldi;
23327 break;
23328 case TImode:
23329 fn = gen_store_conditionalti;
23330 break;
23331 default:
23332 gcc_unreachable ();
23335 /* Emit sync before stwcx. to address PPC405 Erratum. */
23336 if (PPC405_ERRATUM77)
23337 emit_insn (gen_hwsync ());
23339 emit_insn (fn (res, mem, val));
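/* These map to the matching store-conditional instructions
   (stbcx./sthcx./stwcx./stdcx./stqcx.), which succeed only if the
   reservation from the earlier load-and-reserve is still held.  */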
23342 /* Expand barriers before and after a load_locked/store_cond sequence. */
23344 static rtx
23345 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23347 rtx addr = XEXP (mem, 0);
23348 int strict_p = (reload_in_progress || reload_completed);
23350 if (!legitimate_indirect_address_p (addr, strict_p)
23351 && !legitimate_indexed_address_p (addr, strict_p))
23353 addr = force_reg (Pmode, addr);
23354 mem = replace_equiv_address_nv (mem, addr);
23357 switch (model)
23359 case MEMMODEL_RELAXED:
23360 case MEMMODEL_CONSUME:
23361 case MEMMODEL_ACQUIRE:
23362 break;
23363 case MEMMODEL_RELEASE:
23364 case MEMMODEL_ACQ_REL:
23365 emit_insn (gen_lwsync ());
23366 break;
23367 case MEMMODEL_SEQ_CST:
23368 emit_insn (gen_hwsync ());
23369 break;
23370 default:
23371 gcc_unreachable ();
23373 return mem;
23376 static void
23377 rs6000_post_atomic_barrier (enum memmodel model)
23379 switch (model)
23381 case MEMMODEL_RELAXED:
23382 case MEMMODEL_CONSUME:
23383 case MEMMODEL_RELEASE:
23384 break;
23385 case MEMMODEL_ACQUIRE:
23386 case MEMMODEL_ACQ_REL:
23387 case MEMMODEL_SEQ_CST:
23388 emit_insn (gen_isync ());
23389 break;
23390 default:
23391 gcc_unreachable ();
23395 /* A subroutine of the various atomic expanders. For sub-word operations,
23396 we must adjust things to operate on SImode. Given the original MEM,
23397 return a new aligned memory. Also build and return the quantities by
23398 which to shift and mask. */
23400 static rtx
23401 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23403 rtx addr, align, shift, mask, mem;
23404 HOST_WIDE_INT shift_mask;
23405 machine_mode mode = GET_MODE (orig_mem);
23407 /* For smaller modes, we have to implement this via SImode. */
23408 shift_mask = (mode == QImode ? 0x18 : 0x10);
23410 addr = XEXP (orig_mem, 0);
23411 addr = force_reg (GET_MODE (addr), addr);
23413 /* Aligned memory containing subword. Generate a new memory. We
23414 do not want any of the existing MEM_ATTR data, as we're now
23415 accessing memory outside the original object. */
23416 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23417 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23418 mem = gen_rtx_MEM (SImode, align);
23419 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23420 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23421 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23423 /* Shift amount for subword relative to aligned word. */
23424 shift = gen_reg_rtx (SImode);
23425 addr = gen_lowpart (SImode, addr);
23426 rtx tmp = gen_reg_rtx (SImode);
23427 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23428 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23429 if (BYTES_BIG_ENDIAN)
23430 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23431 shift, 1, OPTAB_LIB_WIDEN);
23432 *pshift = shift;
23434 /* Mask for insertion. */
23435 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23436 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23437 *pmask = mask;
23439 return mem;
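/* Worked example, little-endian QImode at address 0x1003:
   align = 0x1000, shift = (0x1003 << 3) & 0x18 = 24, and
   mask = 0xff << 24, so the byte occupies bits 24..31 of the SImode
   word; on big-endian the shift is XORed with 0x18 and becomes 0
   instead.  */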
23442 /* A subroutine of the various atomic expanders. For sub-word operands,
23443 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23445 static rtx
23446 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23448 rtx x;
23450 x = gen_reg_rtx (SImode);
23451 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23452 gen_rtx_NOT (SImode, mask),
23453 oldval)));
23455 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23457 return x;
23460 /* A subroutine of the various atomic expanders. For sub-word operands,
23461 extract WIDE to NARROW via SHIFT. */
23463 static void
23464 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23466 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23467 wide, 1, OPTAB_LIB_WIDEN);
23468 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23471 /* Expand an atomic compare and swap operation. */
23473 void
23474 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23476 rtx boolval, retval, mem, oldval, newval, cond;
23477 rtx label1, label2, x, mask, shift;
23478 machine_mode mode, orig_mode;
23479 enum memmodel mod_s, mod_f;
23480 bool is_weak;
23482 boolval = operands[0];
23483 retval = operands[1];
23484 mem = operands[2];
23485 oldval = operands[3];
23486 newval = operands[4];
23487 is_weak = (INTVAL (operands[5]) != 0);
23488 mod_s = memmodel_base (INTVAL (operands[6]));
23489 mod_f = memmodel_base (INTVAL (operands[7]));
23490 orig_mode = mode = GET_MODE (mem);
23492 mask = shift = NULL_RTX;
23493 if (mode == QImode || mode == HImode)
23495 /* Before power8, we didn't have access to lbarx/lharx, so generate
23496 lwarx and shift/mask operations. With power8, we need to do the
23497 comparison in SImode, but the store is still done in QI/HImode. */
23498 oldval = convert_modes (SImode, mode, oldval, 1);
23500 if (!TARGET_SYNC_HI_QI)
23502 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23504 /* Shift and mask OLDVAL into position within the word. */
23505 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23506 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23508 /* Shift and mask NEWVAL into position within the word. */
23509 newval = convert_modes (SImode, mode, newval, 1);
23510 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23511 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23514 /* Prepare to adjust the return value. */
23515 retval = gen_reg_rtx (SImode);
23516 mode = SImode;
23518 else if (reg_overlap_mentioned_p (retval, oldval))
23519 oldval = copy_to_reg (oldval);
23521 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23522 oldval = copy_to_mode_reg (mode, oldval);
23524 if (reg_overlap_mentioned_p (retval, newval))
23525 newval = copy_to_reg (newval);
23527 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23529 label1 = NULL_RTX;
23530 if (!is_weak)
23532 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23533 emit_label (XEXP (label1, 0));
23535 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23537 emit_load_locked (mode, retval, mem);
23539 x = retval;
23540 if (mask)
23541 x = expand_simple_binop (SImode, AND, retval, mask,
23542 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23544 cond = gen_reg_rtx (CCmode);
23545 /* If we have TImode, synthesize a comparison. */
23546 if (mode != TImode)
23547 x = gen_rtx_COMPARE (CCmode, x, oldval);
23548 else
23550 rtx xor1_result = gen_reg_rtx (DImode);
23551 rtx xor2_result = gen_reg_rtx (DImode);
23552 rtx or_result = gen_reg_rtx (DImode);
23553 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23554 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23555 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23556 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23558 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23559 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23560 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23561 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23564 emit_insn (gen_rtx_SET (cond, x));
23566 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23567 emit_unlikely_jump (x, label2);
23569 x = newval;
23570 if (mask)
23571 x = rs6000_mask_atomic_subword (retval, newval, mask);
23573 emit_store_conditional (orig_mode, cond, mem, x);
23575 if (!is_weak)
23577 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23578 emit_unlikely_jump (x, label1);
23581 if (!is_mm_relaxed (mod_f))
23582 emit_label (XEXP (label2, 0));
23584 rs6000_post_atomic_barrier (mod_s);
23586 if (is_mm_relaxed (mod_f))
23587 emit_label (XEXP (label2, 0));
23589 if (shift)
23590 rs6000_finish_atomic_subword (operands[1], retval, shift);
23591 else if (mode != GET_MODE (operands[1]))
23592 convert_move (operands[1], retval, 1);
23594 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23595 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23596 emit_insn (gen_rtx_SET (boolval, x));
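/* For an SImode strong compare-and-swap the expansion above boils
   down to the classic loop (a sketch; register names illustrative):

	.L1:	lwarx   9,0,rMEM
		cmpw    0,9,rOLD
		bne-    0,.L2
		stwcx.  rNEW,0,rMEM
		bne-    0,.L1
	.L2:

   bracketed by the barriers chosen from the two memory models.  */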
23599 /* Expand an atomic exchange operation. */
23601 void
23602 rs6000_expand_atomic_exchange (rtx operands[])
23604 rtx retval, mem, val, cond;
23605 machine_mode mode;
23606 enum memmodel model;
23607 rtx label, x, mask, shift;
23609 retval = operands[0];
23610 mem = operands[1];
23611 val = operands[2];
23612 model = memmodel_base (INTVAL (operands[3]));
23613 mode = GET_MODE (mem);
23615 mask = shift = NULL_RTX;
23616 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23618 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23620 /* Shift and mask VAL into position within the word. */
23621 val = convert_modes (SImode, mode, val, 1);
23622 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23623 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23625 /* Prepare to adjust the return value. */
23626 retval = gen_reg_rtx (SImode);
23627 mode = SImode;
23630 mem = rs6000_pre_atomic_barrier (mem, model);
23632 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23633 emit_label (XEXP (label, 0));
23635 emit_load_locked (mode, retval, mem);
23637 x = val;
23638 if (mask)
23639 x = rs6000_mask_atomic_subword (retval, val, mask);
23641 cond = gen_reg_rtx (CCmode);
23642 emit_store_conditional (mode, cond, mem, x);
23644 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23645 emit_unlikely_jump (x, label);
23647 rs6000_post_atomic_barrier (model);
23649 if (shift)
23650 rs6000_finish_atomic_subword (operands[0], retval, shift);
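/* Usage illustration (hypothetical source code): a call such as

     unsigned char old = __atomic_exchange_n (&flag, 1, __ATOMIC_ACQUIRE);

   reaches this expander with QImode operands; without TARGET_SYNC_HI_QI
   the byte is shifted into position within an SImode word and swapped
   under the lwarx/stwcx. reservation loop built above.  */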
23653 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23654 to perform. MEM is the memory on which to operate. VAL is the second
23655 operand of the binary operator. BEFORE and AFTER are optional locations to
23656 return the value of MEM either before or after the operation. MODEL_RTX
23657 is a CONST_INT containing the memory model to use. */
23659 void
23660 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23661 rtx orig_before, rtx orig_after, rtx model_rtx)
23663 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23664 machine_mode mode = GET_MODE (mem);
23665 machine_mode store_mode = mode;
23666 rtx label, x, cond, mask, shift;
23667 rtx before = orig_before, after = orig_after;
23669 mask = shift = NULL_RTX;
23670 /* On power8, we want to use SImode for the operation. On previous systems,
23671 do the operation on a subword and shift/mask to get the proper byte or
23672 halfword. */
23673 if (mode == QImode || mode == HImode)
23675 if (TARGET_SYNC_HI_QI)
23677 val = convert_modes (SImode, mode, val, 1);
23679 /* Prepare to adjust the return value. */
23680 before = gen_reg_rtx (SImode);
23681 if (after)
23682 after = gen_reg_rtx (SImode);
23683 mode = SImode;
23685 else
23687 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23689 /* Shift and mask VAL into position within the word. */
23690 val = convert_modes (SImode, mode, val, 1);
23691 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23692 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23694 switch (code)
23696 case IOR:
23697 case XOR:
23698 /* We've already zero-extended VAL. That is sufficient to
23699 make certain that it does not affect other bits. */
23700 mask = NULL;
23701 break;
23703 case AND:
23704 /* If we make certain that all of the other bits in VAL are
23705 set, that will be sufficient to not affect other bits. */
23706 x = gen_rtx_NOT (SImode, mask);
23707 x = gen_rtx_IOR (SImode, x, val);
23708 emit_insn (gen_rtx_SET (val, x));
23709 mask = NULL;
23710 break;
23712 case NOT:
23713 case PLUS:
23714 case MINUS:
23715 /* These will all affect bits outside the field and need
23716 adjustment via MASK within the loop. */
23717 break;
23719 default:
23720 gcc_unreachable ();
23723 /* Prepare to adjust the return value. */
23724 before = gen_reg_rtx (SImode);
23725 if (after)
23726 after = gen_reg_rtx (SImode);
23727 store_mode = mode = SImode;
23731 mem = rs6000_pre_atomic_barrier (mem, model);
23733 label = gen_label_rtx ();
23734 emit_label (label);
23735 label = gen_rtx_LABEL_REF (VOIDmode, label);
23737 if (before == NULL_RTX)
23738 before = gen_reg_rtx (mode);
23740 emit_load_locked (mode, before, mem);
23742 if (code == NOT)
23744 x = expand_simple_binop (mode, AND, before, val,
23745 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23746 after = expand_simple_unop (mode, NOT, x, after, 1);
23748 else
23750 after = expand_simple_binop (mode, code, before, val,
23751 after, 1, OPTAB_LIB_WIDEN);
23754 x = after;
23755 if (mask)
23757 x = expand_simple_binop (SImode, AND, after, mask,
23758 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23759 x = rs6000_mask_atomic_subword (before, x, mask);
23761 else if (store_mode != mode)
23762 x = convert_modes (store_mode, mode, x, 1);
23764 cond = gen_reg_rtx (CCmode);
23765 emit_store_conditional (store_mode, cond, mem, x);
23767 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23768 emit_unlikely_jump (x, label);
23770 rs6000_post_atomic_barrier (model);
23772 if (shift)
23774 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23775 then do the calculations in an SImode register. */
23776 if (orig_before)
23777 rs6000_finish_atomic_subword (orig_before, before, shift);
23778 if (orig_after)
23779 rs6000_finish_atomic_subword (orig_after, after, shift);
23781 else if (store_mode != mode)
23783 /* QImode/HImode on machines with lbarx/lharx where we do the native
23784 operation and then do the calculations in an SImode register. */
23785 if (orig_before)
23786 convert_move (orig_before, before, 1);
23787 if (orig_after)
23788 convert_move (orig_after, after, 1);
23790 else if (orig_after && after != orig_after)
23791 emit_move_insn (orig_after, after);
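/* As an aside, here is a minimal C model of the subword splice that
   rs6000_mask_atomic_subword performs inside the loop above.  This is an
   illustrative sketch only (the function name and types are ours, not part
   of the port): the updated value X replaces just the bits under MASK,
   while the rest of the word keeps its old contents.  */

static unsigned int
atomic_subword_splice_model (unsigned int before, unsigned int x,
			     unsigned int mask)
{
  /* (x & mask) supplies the new byte/halfword already shifted into
     position; (before & ~mask) preserves the neighbouring lanes.  */
  return (x & mask) | (before & ~mask);
}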
23794 /* Emit instructions to move SRC to DST. Called by splitters for
23795 multi-register moves. It will emit at most one instruction for
23796 each register that is accessed; that is, it won't emit li/lis pairs
23797 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23798 register. */
23800 void
23801 rs6000_split_multireg_move (rtx dst, rtx src)
23803 /* The register number of the first register being moved. */
23804 int reg;
23805 /* The mode that is to be moved. */
23806 machine_mode mode;
23807 /* The mode that the move is being done in, and its size. */
23808 machine_mode reg_mode;
23809 int reg_mode_size;
23810 /* The number of registers that will be moved. */
23811 int nregs;
23813 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23814 mode = GET_MODE (dst);
23815 nregs = hard_regno_nregs[reg][mode];
23816 if (FP_REGNO_P (reg))
23817 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23818 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23819 else if (ALTIVEC_REGNO_P (reg))
23820 reg_mode = V16QImode;
23821 else if (TARGET_E500_DOUBLE && FLOAT128_2REG_P (mode))
23822 reg_mode = DFmode;
23823 else
23824 reg_mode = word_mode;
23825 reg_mode_size = GET_MODE_SIZE (reg_mode);
23827 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23829 /* TDmode residing in FP registers is special, since the ISA requires that
23830 the lower-numbered word of a register pair is always the most significant
23831 word, even in little-endian mode. This does not match the usual subreg
23832 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23833 the appropriate constituent registers "by hand" in little-endian mode.
23835 Note we do not need to check for destructive overlap here since TDmode
23836 can only reside in even/odd register pairs. */
23837 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23839 rtx p_src, p_dst;
23840 int i;
23842 for (i = 0; i < nregs; i++)
23844 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23845 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23846 else
23847 p_src = simplify_gen_subreg (reg_mode, src, mode,
23848 i * reg_mode_size);
23850 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23851 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23852 else
23853 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23854 i * reg_mode_size);
23856 emit_insn (gen_rtx_SET (p_dst, p_src));
23859 return;
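/* Worked example for the little-endian loop just above (register numbers
   assumed for illustration): with a TDmode SRC in fr10:fr11, the ISA makes
   fr10 the most-significant half, whereas subreg word 0 is the
   least-significant half on little-endian.  Hence iteration i == 0 reads
   REGNO + nregs - 1 - 0, i.e. fr11, and i == 1 reads fr10.  */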
23862 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23864 /* Move register range backwards, if we might have destructive
23865 overlap. */
23866 int i;
23867 for (i = nregs - 1; i >= 0; i--)
23868 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23869 i * reg_mode_size),
23870 simplify_gen_subreg (reg_mode, src, mode,
23871 i * reg_mode_size)));
23873 else
23875 int i;
23876 int j = -1;
23877 bool used_update = false;
23878 rtx restore_basereg = NULL_RTX;
23880 if (MEM_P (src) && INT_REGNO_P (reg))
23882 rtx breg;
23884 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23885 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23887 rtx delta_rtx;
23888 breg = XEXP (XEXP (src, 0), 0);
23889 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23890 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23891 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23892 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23893 src = replace_equiv_address (src, breg);
23895 else if (! rs6000_offsettable_memref_p (src, reg_mode))
23897 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23899 rtx basereg = XEXP (XEXP (src, 0), 0);
23900 if (TARGET_UPDATE)
23902 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23903 emit_insn (gen_rtx_SET (ndst,
23904 gen_rtx_MEM (reg_mode,
23905 XEXP (src, 0))));
23906 used_update = true;
23908 else
23909 emit_insn (gen_rtx_SET (basereg,
23910 XEXP (XEXP (src, 0), 1)));
23911 src = replace_equiv_address (src, basereg);
23913 else
23915 rtx basereg = gen_rtx_REG (Pmode, reg);
23916 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23917 src = replace_equiv_address (src, basereg);
23921 breg = XEXP (src, 0);
23922 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23923 breg = XEXP (breg, 0);
23925 /* If the base register we are using to address memory is
23926 also a destination reg, then change that register last. */
23927 if (REG_P (breg)
23928 && REGNO (breg) >= REGNO (dst)
23929 && REGNO (breg) < REGNO (dst) + nregs)
23930 j = REGNO (breg) - REGNO (dst);
23932 else if (MEM_P (dst) && INT_REGNO_P (reg))
23934 rtx breg;
23936 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23937 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23939 rtx delta_rtx;
23940 breg = XEXP (XEXP (dst, 0), 0);
23941 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23942 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23943 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23945 /* We have to update the breg before doing the store.
23946 Use store with update, if available. */
23948 if (TARGET_UPDATE)
23950 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23951 emit_insn (TARGET_32BIT
23952 ? (TARGET_POWERPC64
23953 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23954 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23955 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23956 used_update = true;
23958 else
23959 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23960 dst = replace_equiv_address (dst, breg);
23962 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
23963 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23965 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23967 rtx basereg = XEXP (XEXP (dst, 0), 0);
23968 if (TARGET_UPDATE)
23970 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23971 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23972 XEXP (dst, 0)),
23973 nsrc));
23974 used_update = true;
23976 else
23977 emit_insn (gen_rtx_SET (basereg,
23978 XEXP (XEXP (dst, 0), 1)));
23979 dst = replace_equiv_address (dst, basereg);
23981 else
23983 rtx basereg = XEXP (XEXP (dst, 0), 0);
23984 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23985 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23986 && REG_P (basereg)
23987 && REG_P (offsetreg)
23988 && REGNO (basereg) != REGNO (offsetreg));
23989 if (REGNO (basereg) == 0)
23991 rtx tmp = offsetreg;
23992 offsetreg = basereg;
23993 basereg = tmp;
23995 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23996 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23997 dst = replace_equiv_address (dst, basereg);
24000 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24001 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
24004 for (i = 0; i < nregs; i++)
24006 /* Calculate index to next subword. */
24007 ++j;
24008 if (j == nregs)
24009 j = 0;
24011 /* If the compiler already emitted a move of the first word by
24012 store with update, there is no need to do anything. */
24013 if (j == 0 && used_update)
24014 continue;
24016 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24017 j * reg_mode_size),
24018 simplify_gen_subreg (reg_mode, src, mode,
24019 j * reg_mode_size)));
24021 if (restore_basereg != NULL_RTX)
24022 emit_insn (restore_basereg);
24027 /* This page contains routines that are used to determine what the
24028 function prologue and epilogue code will do and write them out. */
24030 static inline bool
24031 save_reg_p (int r)
24033 return !call_used_regs[r] && df_regs_ever_live_p (r);
24036 /* Determine whether the gp REG is really used. */
24038 static bool
24039 rs6000_reg_live_or_pic_offset_p (int reg)
24041 /* We need to mark the PIC offset register live for the same conditions
24042 as it is set up, or otherwise it won't be saved before we clobber it. */
24044 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24046 if (TARGET_TOC && TARGET_MINIMAL_TOC
24047 && (crtl->calls_eh_return
24048 || df_regs_ever_live_p (reg)
24049 || get_pool_size ()))
24050 return true;
24052 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24053 && flag_pic)
24054 return true;
24057 /* If the function calls eh_return, claim used all the registers that would
24058 be checked for liveness otherwise. */
24060 return ((crtl->calls_eh_return || df_regs_ever_live_p (reg))
24061 && !call_used_regs[reg]);
24064 /* Return the first fixed-point register that is required to be
24065 saved. 32 if none. */
24067 static int
24068 first_reg_to_save (void)
24070 int first_reg;
24072 /* Find lowest numbered live register. */
24073 for (first_reg = 13; first_reg <= 31; first_reg++)
24074 if (save_reg_p (first_reg))
24075 break;
24077 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
24078 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
24079 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24080 || (TARGET_TOC && TARGET_MINIMAL_TOC))
24081 && rs6000_reg_live_or_pic_offset_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
24082 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
24084 #if TARGET_MACHO
24085 if (flag_pic
24086 && crtl->uses_pic_offset_table
24087 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24088 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24089 #endif
24091 return first_reg;
24094 /* Similar, for FP regs. */
24096 static int
24097 first_fp_reg_to_save (void)
24099 int first_reg;
24101 /* Find lowest numbered live register. */
24102 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24103 if (save_reg_p (first_reg))
24104 break;
24106 return first_reg;
24109 /* Similar, for AltiVec regs. */
24111 static int
24112 first_altivec_reg_to_save (void)
24114 int i;
24116 /* Stack frame remains as is unless we are in AltiVec ABI. */
24117 if (! TARGET_ALTIVEC_ABI)
24118 return LAST_ALTIVEC_REGNO + 1;
24120 /* On Darwin, the unwind routines are compiled without
24121 TARGET_ALTIVEC, and use save_world to save/restore the
24122 altivec registers when necessary. */
24123 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24124 && ! TARGET_ALTIVEC)
24125 return FIRST_ALTIVEC_REGNO + 20;
24127 /* Find lowest numbered live register. */
24128 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24129 if (save_reg_p (i))
24130 break;
24132 return i;
24135 /* Return a 32-bit mask of the AltiVec registers we need to set in
24136 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24137 the 32-bit word is 0. */
24139 static unsigned int
24140 compute_vrsave_mask (void)
24142 unsigned int i, mask = 0;
24144 /* On Darwin, the unwind routines are compiled without
24145 TARGET_ALTIVEC, and use save_world to save/restore the
24146 call-saved altivec registers when necessary. */
24147 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24148 && ! TARGET_ALTIVEC)
24149 mask |= 0xFFF;
24151 /* First, find out if we use _any_ altivec registers. */
24152 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24153 if (df_regs_ever_live_p (i))
24154 mask |= ALTIVEC_REG_BIT (i);
24156 if (mask == 0)
24157 return mask;
24159 /* Next, remove the argument registers from the set. These must
24160 be in the VRSAVE mask set by the caller, so we don't need to add
24161 them in again. More importantly, the mask we compute here is
24162 used to generate CLOBBERs in the set_vrsave insn, and we do not
24163 wish the argument registers to die. */
24164 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24165 mask &= ~ALTIVEC_REG_BIT (i);
24167 /* Similarly, remove the return value from the set. */
24169 bool yes = false;
24170 diddle_return_value (is_altivec_return_reg, &yes);
24171 if (yes)
24172 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24175 return mask;
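/* Worked example, assuming the usual ALTIVEC_REG_BIT numbering in which
   bit 0 is the MSB of the 32-bit word: a function whose only live AltiVec
   registers are V20 and V21 produces

     mask = (0x80000000u >> 20) | (0x80000000u >> 21) = 0x00000c00

   before the argument and return-value registers are removed.  */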
24178 /* For a very restricted set of circumstances, we can cut down the
24179 size of prologues/epilogues by calling our own save/restore-the-world
24180 routines. */
24182 static void
24183 compute_save_world_info (rs6000_stack_t *info)
24185 info->world_save_p = 1;
24186 info->world_save_p
24187 = (WORLD_SAVE_P (info)
24188 && DEFAULT_ABI == ABI_DARWIN
24189 && !cfun->has_nonlocal_label
24190 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24191 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24192 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24193 && info->cr_save_p);
24195 /* This will not work in conjunction with sibcalls. Make sure there
24196 are none. (This check is expensive, but seldom executed.) */
24197 if (WORLD_SAVE_P (info))
24199 rtx_insn *insn;
24200 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24201 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24203 info->world_save_p = 0;
24204 break;
24208 if (WORLD_SAVE_P (info))
24210 /* Even if we're not touching VRsave, make sure there's room on the
24211 stack for it, if it looks like we're calling SAVE_WORLD, which
24212 will attempt to save it. */
24213 info->vrsave_size = 4;
24215 /* If we are going to save the world, we need to save the link register too. */
24216 info->lr_save_p = 1;
24218 /* "Save" the VRsave register too if we're saving the world. */
24219 if (info->vrsave_mask == 0)
24220 info->vrsave_mask = compute_vrsave_mask ();
24222 /* Because the Darwin register save/restore routines only handle
24223 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24224 check. */
24225 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24226 && (info->first_altivec_reg_save
24227 >= FIRST_SAVED_ALTIVEC_REGNO));
24230 return;
24234 static void
24235 is_altivec_return_reg (rtx reg, void *xyes)
24237 bool *yes = (bool *) xyes;
24238 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24239 *yes = true;
24243 /* Return whether REG is a global user reg or has been specified by
24244 -ffixed-REG. We should not restore these, and so cannot use
24245 lmw or out-of-line restore functions if there are any. We also
24246 can't save them (well, emit frame notes for them), because frame
24247 unwinding during exception handling will restore saved registers. */
24249 static bool
24250 fixed_reg_p (int reg)
24252 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24253 backend sets it, overriding anything the user might have given. */
24254 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24255 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24256 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24257 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24258 return false;
24260 return fixed_regs[reg];
24263 /* Determine the strategy for saving/restoring registers. */
24265 enum {
24266 SAVE_MULTIPLE = 0x1,
24267 SAVE_INLINE_GPRS = 0x2,
24268 SAVE_INLINE_FPRS = 0x4,
24269 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24270 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24271 SAVE_INLINE_VRS = 0x20,
24272 REST_MULTIPLE = 0x100,
24273 REST_INLINE_GPRS = 0x200,
24274 REST_INLINE_FPRS = 0x400,
24275 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24276 REST_INLINE_VRS = 0x1000
24279 static int
24280 rs6000_savres_strategy (rs6000_stack_t *info,
24281 bool using_static_chain_p)
24283 int strategy = 0;
24285 /* Select between in-line and out-of-line save and restore of regs.
24286 First, all the obvious cases where we don't use out-of-line. */
24287 if (crtl->calls_eh_return
24288 || cfun->machine->ra_need_lr)
24289 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24290 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24291 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24293 if (info->first_gp_reg_save == 32)
24294 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24296 if (info->first_fp_reg_save == 64
24297 /* The out-of-line FP routines use double-precision stores;
24298 we can't use those routines if we don't have such stores. */
24299 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24300 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24302 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24303 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24305 /* Define cutoff for using out-of-line functions to save registers. */
24306 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24308 if (!optimize_size)
24310 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24311 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24312 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24314 else
24316 /* Prefer out-of-line restore if it will exit. */
24317 if (info->first_fp_reg_save > 61)
24318 strategy |= SAVE_INLINE_FPRS;
24319 if (info->first_gp_reg_save > 29)
24321 if (info->first_fp_reg_save == 64)
24322 strategy |= SAVE_INLINE_GPRS;
24323 else
24324 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24326 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24327 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24330 else if (DEFAULT_ABI == ABI_DARWIN)
24332 if (info->first_fp_reg_save > 60)
24333 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24334 if (info->first_gp_reg_save > 29)
24335 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24336 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24338 else
24340 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24341 if (info->first_fp_reg_save > 61)
24342 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24343 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24344 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24347 /* Don't bother to try to save things out-of-line if r11 is occupied
24348 by the static chain. It would require too much fiddling and the
24349 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24350 pointer on Darwin, and AIX uses r1 or r12. */
24351 if (using_static_chain_p
24352 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24353 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24354 | SAVE_INLINE_GPRS
24355 | SAVE_INLINE_VRS);
24357 /* Saving CR interferes with the exit routines used on the SPE, so
24358 just punt here. */
24359 if (TARGET_SPE_ABI
24360 && info->spe_64bit_regs_used
24361 && info->cr_save_p)
24362 strategy |= REST_INLINE_GPRS;
24364 /* We can only use the out-of-line routines to restore fprs if we've
24365 saved all the registers from first_fp_reg_save in the prologue.
24366 Otherwise, we risk loading garbage. Of course, if we have saved
24367 out-of-line then we know we haven't skipped any fprs. */
24368 if ((strategy & SAVE_INLINE_FPRS)
24369 && !(strategy & REST_INLINE_FPRS))
24371 int i;
24373 for (i = info->first_fp_reg_save; i < 64; i++)
24374 if (fixed_regs[i] || !save_reg_p (i))
24376 strategy |= REST_INLINE_FPRS;
24377 break;
24381 /* Similarly, for altivec regs. */
24382 if ((strategy & SAVE_INLINE_VRS)
24383 && !(strategy & REST_INLINE_VRS))
24385 int i;
24387 for (i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24388 if (fixed_regs[i] || !save_reg_p (i))
24390 strategy |= REST_INLINE_VRS;
24391 break;
24395 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24396 saved is an out-of-line save or restore. Set up the value for
24397 the next test (excluding out-of-line gprs). */
24398 bool lr_save_p = (info->lr_save_p
24399 || !(strategy & SAVE_INLINE_FPRS)
24400 || !(strategy & SAVE_INLINE_VRS)
24401 || !(strategy & REST_INLINE_FPRS)
24402 || !(strategy & REST_INLINE_VRS));
24404 if (TARGET_MULTIPLE
24405 && !TARGET_POWERPC64
24406 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
24407 && info->first_gp_reg_save < 31)
24409 /* Prefer store multiple for saves over out-of-line routines,
24410 since the store-multiple instruction will always be smaller. */
24411 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24413 /* The situation is more complicated with load multiple. We'd
24414 prefer to use the out-of-line routines for restores, since the
24415 "exit" out-of-line routines can handle the restore of LR and the
24416 frame teardown. However, it doesn't make sense to use the
24417 out-of-line routine if that is the only reason we'd need to save
24418 LR, and we can't use the "exit" out-of-line gpr restore if we
24419 have saved some fprs; in those cases it is advantageous to use
24420 load multiple when available. */
24421 if (info->first_fp_reg_save != 64 || !lr_save_p)
24422 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24425 /* Using the "exit" out-of-line routine does not improve code size
24426 if using it would require lr to be saved and if only saving one
24427 or two gprs. */
24428 else if (!lr_save_p && info->first_gp_reg_save > 29)
24429 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24431 /* We can only use load multiple or the out-of-line routines to
24432 restore gprs if we've saved all the registers from
24433 first_gp_reg_save. Otherwise, we risk loading garbage.
24434 Of course, if we have saved out-of-line or used stmw then we know
24435 we haven't skipped any gprs. */
24436 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24437 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24439 int i;
24441 for (i = info->first_gp_reg_save; i < 32; i++)
24442 if (fixed_reg_p (i) || !save_reg_p (i))
24444 strategy |= REST_INLINE_GPRS;
24445 strategy &= ~REST_MULTIPLE;
24446 break;
24450 if (TARGET_ELF && TARGET_64BIT)
24452 if (!(strategy & SAVE_INLINE_FPRS))
24453 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24454 else if (!(strategy & SAVE_INLINE_GPRS)
24455 && info->first_fp_reg_save == 64)
24456 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24458 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24459 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24461 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24462 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24464 return strategy;
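/* Illustrative reading of the result (a hypothetical case, not taken from
   the sources): a 32-bit V.4 function compiled with -Os on a
   TARGET_MULTIPLE machine, saving GPRs from r25 up with no FPR or AltiVec
   saves, would get SAVE_INLINE_GPRS | SAVE_MULTIPLE (stmw for the saves)
   and, when LR need not be saved anyway, REST_INLINE_GPRS | REST_MULTIPLE
   (lmw for the restores).  */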
24467 /* Calculate the stack information for the current function. This is
24468 complicated by having two separate calling sequences, the AIX calling
24469 sequence and the V.4 calling sequence.
24471 AIX (and Darwin/Mac OS X) stack frames look like:
24472 32-bit 64-bit
24473 SP----> +---------------------------------------+
24474 | back chain to caller | 0 0
24475 +---------------------------------------+
24476 | saved CR | 4 8 (8-11)
24477 +---------------------------------------+
24478 | saved LR | 8 16
24479 +---------------------------------------+
24480 | reserved for compilers | 12 24
24481 +---------------------------------------+
24482 | reserved for binders | 16 32
24483 +---------------------------------------+
24484 | saved TOC pointer | 20 40
24485 +---------------------------------------+
24486 | Parameter save area (P) | 24 48
24487 +---------------------------------------+
24488 | Alloca space (A) | 24+P etc.
24489 +---------------------------------------+
24490 | Local variable space (L) | 24+P+A
24491 +---------------------------------------+
24492 | Float/int conversion temporary (X) | 24+P+A+L
24493 +---------------------------------------+
24494 | Save area for AltiVec registers (W) | 24+P+A+L+X
24495 +---------------------------------------+
24496 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24497 +---------------------------------------+
24498 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24499 +---------------------------------------+
24500 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24501 +---------------------------------------+
24502 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24503 +---------------------------------------+
24504 old SP->| back chain to caller's caller |
24505 +---------------------------------------+
24507 The required alignment for AIX configurations is two words (i.e., 8
24508 or 16 bytes).
24510 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24512 SP----> +---------------------------------------+
24513 | Back chain to caller | 0
24514 +---------------------------------------+
24515 | Save area for CR | 8
24516 +---------------------------------------+
24517 | Saved LR | 16
24518 +---------------------------------------+
24519 | Saved TOC pointer | 24
24520 +---------------------------------------+
24521 | Parameter save area (P) | 32
24522 +---------------------------------------+
24523 | Alloca space (A) | 32+P
24524 +---------------------------------------+
24525 | Local variable space (L) | 32+P+A
24526 +---------------------------------------+
24527 | Save area for AltiVec registers (W) | 32+P+A+L
24528 +---------------------------------------+
24529 | AltiVec alignment padding (Y) | 32+P+A+L+W
24530 +---------------------------------------+
24531 | Save area for GP registers (G) | 32+P+A+L+W+Y
24532 +---------------------------------------+
24533 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24534 +---------------------------------------+
24535 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24536 +---------------------------------------+
24539 V.4 stack frames look like:
24541 SP----> +---------------------------------------+
24542 | back chain to caller | 0
24543 +---------------------------------------+
24544 | caller's saved LR | 4
24545 +---------------------------------------+
24546 | Parameter save area (P) | 8
24547 +---------------------------------------+
24548 | Alloca space (A) | 8+P
24549 +---------------------------------------+
24550 | Varargs save area (V) | 8+P+A
24551 +---------------------------------------+
24552 | Local variable space (L) | 8+P+A+V
24553 +---------------------------------------+
24554 | Float/int conversion temporary (X) | 8+P+A+V+L
24555 +---------------------------------------+
24556 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24557 +---------------------------------------+
24558 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24559 +---------------------------------------+
24560 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24561 +---------------------------------------+
24562 | SPE: area for 64-bit GP registers |
24563 +---------------------------------------+
24564 | SPE alignment padding |
24565 +---------------------------------------+
24566 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24567 +---------------------------------------+
24568 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24569 +---------------------------------------+
24570 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24571 +---------------------------------------+
24572 old SP->| back chain to caller's caller |
24573 +---------------------------------------+
24575 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24576 given. (But note below and in sysv4.h that we require only 8 and
24577 may round up the size of our stack frame anyway. The historical
24578 reason is early versions of powerpc-linux which didn't properly
24579 align the stack at program startup. A happy side-effect is that
24580 -mno-eabi libraries can be used with -meabi programs.)
24582 The EABI configuration defaults to the V.4 layout. However,
24583 the stack alignment requirements may differ. If -mno-eabi is not
24584 given, the required stack alignment is 8 bytes; if -mno-eabi is
24585 given, the required alignment is 16 bytes. (But see V.4 comment
24586 above.) */
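/* Worked example of the AIX 32-bit layout above (hypothetical sizes):
   with P = 32 bytes of parameter save area, no alloca (A = 0) and
   L = 16 bytes of locals, the float/int conversion temporary X starts at
   offset 24+P+A+L = 24+32+0+16 = 72 from SP, and each later area follows
   cumulatively as the diagram shows.  */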
24588 #ifndef ABI_STACK_BOUNDARY
24589 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24590 #endif
24592 static rs6000_stack_t *
24593 rs6000_stack_info (void)
24595 /* We should never be called for thunks; we are not set up for that. */
24596 gcc_assert (!cfun->is_thunk);
24598 rs6000_stack_t *info = &stack_info;
24599 int reg_size = TARGET_32BIT ? 4 : 8;
24600 int ehrd_size;
24601 int ehcr_size;
24602 int save_align;
24603 int first_gp;
24604 HOST_WIDE_INT non_fixed_size;
24605 bool using_static_chain_p;
24607 if (reload_completed && info->reload_completed)
24608 return info;
24610 memset (info, 0, sizeof (*info));
24611 info->reload_completed = reload_completed;
24613 if (TARGET_SPE)
24615 /* Cache value so we don't rescan instruction chain over and over. */
24616 if (cfun->machine->spe_insn_chain_scanned_p == 0)
24617 cfun->machine->spe_insn_chain_scanned_p
24618 = spe_func_has_64bit_regs_p () + 1;
24619 info->spe_64bit_regs_used = cfun->machine->spe_insn_chain_scanned_p - 1;
24622 /* Select which calling sequence. */
24623 info->abi = DEFAULT_ABI;
24625 /* Calculate which registers need to be saved & save area size. */
24626 info->first_gp_reg_save = first_reg_to_save ();
24627 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24628 even if it currently looks like we won't. Reload may need it to
24629 get at a constant; if so, it will have already created a constant
24630 pool entry for it. */
24631 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24632 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24633 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24634 && crtl->uses_const_pool
24635 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24636 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24637 else
24638 first_gp = info->first_gp_reg_save;
24640 info->gp_size = reg_size * (32 - first_gp);
24642 /* For the SPE, we have an additional upper 32-bits on each GPR.
24643 Ideally we should save the entire 64-bits only when the upper
24644 half is used in SIMD instructions. Since we only record
24645 registers live (not the size they are used in), this proves
24646 difficult because we'd have to traverse the instruction chain at
24647 the right time, taking reload into account. This is a real pain,
24648 so we opt to always save the GPRs in 64-bits if even one register
24649 gets used in 64-bits. Otherwise, all the registers in the frame
24650 get saved in 32-bits.
24652 So, when we save all GPRs (except the SP) in 64-bits, the
24653 traditional GP save area will be empty. */
24654 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24655 info->gp_size = 0;
24657 info->first_fp_reg_save = first_fp_reg_to_save ();
24658 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24660 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24661 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24662 - info->first_altivec_reg_save);
24664 /* Does this function call anything? */
24665 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24667 /* Determine if we need to save the condition code registers. */
24668 if (save_reg_p (CR2_REGNO)
24669 || save_reg_p (CR3_REGNO)
24670 || save_reg_p (CR4_REGNO))
24672 info->cr_save_p = 1;
24673 if (DEFAULT_ABI == ABI_V4)
24674 info->cr_size = reg_size;
24677 /* If the current function calls __builtin_eh_return, then we need
24678 to allocate stack space for registers that will hold data for
24679 the exception handler. */
24680 if (crtl->calls_eh_return)
24682 unsigned int i;
24683 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24684 continue;
24686 /* SPE saves EH registers in 64-bits. */
24687 ehrd_size = i * (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0
24688 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
24690 else
24691 ehrd_size = 0;
24693 /* In the ELFv2 ABI, we also need to allocate space for separate
24694 CR field save areas if the function calls __builtin_eh_return. */
24695 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24697 /* This hard-codes that we have three call-saved CR fields. */
24698 ehcr_size = 3 * reg_size;
24699 /* We do *not* use the regular CR save mechanism. */
24700 info->cr_save_p = 0;
24702 else
24703 ehcr_size = 0;
24705 /* Determine various sizes. */
24706 info->reg_size = reg_size;
24707 info->fixed_size = RS6000_SAVE_AREA;
24708 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24709 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24710 TARGET_ALTIVEC ? 16 : 8);
24711 if (FRAME_GROWS_DOWNWARD)
24712 info->vars_size
24713 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24714 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24715 - (info->fixed_size + info->vars_size + info->parm_size);
24717 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24718 info->spe_gp_size = 8 * (32 - first_gp);
24720 if (TARGET_ALTIVEC_ABI)
24721 info->vrsave_mask = compute_vrsave_mask ();
24723 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24724 info->vrsave_size = 4;
24726 compute_save_world_info (info);
24728 /* Calculate the offsets. */
24729 switch (DEFAULT_ABI)
24731 case ABI_NONE:
24732 default:
24733 gcc_unreachable ();
24735 case ABI_AIX:
24736 case ABI_ELFv2:
24737 case ABI_DARWIN:
24738 info->fp_save_offset = -info->fp_size;
24739 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24741 if (TARGET_ALTIVEC_ABI)
24743 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24745 /* Align stack so vector save area is on a quadword boundary.
24746 The padding goes above the vectors. */
24747 if (info->altivec_size != 0)
24748 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24750 info->altivec_save_offset = info->vrsave_save_offset
24751 - info->altivec_padding_size
24752 - info->altivec_size;
24753 gcc_assert (info->altivec_size == 0
24754 || info->altivec_save_offset % 16 == 0);
24756 /* Adjust for AltiVec case. */
24757 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24759 else
24760 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24762 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24763 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24764 info->lr_save_offset = 2*reg_size;
24765 break;
24767 case ABI_V4:
24768 info->fp_save_offset = -info->fp_size;
24769 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24770 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24772 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24774 /* Align stack so SPE GPR save area is aligned on a
24775 double-word boundary. */
24776 if (info->spe_gp_size != 0 && info->cr_save_offset != 0)
24777 info->spe_padding_size = 8 - (-info->cr_save_offset % 8);
24778 else
24779 info->spe_padding_size = 0;
24781 info->spe_gp_save_offset = info->cr_save_offset
24782 - info->spe_padding_size
24783 - info->spe_gp_size;
24785 /* Adjust for SPE case. */
24786 info->ehrd_offset = info->spe_gp_save_offset;
24788 else if (TARGET_ALTIVEC_ABI)
24790 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24792 /* Align stack so vector save area is on a quadword boundary. */
24793 if (info->altivec_size != 0)
24794 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24796 info->altivec_save_offset = info->vrsave_save_offset
24797 - info->altivec_padding_size
24798 - info->altivec_size;
24800 /* Adjust for AltiVec case. */
24801 info->ehrd_offset = info->altivec_save_offset;
24803 else
24804 info->ehrd_offset = info->cr_save_offset;
24806 info->ehrd_offset -= ehrd_size;
24807 info->lr_save_offset = reg_size;
24810 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24811 info->save_size = RS6000_ALIGN (info->fp_size
24812 + info->gp_size
24813 + info->altivec_size
24814 + info->altivec_padding_size
24815 + info->spe_gp_size
24816 + info->spe_padding_size
24817 + ehrd_size
24818 + ehcr_size
24819 + info->cr_size
24820 + info->vrsave_size,
24821 save_align);
24823 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24825 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24826 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24828 /* Determine if we need to save the link register. */
24829 if (info->calls_p
24830 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24831 && crtl->profile
24832 && !TARGET_PROFILE_KERNEL)
24833 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24834 #ifdef TARGET_RELOCATABLE
24835 || (DEFAULT_ABI == ABI_V4
24836 && (TARGET_RELOCATABLE || flag_pic > 1)
24837 && get_pool_size () != 0)
24838 #endif
24839 || rs6000_ra_ever_killed ())
24840 info->lr_save_p = 1;
24842 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24843 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24844 && call_used_regs[STATIC_CHAIN_REGNUM]);
24845 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24847 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24848 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24849 || !(info->savres_strategy & SAVE_INLINE_VRS)
24850 || !(info->savres_strategy & REST_INLINE_GPRS)
24851 || !(info->savres_strategy & REST_INLINE_FPRS)
24852 || !(info->savres_strategy & REST_INLINE_VRS))
24853 info->lr_save_p = 1;
24855 if (info->lr_save_p)
24856 df_set_regs_ever_live (LR_REGNO, true);
24858 /* Determine if we need to allocate any stack frame:
24860 For AIX we need to push the stack if a frame pointer is needed
24861 (because the stack might be dynamically adjusted), if we are
24862 debugging, if we make calls, or if the sum of fp_save, gp_save,
24863 and local variables are more than the space needed to save all
24864 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24865 + 18*8 = 288 (GPR13 reserved).
24867 For V.4 we don't have the stack cushion that AIX uses, but assume
24868 that the debugger can handle stackless frames. */
24870 if (info->calls_p)
24871 info->push_p = 1;
24873 else if (DEFAULT_ABI == ABI_V4)
24874 info->push_p = non_fixed_size != 0;
24876 else if (frame_pointer_needed)
24877 info->push_p = 1;
24879 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24880 info->push_p = 1;
24882 else
24883 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24885 return info;
24888 /* Return true if the current function uses any GPRs in 64-bit SIMD
24889 mode. */
24891 static bool
24892 spe_func_has_64bit_regs_p (void)
24894 rtx_insn *insns, *insn;
24896 /* Functions that save and restore all the call-saved registers will
24897 need to save/restore the registers in 64-bits. */
24898 if (crtl->calls_eh_return
24899 || cfun->calls_setjmp
24900 || crtl->has_nonlocal_goto)
24901 return true;
24903 insns = get_insns ();
24905 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
24907 if (INSN_P (insn))
24909 rtx i;
24911 /* FIXME: This should be implemented with attributes...
24913 (set_attr "spe64" "true")....then,
24914 if (get_spe64(insn)) return true;
24916 It's the only reliable way to do the stuff below. */
24918 i = PATTERN (insn);
24919 if (GET_CODE (i) == SET)
24921 machine_mode mode = GET_MODE (SET_SRC (i));
24923 if (SPE_VECTOR_MODE (mode))
24924 return true;
24925 if (TARGET_E500_DOUBLE
24926 && (mode == DFmode || FLOAT128_2REG_P (mode)))
24927 return true;
24932 return false;
24935 static void
24936 debug_stack_info (rs6000_stack_t *info)
24938 const char *abi_string;
24940 if (! info)
24941 info = rs6000_stack_info ();
24943 fprintf (stderr, "\nStack information for function %s:\n",
24944 ((current_function_decl && DECL_NAME (current_function_decl))
24945 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24946 : "<unknown>"));
24948 switch (info->abi)
24950 default: abi_string = "Unknown"; break;
24951 case ABI_NONE: abi_string = "NONE"; break;
24952 case ABI_AIX: abi_string = "AIX"; break;
24953 case ABI_ELFv2: abi_string = "ELFv2"; break;
24954 case ABI_DARWIN: abi_string = "Darwin"; break;
24955 case ABI_V4: abi_string = "V.4"; break;
24958 fprintf (stderr, "\tABI = %5s\n", abi_string);
24960 if (TARGET_ALTIVEC_ABI)
24961 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24963 if (TARGET_SPE_ABI)
24964 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
24966 if (info->first_gp_reg_save != 32)
24967 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24969 if (info->first_fp_reg_save != 64)
24970 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24972 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24973 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24974 info->first_altivec_reg_save);
24976 if (info->lr_save_p)
24977 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24979 if (info->cr_save_p)
24980 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24982 if (info->vrsave_mask)
24983 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24985 if (info->push_p)
24986 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24988 if (info->calls_p)
24989 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24991 if (info->gp_size)
24992 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24994 if (info->fp_size)
24995 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24997 if (info->altivec_size)
24998 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24999 info->altivec_save_offset);
25001 if (info->spe_gp_size)
25002 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
25003 info->spe_gp_save_offset);
25005 if (info->vrsave_size)
25006 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
25007 info->vrsave_save_offset);
25009 if (info->lr_save_p)
25010 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
25012 if (info->cr_save_p)
25013 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
25015 if (info->varargs_save_offset)
25016 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
25018 if (info->total_size)
25019 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25020 info->total_size);
25022 if (info->vars_size)
25023 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25024 info->vars_size);
25026 if (info->parm_size)
25027 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25029 if (info->fixed_size)
25030 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25032 if (info->gp_size)
25033 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25035 if (info->spe_gp_size)
25036 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
25038 if (info->fp_size)
25039 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25041 if (info->altivec_size)
25042 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25044 if (info->vrsave_size)
25045 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25047 if (info->altivec_padding_size)
25048 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25049 info->altivec_padding_size);
25051 if (info->spe_padding_size)
25052 fprintf (stderr, "\tspe_padding_size = %5d\n",
25053 info->spe_padding_size);
25055 if (info->cr_size)
25056 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25058 if (info->save_size)
25059 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25061 if (info->reg_size != 4)
25062 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25064 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25066 fprintf (stderr, "\n");
25069 rtx
25070 rs6000_return_addr (int count, rtx frame)
25072 /* Currently we don't optimize very well between prologue and body
25073 code, and for PIC code the generated code can actually be quite bad,
25074 so don't try to be too clever here. */
25075 if (count != 0
25076 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25078 cfun->machine->ra_needs_full_frame = 1;
25080 return
25081 gen_rtx_MEM
25082 (Pmode,
25083 memory_address
25084 (Pmode,
25085 plus_constant (Pmode,
25086 copy_to_reg
25087 (gen_rtx_MEM (Pmode,
25088 memory_address (Pmode, frame))),
25089 RETURN_ADDRESS_OFFSET)));
25092 cfun->machine->ra_need_lr = 1;
25093 return get_hard_reg_initial_val (Pmode, LR_REGNO);
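/* Usage illustration: a source-level call such as

     void *ra = __builtin_return_address (1);

   arrives here with COUNT == 1 and therefore takes the full-frame path
   above, chasing the back chain before loading the slot at
   RETURN_ADDRESS_OFFSET.  */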
25096 /* Say whether a function is a candidate for sibcall handling or not. */
25098 static bool
25099 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25101 tree fntype;
25103 if (decl)
25104 fntype = TREE_TYPE (decl);
25105 else
25106 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25108 /* We can't do it if the called function has more vector parameters
25109 than the current function; there's nowhere to put the VRsave code. */
25110 if (TARGET_ALTIVEC_ABI
25111 && TARGET_ALTIVEC_VRSAVE
25112 && !(decl && decl == current_function_decl))
25114 function_args_iterator args_iter;
25115 tree type;
25116 int nvreg = 0;
25118 /* Functions with vector parameters are required to have a
25119 prototype, so the argument type info must be available
25120 here. */
25121 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25122 if (TREE_CODE (type) == VECTOR_TYPE
25123 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25124 nvreg++;
25126 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25127 if (TREE_CODE (type) == VECTOR_TYPE
25128 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25129 nvreg--;
25131 if (nvreg > 0)
25132 return false;
25135 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25136 functions, because the callee may have a different TOC pointer to
25137 the caller and there's no way to ensure we restore the TOC when
25138 we return. With the secure-plt SYSV ABI we can't make non-local
25139 calls when -fpic/PIC because the plt call stubs use r30. */
25140 if (DEFAULT_ABI == ABI_DARWIN
25141 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25142 && decl
25143 && !DECL_EXTERNAL (decl)
25144 && !DECL_WEAK (decl)
25145 && (*targetm.binds_local_p) (decl))
25146 || (DEFAULT_ABI == ABI_V4
25147 && (!TARGET_SECURE_PLT
25148 || !flag_pic
25149 || (decl
25150 && (*targetm.binds_local_p) (decl)))))
25152 tree attr_list = TYPE_ATTRIBUTES (fntype);
25154 if (!lookup_attribute ("longcall", attr_list)
25155 || lookup_attribute ("shortcall", attr_list))
25156 return true;
25159 return false;
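/* For instance (hypothetical declaration), a callee such as

     extern void helper (void) __attribute__ ((longcall));

   fails the attribute check just above and is never sibcalled, even when
   it binds locally, unless "shortcall" is also present.  */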
25162 static int
25163 rs6000_ra_ever_killed (void)
25165 rtx_insn *top;
25166 rtx reg;
25167 rtx_insn *insn;
25169 if (cfun->is_thunk)
25170 return 0;
25172 if (cfun->machine->lr_save_state)
25173 return cfun->machine->lr_save_state - 1;
25175 /* regs_ever_live has LR marked as used if any sibcalls are present,
25176 but this should not force saving and restoring in the
25177 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25178 clobbers LR, so that is inappropriate. */
25180 /* Also, the prologue can generate a store into LR that
25181 doesn't really count, like this:
25183 move LR->R0
25184 bcl to set PIC register
25185 move LR->R31
25186 move R0->LR
25188 When we're called from the epilogue, we need to avoid counting
25189 this as a store. */
25191 push_topmost_sequence ();
25192 top = get_insns ();
25193 pop_topmost_sequence ();
25194 reg = gen_rtx_REG (Pmode, LR_REGNO);
25196 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25198 if (INSN_P (insn))
25200 if (CALL_P (insn))
25202 if (!SIBLING_CALL_P (insn))
25203 return 1;
25205 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25206 return 1;
25207 else if (set_of (reg, insn) != NULL_RTX
25208 && !prologue_epilogue_contains (insn))
25209 return 1;
25212 return 0;
25215 /* Emit instructions needed to load the TOC register.
25216 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25217 a constant pool; or for SVR4 -fpic. */
25219 void
25220 rs6000_emit_load_toc_table (int fromprolog)
25222 rtx dest;
25223 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25225 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25227 char buf[30];
25228 rtx lab, tmp1, tmp2, got;
25230 lab = gen_label_rtx ();
25231 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25232 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25233 if (flag_pic == 2)
25235 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25236 need_toc_init = 1;
25238 else
25239 got = rs6000_got_sym ();
25240 tmp1 = tmp2 = dest;
25241 if (!fromprolog)
25243 tmp1 = gen_reg_rtx (Pmode);
25244 tmp2 = gen_reg_rtx (Pmode);
25246 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25247 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25248 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25249 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25251 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25253 emit_insn (gen_load_toc_v4_pic_si ());
25254 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25256 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25258 char buf[30];
25259 rtx temp0 = (fromprolog
25260 ? gen_rtx_REG (Pmode, 0)
25261 : gen_reg_rtx (Pmode));
25263 if (fromprolog)
25265 rtx symF, symL;
25267 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25268 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25270 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25271 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25273 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25274 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25275 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25277 else
25279 rtx tocsym, lab;
25281 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25282 need_toc_init = 1;
25283 lab = gen_label_rtx ();
25284 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25285 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25286 if (TARGET_LINK_STACK)
25287 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25288 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25290 emit_insn (gen_addsi3 (dest, temp0, dest));
25292 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25294 /* This is for AIX code running in non-PIC ELF32. */
25295 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25297 need_toc_init = 1;
25298 emit_insn (gen_elf_high (dest, realsym));
25299 emit_insn (gen_elf_low (dest, dest, realsym));
25301 else
25303 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25305 if (TARGET_32BIT)
25306 emit_insn (gen_load_toc_aix_si (dest));
25307 else
25308 emit_insn (gen_load_toc_aix_di (dest));
25312 /* Emit instructions to restore the link register after determining where
25313 its value has been stored. */
25315 void
25316 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25318 rs6000_stack_t *info = rs6000_stack_info ();
25319 rtx operands[2];
25321 operands[0] = source;
25322 operands[1] = scratch;
25324 if (info->lr_save_p)
25326 rtx frame_rtx = stack_pointer_rtx;
25327 HOST_WIDE_INT sp_offset = 0;
25328 rtx tmp;
25330 if (frame_pointer_needed
25331 || cfun->calls_alloca
25332 || info->total_size > 32767)
25334 tmp = gen_frame_mem (Pmode, frame_rtx);
25335 emit_move_insn (operands[1], tmp);
25336 frame_rtx = operands[1];
25338 else if (info->push_p)
25339 sp_offset = info->total_size;
25341 tmp = plus_constant (Pmode, frame_rtx,
25342 info->lr_save_offset + sp_offset);
25343 tmp = gen_frame_mem (Pmode, tmp);
25344 emit_move_insn (tmp, operands[0]);
25346 else
25347 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25349 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25350 state of lr_save_p so any change from here on would be a bug. In
25351 particular, stop rs6000_ra_ever_killed from considering the SET
25352 of lr we may have added just above. */
25353 cfun->machine->lr_save_state = info->lr_save_p + 1;
25356 static GTY(()) alias_set_type set = -1;
25358 alias_set_type
25359 get_TOC_alias_set (void)
25361 if (set == -1)
25362 set = new_alias_set ();
25363 return set;
25366 /* This returns nonzero if the current function uses the TOC. This is
25367 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25368 is generated by the ABI_V4 load_toc_* patterns. */
25369 #if TARGET_ELF
25370 static int
25371 uses_TOC (void)
25373 rtx_insn *insn;
25375 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25376 if (INSN_P (insn))
25378 rtx pat = PATTERN (insn);
25379 int i;
25381 if (GET_CODE (pat) == PARALLEL)
25382 for (i = 0; i < XVECLEN (pat, 0); i++)
25384 rtx sub = XVECEXP (pat, 0, i);
25385 if (GET_CODE (sub) == USE)
25387 sub = XEXP (sub, 0);
25388 if (GET_CODE (sub) == UNSPEC
25389 && XINT (sub, 1) == UNSPEC_TOC)
25390 return 1;
25394 return 0;
25396 #endif
25398 rtx
25399 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25401 rtx tocrel, tocreg, hi;
25403 if (TARGET_DEBUG_ADDR)
25405 if (GET_CODE (symbol) == SYMBOL_REF)
25406 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25407 XSTR (symbol, 0));
25408 else
25410 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25411 GET_RTX_NAME (GET_CODE (symbol)));
25412 debug_rtx (symbol);
25416 if (!can_create_pseudo_p ())
25417 df_set_regs_ever_live (TOC_REGISTER, true);
25419 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25420 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25421 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25422 return tocrel;
25424 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25425 if (largetoc_reg != NULL)
25427 emit_move_insn (largetoc_reg, hi);
25428 hi = largetoc_reg;
25430 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25433 /* Issue assembly directives that create a reference to the given DWARF
25434 FRAME_TABLE_LABEL from the current function section. */
25435 void
25436 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25438 fprintf (asm_out_file, "\t.ref %s\n",
25439 (* targetm.strip_name_encoding) (frame_table_label));
25442 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25443 and the change to the stack pointer. */
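/* The tie is a PARALLEL of (set (mem:BLK ...) (const_int 0)) patterns
   covering the frame; it acts as a scheduling and alias barrier so
   that saves and restores of frame memory cannot drift to the wrong
   side of a stack-pointer adjustment.  Without it, a restore could be
   scheduled after the frame is popped, where an asynchronous signal
   might clobber the slot before the load executes.  */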
25445 static void
25446 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25448 rtvec p;
25449 int i;
25450 rtx regs[3];
25452 i = 0;
25453 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25454 if (hard_frame_needed)
25455 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25456 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25457 || (hard_frame_needed
25458 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25459 regs[i++] = fp;
25461 p = rtvec_alloc (i);
25462 while (--i >= 0)
25464 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25465 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25468 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25471 /* Emit the correct code for allocating stack space, as insns.
25472 If COPY_REG, make sure a copy of the old frame is left there.
25473 The generated code may use hard register 0 as a temporary. */
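/* For frames of up to 32767 bytes a single store-with-update does the
   whole job, roughly "stwu r1,-SIZE(r1)" (32-bit) or "stdu" (64-bit),
   allocating the frame and storing the back chain in one insn.
   Larger frames first materialize -SIZE in r0 and use the indexed
   update form, roughly "stwux r1,r1,r0".  The asm is an illustrative
   sketch of the usual output.  */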
25475 static rtx_insn *
25476 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25478 rtx_insn *insn;
25479 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25480 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25481 rtx todec = gen_int_mode (-size, Pmode);
25482 rtx par, set, mem;
25484 if (INTVAL (todec) != -size)
25486 warning (0, "stack frame too large");
25487 emit_insn (gen_trap ());
25488 return 0;
25491 if (crtl->limit_stack)
25493 if (REG_P (stack_limit_rtx)
25494 && REGNO (stack_limit_rtx) > 1
25495 && REGNO (stack_limit_rtx) <= 31)
25497 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
25498 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25499 const0_rtx));
25501 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25502 && TARGET_32BIT
25503 && DEFAULT_ABI == ABI_V4)
25505 rtx toload = gen_rtx_CONST (VOIDmode,
25506 gen_rtx_PLUS (Pmode,
25507 stack_limit_rtx,
25508 GEN_INT (size)));
25510 emit_insn (gen_elf_high (tmp_reg, toload));
25511 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25512 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25513 const0_rtx));
25515 else
25516 warning (0, "stack limit expression is not supported");
25519 if (copy_reg)
25521 if (copy_off != 0)
25522 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25523 else
25524 emit_move_insn (copy_reg, stack_reg);
25527 if (size > 32767)
25529 /* Need a note here so that try_split doesn't get confused. */
25530 if (get_last_insn () == NULL_RTX)
25531 emit_note (NOTE_INSN_DELETED);
25532 insn = emit_move_insn (tmp_reg, todec);
25533 try_split (PATTERN (insn), insn, 0);
25534 todec = tmp_reg;
25537 insn = emit_insn (TARGET_32BIT
25538 ? gen_movsi_update_stack (stack_reg, stack_reg,
25539 todec, stack_reg)
25540 : gen_movdi_di_update_stack (stack_reg, stack_reg,
25541 todec, stack_reg));
25542 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25543 it now and set the alias set/attributes. The above gen_*_update
25544 calls will generate a PARALLEL with the MEM set being the first
25545 operation. */
25546 par = PATTERN (insn);
25547 gcc_assert (GET_CODE (par) == PARALLEL);
25548 set = XVECEXP (par, 0, 0);
25549 gcc_assert (GET_CODE (set) == SET);
25550 mem = SET_DEST (set);
25551 gcc_assert (MEM_P (mem));
25552 MEM_NOTRAP_P (mem) = 1;
25553 set_mem_alias_set (mem, get_frame_alias_set ());
25555 RTX_FRAME_RELATED_P (insn) = 1;
25556 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25557 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
25558 GEN_INT (-size))));
25559 return insn;
25562 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25564 #if PROBE_INTERVAL > 32768
25565 #error Cannot use indexed addressing mode for stack probing
25566 #endif
25568 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25569 inclusive. These are offsets from the current stack pointer. */
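/* Worked example, assuming the default 4096-byte PROBE_INTERVAL:
   with FIRST == 0 and SIZE == 10000, the constant-count branch below
   probes at sp-4096, sp-8192 and finally sp-10000.  Ranges reaching
   past 32768 bytes fall through to the loop form, which advances r12
   one interval per iteration until it reaches the limit in r0.  */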
25571 static void
25572 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25574 /* See if we have a constant small number of probes to generate. If so,
25575 that's the easy case. */
25576 if (first + size <= 32768)
25578 HOST_WIDE_INT i;
25580 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25581 it exceeds SIZE. If only one probe is needed, this will not
25582 generate any code. Then probe at FIRST + SIZE. */
25583 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25584 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25585 -(first + i)));
25587 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25588 -(first + size)));
25591 /* Otherwise, do the same as above, but in a loop. Note that we must be
25592 extra careful with variables wrapping around because we might be at
25593 the very top (or the very bottom) of the address space and we have
25594 to be able to handle this case properly; in particular, we use an
25595 equality test for the loop condition. */
25596 else
25598 HOST_WIDE_INT rounded_size;
25599 rtx r12 = gen_rtx_REG (Pmode, 12);
25600 rtx r0 = gen_rtx_REG (Pmode, 0);
25602 /* Sanity check for the addressing mode we're going to use. */
25603 gcc_assert (first <= 32768);
25605 /* Step 1: round SIZE to the previous multiple of the interval. */
25607 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25610 /* Step 2: compute initial and final value of the loop counter. */
25612 /* TEST_ADDR = SP + FIRST. */
25613 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25614 -first)));
25616 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25617 if (rounded_size > 32768)
25619 emit_move_insn (r0, GEN_INT (-rounded_size));
25620 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25622 else
25623 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25624 -rounded_size)));
25627 /* Step 3: the loop
25631 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25632 probe at TEST_ADDR
25634 while (TEST_ADDR != LAST_ADDR)
25636 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25637 until it is equal to ROUNDED_SIZE. */
25639 if (TARGET_64BIT)
25640 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
25641 else
25642 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
25645 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25646 that SIZE is equal to ROUNDED_SIZE. */
25648 if (size != rounded_size)
25649 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25653 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25654 absolute addresses. */
25656 const char *
25657 output_probe_stack_range (rtx reg1, rtx reg2)
25659 static int labelno = 0;
25660 char loop_lab[32];
25661 rtx xops[2];
25663 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25665 /* Loop. */
25666 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25668 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25669 xops[0] = reg1;
25670 xops[1] = GEN_INT (-PROBE_INTERVAL);
25671 output_asm_insn ("addi %0,%0,%1", xops);
25673 /* Probe at TEST_ADDR. */
25674 xops[1] = gen_rtx_REG (Pmode, 0);
25675 output_asm_insn ("stw %1,0(%0)", xops);
25677 /* Test if TEST_ADDR == LAST_ADDR. */
25678 xops[1] = reg2;
25679 if (TARGET_64BIT)
25680 output_asm_insn ("cmpd 0,%0,%1", xops);
25681 else
25682 output_asm_insn ("cmpw 0,%0,%1", xops);
25684 /* Branch. */
25685 fputs ("\tbne 0,", asm_out_file);
25686 assemble_name_raw (asm_out_file, loop_lab);
25687 fputc ('\n', asm_out_file);
25689 return "";
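/* Assuming the default 4096-byte interval and r12/r0 as the test and
   limit registers, the loop above is emitted roughly as

	.LPSRL0:
		addi 12,12,-4096
		stw 0,0(12)
		cmpw 0,12,0
		bne 0,.LPSRL0

   (cmpd rather than cmpw on 64-bit; shown only as a sketch.)  */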
25692 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25693 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25694 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25695 deduce these equivalences by itself so it wasn't necessary to hold
25696 its hand so much. Don't be tempted to always supply d2_f_d_e with
25697 the actual CFA register, i.e. r31 when we are using a hard frame
25698 pointer. That fails when saving regs off r1, and sched moves the
25699 r31 setup past the reg saves. */
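/* For example, a save of r30 done through a temporary frame register
   such as r11 gets its note rewritten with r11 replaced by
   (plus (reg 1) VAL), so the unwinder always sees a CFA-relative
   store regardless of which register the prologue actually used.  */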
25701 static rtx
25702 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
25703 rtx reg2, rtx repl2)
25705 rtx repl;
25707 if (REGNO (reg) == STACK_POINTER_REGNUM)
25709 gcc_checking_assert (val == 0);
25710 repl = NULL_RTX;
25712 else
25713 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25714 GEN_INT (val));
25716 rtx pat = PATTERN (insn);
25717 if (!repl && !reg2)
25719 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25720 if (GET_CODE (pat) == PARALLEL)
25721 for (int i = 0; i < XVECLEN (pat, 0); i++)
25722 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25724 rtx set = XVECEXP (pat, 0, i);
25726 /* If this PARALLEL has been emitted for out-of-line
25727 register save functions, or store multiple, then omit
25728 eh_frame info for any user-defined global regs. If
25729 eh_frame info is supplied, frame unwinding will
25730 restore a user reg. */
25731 if (!REG_P (SET_SRC (set))
25732 || !fixed_reg_p (REGNO (SET_SRC (set))))
25733 RTX_FRAME_RELATED_P (set) = 1;
25735 RTX_FRAME_RELATED_P (insn) = 1;
25736 return insn;
25739 /* We expect that 'pat' is either a SET or a PARALLEL containing
25740 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25741 are important so they all have to be marked RTX_FRAME_RELATED_P.
25742 Call simplify_replace_rtx on the SETs rather than the whole insn
25743 so as to leave the other stuff alone (for example USE of r12). */
25745 if (GET_CODE (pat) == SET)
25747 if (repl)
25748 pat = simplify_replace_rtx (pat, reg, repl);
25749 if (reg2)
25750 pat = simplify_replace_rtx (pat, reg2, repl2);
25752 else if (GET_CODE (pat) == PARALLEL)
25754 pat = shallow_copy_rtx (pat);
25755 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25757 for (int i = 0; i < XVECLEN (pat, 0); i++)
25758 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25760 rtx set = XVECEXP (pat, 0, i);
25762 if (repl)
25763 set = simplify_replace_rtx (set, reg, repl);
25764 if (reg2)
25765 set = simplify_replace_rtx (set, reg2, repl2);
25766 XVECEXP (pat, 0, i) = set;
25768 /* Omit eh_frame info for any user-defined global regs. */
25769 if (!REG_P (SET_SRC (set))
25770 || !fixed_reg_p (REGNO (SET_SRC (set))))
25771 RTX_FRAME_RELATED_P (set) = 1;
25774 else
25775 gcc_unreachable ();
25777 RTX_FRAME_RELATED_P (insn) = 1;
25778 if (repl || reg2)
25779 add_reg_note (insn, REG_FRAME_RELATED_EXPR, pat);
25781 return insn;
25784 /* Returns an insn that has a vrsave set operation with the
25785 appropriate CLOBBERs. */
25787 static rtx
25788 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25790 int nclobs, i;
25791 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25792 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25794 clobs[0]
25795 = gen_rtx_SET (vrsave,
25796 gen_rtx_UNSPEC_VOLATILE (SImode,
25797 gen_rtvec (2, reg, vrsave),
25798 UNSPECV_SET_VRSAVE));
25800 nclobs = 1;
25802 /* We need to clobber the registers in the mask so the scheduler
25803 does not move sets to VRSAVE before sets of AltiVec registers.
25805 However, if the function receives nonlocal gotos, reload will set
25806 all call saved registers live. We will end up with:
25808 (set (reg 999) (mem))
25809 (parallel [ (set (reg vrsave) (unspec blah))
25810 (clobber (reg 999))])
25812 The clobber will cause the store into reg 999 to be dead, and
25813 flow will attempt to delete an epilogue insn. In this case, we
25814 need an unspec use/set of the register. */
25816 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25817 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25819 if (!epiloguep || call_used_regs[i])
25820 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25821 gen_rtx_REG (V4SImode, i));
25822 else
25824 rtx reg = gen_rtx_REG (V4SImode, i);
25826 clobs[nclobs++]
25827 = gen_rtx_SET (reg,
25828 gen_rtx_UNSPEC (V4SImode,
25829 gen_rtvec (1, reg), 27));
25833 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25835 for (i = 0; i < nclobs; ++i)
25836 XVECEXP (insn, 0, i) = clobs[i];
25838 return insn;
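/* Helpers that build a single frame access at FRAME_REG + OFFSET:
   (set (mem) (reg)) for a store, (set (reg) (mem)) for a load, with
   the MEM placed in the frame alias set.  */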
25841 static rtx
25842 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25844 rtx addr, mem;
25846 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25847 mem = gen_frame_mem (GET_MODE (reg), addr);
25848 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25851 static rtx
25852 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25854 return gen_frame_set (reg, frame_reg, offset, false);
25857 static rtx
25858 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25860 return gen_frame_set (reg, frame_reg, offset, true);
25863 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25864 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25866 static rtx
25867 emit_frame_save (rtx frame_reg, machine_mode mode,
25868 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25870 rtx reg, insn;
25872 /* Some cases that need register indexed addressing. */
25873 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25874 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
25875 || (TARGET_E500_DOUBLE && mode == DFmode)
25876 || (TARGET_SPE_ABI
25877 && SPE_VECTOR_MODE (mode)
25878 && !SPE_CONST_OFFSET_OK (offset))));
25880 reg = gen_rtx_REG (mode, regno);
25881 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25882 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25883 NULL_RTX, NULL_RTX);
25886 /* Emit an offset memory reference suitable for a frame store, while
25887 converting to a valid addressing mode. */
25889 static rtx
25890 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25892 rtx int_rtx, offset_rtx;
25894 int_rtx = GEN_INT (offset);
25896 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
25897 || (TARGET_E500_DOUBLE && mode == DFmode))
25899 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
25900 emit_move_insn (offset_rtx, int_rtx);
25902 else
25903 offset_rtx = int_rtx;
25905 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
25908 #ifndef TARGET_FIX_AND_CONTINUE
25909 #define TARGET_FIX_AND_CONTINUE 0
25910 #endif
25912 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
25913 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25914 #define LAST_SAVRES_REGISTER 31
25915 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25917 enum {
25918 SAVRES_LR = 0x1,
25919 SAVRES_SAVE = 0x2,
25920 SAVRES_REG = 0x0c,
25921 SAVRES_GPR = 0,
25922 SAVRES_FPR = 4,
25923 SAVRES_VR = 8
25924 };
25926 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
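/* The cache is indexed by the first saved register and by the SAVRES_*
   selector bits; e.g. sel == (SAVRES_SAVE | SAVRES_FPR) == 6 is the
   column holding the "_savefpr_N" symbols.  */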
25928 /* Temporary holding space for an out-of-line register save/restore
25929 routine name. */
25930 static char savres_routine_name[30];
25932 /* Return the name for an out-of-line register save/restore routine.
25933 We are saving/restoring GPRs if GPR is true. */
25935 static char *
25936 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
25938 const char *prefix = "";
25939 const char *suffix = "";
25941 /* Different targets are supposed to define
25942 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25943 routine name could be defined with:
25945 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25947 This is a nice idea in theory, but in reality, things are
25948 complicated in several ways:
25950 - ELF targets have save/restore routines for GPRs.
25952 - SPE targets use different prefixes for 32/64-bit registers, and
25953 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
25955 - PPC64 ELF targets have routines for save/restore of GPRs that
25956 differ in what they do with the link register, so having a set
25957 prefix doesn't work. (We only use one of the save routines at
25958 the moment, though.)
25960 - PPC32 elf targets have "exit" versions of the restore routines
25961 that restore the link register and can save some extra space.
25962 These require an extra suffix. (There are also "tail" versions
25963 of the restore routines and "GOT" versions of the save routines,
25964 but we don't generate those at present. Same problems apply,
25965 though.)
25967 We deal with all this by synthesizing our own prefix/suffix and
25968 using that for the simple sprintf call shown above. */
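/* Typical results are "_savegpr_29_x" (32-bit SVR4 GPR save that also
   stores LR), "_restfpr_14" (SVR4 FPR restore), or "_savegpr0_14" and
   "_savegpr1_14" (AIX-style, with and without LR handling).  These
   are examples of the pattern, not an exhaustive list.  */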
25969 if (TARGET_SPE)
25971 /* No floating point saves on the SPE. */
25972 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
25974 if ((sel & SAVRES_SAVE))
25975 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
25976 else
25977 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
25979 if ((sel & SAVRES_LR))
25980 suffix = "_x";
25982 else if (DEFAULT_ABI == ABI_V4)
25984 if (TARGET_64BIT)
25985 goto aix_names;
25987 if ((sel & SAVRES_REG) == SAVRES_GPR)
25988 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25989 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25990 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25991 else if ((sel & SAVRES_REG) == SAVRES_VR)
25992 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25993 else
25994 abort ();
25996 if ((sel & SAVRES_LR))
25997 suffix = "_x";
25999 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26001 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26002 /* No out-of-line save/restore routines for GPRs on AIX. */
26003 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26004 #endif
26006 aix_names:
26007 if ((sel & SAVRES_REG) == SAVRES_GPR)
26008 prefix = ((sel & SAVRES_SAVE)
26009 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26010 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26011 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26013 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26014 if ((sel & SAVRES_LR))
26015 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26016 else
26017 #endif
26019 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26020 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26023 else if ((sel & SAVRES_REG) == SAVRES_VR)
26024 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26025 else
26026 abort ();
26029 if (DEFAULT_ABI == ABI_DARWIN)
26031 /* The Darwin approach is (slightly) different, in order to be
26032 compatible with code generated by the system toolchain. There is a
26033 single symbol for the start of save sequence, and the code here
26034 embeds an offset into that code on the basis of the first register
26035 to be saved. */
26036 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26037 if ((sel & SAVRES_REG) == SAVRES_GPR)
26038 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26039 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26040 (regno - 13) * 4, prefix, regno);
26041 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26042 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26043 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26044 else if ((sel & SAVRES_REG) == SAVRES_VR)
26045 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26046 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26047 else
26048 abort ();
26050 else
26051 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26053 return savres_routine_name;
26056 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26057 We are saving/restoring GPRs if GPR is true. */
26059 static rtx
26060 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26062 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26063 ? info->first_gp_reg_save
26064 : (sel & SAVRES_REG) == SAVRES_FPR
26065 ? info->first_fp_reg_save - 32
26066 : (sel & SAVRES_REG) == SAVRES_VR
26067 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26068 : -1);
26069 rtx sym;
26070 int select = sel;
26072 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
26073 versions of the gpr routines. */
26074 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
26075 && info->spe_64bit_regs_used)
26076 select ^= SAVRES_FPR ^ SAVRES_GPR;
26078 /* Don't generate bogus routine names. */
26079 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26080 && regno <= LAST_SAVRES_REGISTER
26081 && select >= 0 && select <= 12);
26083 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26085 if (sym == NULL)
26087 char *name;
26089 name = rs6000_savres_routine_name (info, regno, sel);
26091 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26092 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26093 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26096 return sym;
26099 /* Emit a sequence of insns, including a stack tie if needed, for
26100 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26101 reset the stack pointer, but move the base of the frame into
26102 reg UPDT_REGNO for use by out-of-line register restore routines. */
26104 static rtx
26105 rs6000_emit_stack_reset (rs6000_stack_t *info,
26106 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26107 unsigned updt_regno)
26109 rtx updt_reg_rtx;
26111 /* This blockage is needed so that sched doesn't decide to move
26112 the sp change before the register restores. */
26113 if (DEFAULT_ABI == ABI_V4
26114 || (TARGET_SPE_ABI
26115 && info->spe_64bit_regs_used != 0
26116 && info->first_gp_reg_save != 32))
26117 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
26119 /* If we are restoring registers out-of-line, we will be using the
26120 "exit" variants of the restore routines, which will reset the
26121 stack for us. But we do need to point updt_reg at the
26122 right place for those routines. */
26123 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26125 if (frame_off != 0)
26126 return emit_insn (gen_add3_insn (updt_reg_rtx,
26127 frame_reg_rtx, GEN_INT (frame_off)));
26128 else if (REGNO (frame_reg_rtx) != updt_regno)
26129 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26131 return NULL_RTX;
26134 /* Return the register number used as a pointer by out-of-line
26135 save/restore functions. */
26137 static inline unsigned
26138 ptr_regno_for_savres (int sel)
26140 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26141 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26142 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26145 /* Construct a parallel rtx describing the effect of a call to an
26146 out-of-line register save/restore routine, and emit the insn
26147 or jump_insn as appropriate. */
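/* The PARALLEL built below mirrors the call's effects: an optional
   (return) for "exit"-style restore routines, a clobber of LR, a USE
   of the routine's SYMBOL_REF, a USE (or, for the vector routines, a
   CLOBBER) of the pointer register, one frame set per covered
   register, and possibly a store of LR, so dataflow sees exactly what
   the out-of-line routine does.  */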
26149 static rtx
26150 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26151 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26152 machine_mode reg_mode, int sel)
26154 int i;
26155 int offset, start_reg, end_reg, n_regs, use_reg;
26156 int reg_size = GET_MODE_SIZE (reg_mode);
26157 rtx sym;
26158 rtvec p;
26159 rtx par, insn;
26161 offset = 0;
26162 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26163 ? info->first_gp_reg_save
26164 : (sel & SAVRES_REG) == SAVRES_FPR
26165 ? info->first_fp_reg_save
26166 : (sel & SAVRES_REG) == SAVRES_VR
26167 ? info->first_altivec_reg_save
26168 : -1);
26169 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26170 ? 32
26171 : (sel & SAVRES_REG) == SAVRES_FPR
26172 ? 64
26173 : (sel & SAVRES_REG) == SAVRES_VR
26174 ? LAST_ALTIVEC_REGNO + 1
26175 : -1);
26176 n_regs = end_reg - start_reg;
26177 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26178 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26179 + n_regs);
26181 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26182 RTVEC_ELT (p, offset++) = ret_rtx;
26184 RTVEC_ELT (p, offset++)
26185 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26187 sym = rs6000_savres_routine_sym (info, sel);
26188 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26190 use_reg = ptr_regno_for_savres (sel);
26191 if ((sel & SAVRES_REG) == SAVRES_VR)
26193 /* Vector regs are saved/restored using [reg+reg] addressing. */
26194 RTVEC_ELT (p, offset++)
26195 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26196 RTVEC_ELT (p, offset++)
26197 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26199 else
26200 RTVEC_ELT (p, offset++)
26201 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26203 for (i = 0; i < end_reg - start_reg; i++)
26204 RTVEC_ELT (p, i + offset)
26205 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26206 frame_reg_rtx, save_area_offset + reg_size * i,
26207 (sel & SAVRES_SAVE) != 0);
26209 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26210 RTVEC_ELT (p, i + offset)
26211 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26213 par = gen_rtx_PARALLEL (VOIDmode, p);
26215 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26217 insn = emit_jump_insn (par);
26218 JUMP_LABEL (insn) = ret_rtx;
26220 else
26221 insn = emit_insn (par);
26222 return insn;
26225 /* Emit code to store CR fields that need to be saved into REG. */
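/* With ELFv2 and mfcrf, a single field can be moved on its own; e.g.
   if only CR2 needs saving this emits roughly "mfcrf rD,0x20", the
   FXM mask being 1 << (7 - field).  All other cases fall back to
   mfcr, which copies all eight fields at once.  */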
26227 static void
26228 rs6000_emit_move_from_cr (rtx reg)
26230 /* Only the ELFv2 ABI allows storing only selected fields. */
26231 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26233 int i, cr_reg[8], count = 0;
26235 /* Collect CR fields that must be saved. */
26236 for (i = 0; i < 8; i++)
26237 if (save_reg_p (CR0_REGNO + i))
26238 cr_reg[count++] = i;
26240 /* If it's just a single one, use mfcrf. */
26241 if (count == 1)
26243 rtvec p = rtvec_alloc (1);
26244 rtvec r = rtvec_alloc (2);
26245 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26246 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26247 RTVEC_ELT (p, 0)
26248 = gen_rtx_SET (reg,
26249 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26251 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26252 return;
26255 /* ??? It might be better to handle count == 2 / 3 cases here
26256 as well, using logical operations to combine the values. */
26259 emit_insn (gen_movesi_from_cr (reg));
26262 /* Return whether the split-stack arg pointer (r12) is used. */
26264 static bool
26265 split_stack_arg_pointer_used_p (void)
26267 /* If the pseudo holding the arg pointer is no longer a pseudo,
26268 then the arg pointer is used. */
26269 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26270 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26271 || (REGNO (cfun->machine->split_stack_arg_pointer)
26272 < FIRST_PSEUDO_REGISTER)))
26273 return true;
26275 /* Unfortunately we also need to do some code scanning, since
26276 r12 may have been substituted for the pseudo. */
26277 rtx_insn *insn;
26278 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26279 FOR_BB_INSNS (bb, insn)
26280 if (NONDEBUG_INSN_P (insn))
26282 /* A call destroys r12. */
26283 if (CALL_P (insn))
26284 return false;
26286 df_ref use;
26287 FOR_EACH_INSN_USE (use, insn)
26289 rtx x = DF_REF_REG (use);
26290 if (REG_P (x) && REGNO (x) == 12)
26291 return true;
26293 df_ref def;
26294 FOR_EACH_INSN_DEF (def, insn)
26296 rtx x = DF_REF_REG (def);
26297 if (REG_P (x) && REGNO (x) == 12)
26298 return false;
26301 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26304 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26306 static bool
26307 rs6000_global_entry_point_needed_p (void)
26309 /* Only needed for the ELFv2 ABI. */
26310 if (DEFAULT_ABI != ABI_ELFv2)
26311 return false;
26313 /* With -msingle-pic-base, we assume the whole program shares the same
26314 TOC, so no global entry point prologues are needed anywhere. */
26315 if (TARGET_SINGLE_PIC_BASE)
26316 return false;
26318 /* Ensure we have a global entry point for thunks. ??? We could
26319 avoid that if the target routine doesn't need a global entry point,
26320 but we do not know whether this is the case at this point. */
26321 if (cfun->is_thunk)
26322 return true;
26324 /* For regular functions, rs6000_emit_prologue sets this flag if the
26325 routine ever uses the TOC pointer. */
26326 return cfun->machine->r2_setup_needed;
26329 /* Emit function prologue as insns. */
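/* Rough order of events below: V.4 and eh_return frames adjust the
   stack first; LR is copied to r0 and stored; CR is copied and
   stored; FPRs, GPRs and AltiVec registers are saved either inline or
   through the out-of-line routines; remaining ABIs then adjust the
   stack; finally the frame pointer, VRSAVE and the TOC/PIC register
   are set up.  */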
26331 void
26332 rs6000_emit_prologue (void)
26334 rs6000_stack_t *info = rs6000_stack_info ();
26335 machine_mode reg_mode = Pmode;
26336 int reg_size = TARGET_32BIT ? 4 : 8;
26337 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26338 rtx frame_reg_rtx = sp_reg_rtx;
26339 unsigned int cr_save_regno;
26340 rtx cr_save_rtx = NULL_RTX;
26341 rtx insn;
26342 int strategy;
26343 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26344 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26345 && call_used_regs[STATIC_CHAIN_REGNUM]);
26346 int using_split_stack = (flag_split_stack
26347 && (lookup_attribute ("no_split_stack",
26348 DECL_ATTRIBUTES (cfun->decl))
26349 == NULL));
26351 /* Offset to top of frame for frame_reg and sp respectively. */
26352 HOST_WIDE_INT frame_off = 0;
26353 HOST_WIDE_INT sp_off = 0;
26354 /* sp_adjust is the stack adjusting instruction, tracked so that the
26355 insn setting up the split-stack arg pointer can be emitted just
26356 prior to it, when r12 is not used here for other purposes. */
26357 rtx_insn *sp_adjust = 0;
26359 #if CHECKING_P
26360 /* Track and check usage of r0, r11, r12. */
26361 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26362 #define START_USE(R) do \
26364 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26365 reg_inuse |= 1 << (R); \
26366 } while (0)
26367 #define END_USE(R) do \
26369 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26370 reg_inuse &= ~(1 << (R)); \
26371 } while (0)
26372 #define NOT_INUSE(R) do \
26374 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26375 } while (0)
26376 #else
26377 #define START_USE(R) do {} while (0)
26378 #define END_USE(R) do {} while (0)
26379 #define NOT_INUSE(R) do {} while (0)
26380 #endif
26382 if (DEFAULT_ABI == ABI_ELFv2
26383 && !TARGET_SINGLE_PIC_BASE)
26385 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26387 /* With -mminimal-toc we may generate an extra use of r2 below. */
26388 if (TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
26389 cfun->machine->r2_setup_needed = true;
26393 if (flag_stack_usage_info)
26394 current_function_static_stack_size = info->total_size;
26396 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26398 HOST_WIDE_INT size = info->total_size;
26400 if (crtl->is_leaf && !cfun->calls_alloca)
26402 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
26403 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
26404 size - STACK_CHECK_PROTECT);
26406 else if (size > 0)
26407 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
26410 if (TARGET_FIX_AND_CONTINUE)
26412 /* gdb on darwin arranges to forward a function from the old
26413 address by modifying the first 5 instructions of the function
26414 to branch to the overriding function. This is necessary to
26415 permit function pointers that point to the old function to
26416 actually forward to the new function. */
26417 emit_insn (gen_nop ());
26418 emit_insn (gen_nop ());
26419 emit_insn (gen_nop ());
26420 emit_insn (gen_nop ());
26421 emit_insn (gen_nop ());
26424 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
26426 reg_mode = V2SImode;
26427 reg_size = 8;
26430 /* Handle world saves specially here. */
26431 if (WORLD_SAVE_P (info))
26433 int i, j, sz;
26434 rtx treg;
26435 rtvec p;
26436 rtx reg0;
26438 /* save_world expects lr in r0. */
26439 reg0 = gen_rtx_REG (Pmode, 0);
26440 if (info->lr_save_p)
26442 insn = emit_move_insn (reg0,
26443 gen_rtx_REG (Pmode, LR_REGNO));
26444 RTX_FRAME_RELATED_P (insn) = 1;
26447 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26448 assumptions about the offsets of various bits of the stack
26449 frame. */
26450 gcc_assert (info->gp_save_offset == -220
26451 && info->fp_save_offset == -144
26452 && info->lr_save_offset == 8
26453 && info->cr_save_offset == 4
26454 && info->push_p
26455 && info->lr_save_p
26456 && (!crtl->calls_eh_return
26457 || info->ehrd_offset == -432)
26458 && info->vrsave_save_offset == -224
26459 && info->altivec_save_offset == -416);
26461 treg = gen_rtx_REG (SImode, 11);
26462 emit_move_insn (treg, GEN_INT (-info->total_size));
26464 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26465 in R11. It also clobbers R12, so beware! */
26467 /* Preserve CR2 for save_world prologues. */
26468 sz = 5;
26469 sz += 32 - info->first_gp_reg_save;
26470 sz += 64 - info->first_fp_reg_save;
26471 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26472 p = rtvec_alloc (sz);
26473 j = 0;
26474 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26475 gen_rtx_REG (SImode,
26476 LR_REGNO));
26477 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26478 gen_rtx_SYMBOL_REF (Pmode,
26479 "*save_world"));
26480 /* We do floats first so that the instruction pattern matches
26481 properly. */
26482 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26483 RTVEC_ELT (p, j++)
26484 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26485 ? DFmode : SFmode,
26486 info->first_fp_reg_save + i),
26487 frame_reg_rtx,
26488 info->fp_save_offset + frame_off + 8 * i);
26489 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26490 RTVEC_ELT (p, j++)
26491 = gen_frame_store (gen_rtx_REG (V4SImode,
26492 info->first_altivec_reg_save + i),
26493 frame_reg_rtx,
26494 info->altivec_save_offset + frame_off + 16 * i);
26495 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26496 RTVEC_ELT (p, j++)
26497 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26498 frame_reg_rtx,
26499 info->gp_save_offset + frame_off + reg_size * i);
26501 /* CR register traditionally saved as CR2. */
26502 RTVEC_ELT (p, j++)
26503 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26504 frame_reg_rtx, info->cr_save_offset + frame_off);
26505 /* Explain about use of R0. */
26506 if (info->lr_save_p)
26507 RTVEC_ELT (p, j++)
26508 = gen_frame_store (reg0,
26509 frame_reg_rtx, info->lr_save_offset + frame_off);
26510 /* Explain what happens to the stack pointer. */
26512 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26513 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26516 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26517 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26518 treg, GEN_INT (-info->total_size));
26519 sp_off = frame_off = info->total_size;
26522 strategy = info->savres_strategy;
26524 /* For V.4, update stack before we do any saving and set back pointer. */
26525 if (! WORLD_SAVE_P (info)
26526 && info->push_p
26527 && (DEFAULT_ABI == ABI_V4
26528 || crtl->calls_eh_return))
26530 bool need_r11 = (TARGET_SPE
26531 ? (!(strategy & SAVE_INLINE_GPRS)
26532 && info->spe_64bit_regs_used == 0)
26533 : (!(strategy & SAVE_INLINE_FPRS)
26534 || !(strategy & SAVE_INLINE_GPRS)
26535 || !(strategy & SAVE_INLINE_VRS)));
26536 int ptr_regno = -1;
26537 rtx ptr_reg = NULL_RTX;
26538 int ptr_off = 0;
26540 if (info->total_size < 32767)
26541 frame_off = info->total_size;
26542 else if (need_r11)
26543 ptr_regno = 11;
26544 else if (info->cr_save_p
26545 || info->lr_save_p
26546 || info->first_fp_reg_save < 64
26547 || info->first_gp_reg_save < 32
26548 || info->altivec_size != 0
26549 || info->vrsave_size != 0
26550 || crtl->calls_eh_return)
26551 ptr_regno = 12;
26552 else
26554 /* The prologue won't be saving any regs so there is no need
26555 to set up a frame register to access any frame save area.
26556 We also won't be using frame_off anywhere below, but set
26557 the correct value anyway to protect against future
26558 changes to this function. */
26559 frame_off = info->total_size;
26561 if (ptr_regno != -1)
26563 /* Set up the frame offset to that needed by the first
26564 out-of-line save function. */
26565 START_USE (ptr_regno);
26566 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26567 frame_reg_rtx = ptr_reg;
26568 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26569 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26570 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26571 ptr_off = info->gp_save_offset + info->gp_size;
26572 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26573 ptr_off = info->altivec_save_offset + info->altivec_size;
26574 frame_off = -ptr_off;
26576 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26577 ptr_reg, ptr_off);
26578 if (REGNO (frame_reg_rtx) == 12)
26579 sp_adjust = 0;
26580 sp_off = info->total_size;
26581 if (frame_reg_rtx != sp_reg_rtx)
26582 rs6000_emit_stack_tie (frame_reg_rtx, false);
26585 /* If we use the link register, get it into r0. */
26586 if (!WORLD_SAVE_P (info) && info->lr_save_p)
26588 rtx addr, reg, mem;
26590 reg = gen_rtx_REG (Pmode, 0);
26591 START_USE (0);
26592 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26593 RTX_FRAME_RELATED_P (insn) = 1;
26595 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26596 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26598 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26599 GEN_INT (info->lr_save_offset + frame_off));
26600 mem = gen_rtx_MEM (Pmode, addr);
26601 /* This should not be of rs6000_sr_alias_set, because of
26602 __builtin_return_address. */
26604 insn = emit_move_insn (mem, reg);
26605 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26606 NULL_RTX, NULL_RTX);
26607 END_USE (0);
26611 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26612 r12 will be needed by the out-of-line gpr save. */
26613 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26614 && !(strategy & (SAVE_INLINE_GPRS
26615 | SAVE_NOINLINE_GPRS_SAVES_LR))
26616 ? 11 : 12);
26617 if (!WORLD_SAVE_P (info)
26618 && info->cr_save_p
26619 && REGNO (frame_reg_rtx) != cr_save_regno
26620 && !(using_static_chain_p && cr_save_regno == 11)
26621 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26623 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26624 START_USE (cr_save_regno);
26625 rs6000_emit_move_from_cr (cr_save_rtx);
26628 /* Do any required saving of fpr's. If only one or two to save, do
26629 it ourselves. Otherwise, call function. */
26630 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26632 int i;
26633 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26634 if (save_reg_p (info->first_fp_reg_save + i))
26635 emit_frame_save (frame_reg_rtx,
26636 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26637 ? DFmode : SFmode),
26638 info->first_fp_reg_save + i,
26639 info->fp_save_offset + frame_off + 8 * i,
26640 sp_off - frame_off);
26642 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26644 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26645 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26646 unsigned ptr_regno = ptr_regno_for_savres (sel);
26647 rtx ptr_reg = frame_reg_rtx;
26649 if (REGNO (frame_reg_rtx) == ptr_regno)
26650 gcc_checking_assert (frame_off == 0);
26651 else
26653 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26654 NOT_INUSE (ptr_regno);
26655 emit_insn (gen_add3_insn (ptr_reg,
26656 frame_reg_rtx, GEN_INT (frame_off)));
26658 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26659 info->fp_save_offset,
26660 info->lr_save_offset,
26661 DFmode, sel);
26662 rs6000_frame_related (insn, ptr_reg, sp_off,
26663 NULL_RTX, NULL_RTX);
26664 if (lr)
26665 END_USE (0);
26668 /* Save GPRs. This is done as a PARALLEL if we are using
26669 the store-multiple instructions. */
26670 if (!WORLD_SAVE_P (info)
26671 && TARGET_SPE_ABI
26672 && info->spe_64bit_regs_used != 0
26673 && info->first_gp_reg_save != 32)
26675 int i;
26676 rtx spe_save_area_ptr;
26677 HOST_WIDE_INT save_off;
26678 int ool_adjust = 0;
26680 /* Determine whether we can address all of the registers that need
26681 to be saved with an offset from frame_reg_rtx that fits in
26682 the small const field for SPE memory instructions. */
26683 int spe_regs_addressable
26684 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
26685 + reg_size * (32 - info->first_gp_reg_save - 1))
26686 && (strategy & SAVE_INLINE_GPRS));
26688 if (spe_regs_addressable)
26690 spe_save_area_ptr = frame_reg_rtx;
26691 save_off = frame_off;
26693 else
26695 /* Make r11 point to the start of the SPE save area. We need
26696 to be careful here if r11 is holding the static chain. If
26697 it is, then temporarily save it in r0. */
26698 HOST_WIDE_INT offset;
26700 if (!(strategy & SAVE_INLINE_GPRS))
26701 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
26702 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
26703 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
26704 save_off = frame_off - offset;
26706 if (using_static_chain_p)
26708 rtx r0 = gen_rtx_REG (Pmode, 0);
26710 START_USE (0);
26711 gcc_assert (info->first_gp_reg_save > 11);
26713 emit_move_insn (r0, spe_save_area_ptr);
26715 else if (REGNO (frame_reg_rtx) != 11)
26716 START_USE (11);
26718 emit_insn (gen_addsi3 (spe_save_area_ptr,
26719 frame_reg_rtx, GEN_INT (offset)));
26720 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
26721 frame_off = -info->spe_gp_save_offset + ool_adjust;
26724 if ((strategy & SAVE_INLINE_GPRS))
26726 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26727 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
26728 emit_frame_save (spe_save_area_ptr, reg_mode,
26729 info->first_gp_reg_save + i,
26730 (info->spe_gp_save_offset + save_off
26731 + reg_size * i),
26732 sp_off - save_off);
26734 else
26736 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
26737 info->spe_gp_save_offset + save_off,
26738 0, reg_mode,
26739 SAVRES_SAVE | SAVRES_GPR);
26741 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
26742 NULL_RTX, NULL_RTX);
26745 /* Move the static chain pointer back. */
26746 if (!spe_regs_addressable)
26748 if (using_static_chain_p)
26750 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
26751 END_USE (0);
26753 else if (REGNO (frame_reg_rtx) != 11)
26754 END_USE (11);
26757 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26759 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26760 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26761 unsigned ptr_regno = ptr_regno_for_savres (sel);
26762 rtx ptr_reg = frame_reg_rtx;
26763 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26764 int end_save = info->gp_save_offset + info->gp_size;
26765 int ptr_off;
26767 if (ptr_regno == 12)
26768 sp_adjust = 0;
26769 if (!ptr_set_up)
26770 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26772 /* Need to adjust r11 (r12) if we saved any FPRs. */
26773 if (end_save + frame_off != 0)
26775 rtx offset = GEN_INT (end_save + frame_off);
26777 if (ptr_set_up)
26778 frame_off = -end_save;
26779 else
26780 NOT_INUSE (ptr_regno);
26781 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26783 else if (!ptr_set_up)
26785 NOT_INUSE (ptr_regno);
26786 emit_move_insn (ptr_reg, frame_reg_rtx);
26788 ptr_off = -end_save;
26789 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26790 info->gp_save_offset + ptr_off,
26791 info->lr_save_offset + ptr_off,
26792 reg_mode, sel);
26793 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26794 NULL_RTX, NULL_RTX);
26795 if (lr)
26796 END_USE (0);
26798 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26800 rtvec p;
26801 int i;
26802 p = rtvec_alloc (32 - info->first_gp_reg_save);
26803 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26804 RTVEC_ELT (p, i)
26805 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26806 frame_reg_rtx,
26807 info->gp_save_offset + frame_off + reg_size * i);
26808 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26809 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26810 NULL_RTX, NULL_RTX);
26812 else if (!WORLD_SAVE_P (info))
26814 int i;
26815 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26816 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
26817 emit_frame_save (frame_reg_rtx, reg_mode,
26818 info->first_gp_reg_save + i,
26819 info->gp_save_offset + frame_off + reg_size * i,
26820 sp_off - frame_off);
26823 if (crtl->calls_eh_return)
26825 unsigned int i;
26826 rtvec p;
26828 for (i = 0; ; ++i)
26830 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26831 if (regno == INVALID_REGNUM)
26832 break;
26835 p = rtvec_alloc (i);
26837 for (i = 0; ; ++i)
26839 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26840 if (regno == INVALID_REGNUM)
26841 break;
26843 insn
26844 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
26845 sp_reg_rtx,
26846 info->ehrd_offset + sp_off + reg_size * (int) i);
26847 RTVEC_ELT (p, i) = insn;
26848 RTX_FRAME_RELATED_P (insn) = 1;
26851 insn = emit_insn (gen_blockage ());
26852 RTX_FRAME_RELATED_P (insn) = 1;
26853 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
26856 /* In AIX ABI we need to make sure r2 is really saved. */
26857 if (TARGET_AIX && crtl->calls_eh_return)
26859 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
26860 rtx save_insn, join_insn, note;
26861 long toc_restore_insn;
26863 tmp_reg = gen_rtx_REG (Pmode, 11);
26864 tmp_reg_si = gen_rtx_REG (SImode, 11);
26865 if (using_static_chain_p)
26867 START_USE (0);
26868 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
26870 else
26871 START_USE (11);
26872 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
26873 /* Peek at the instruction to which this function returns. If it's
26874 restoring r2, then we know we've already saved r2. We can't
26875 unconditionally save r2 because the value we have will already
26876 be updated if we arrived at this function via a plt call or
26877 toc adjusting stub. */
26878 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
26879 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
26880 + RS6000_TOC_SAVE_SLOT);
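/* 0x80410000 is "lwz r2,0(r1)" and 0xE8410000 is "ld r2,0(r1)";
   adding RS6000_TOC_SAVE_SLOT fills in the displacement of the ABI's
   TOC save slot, reconstructing the exact insn that a TOC-adjusting
   call sequence places after the bl in the caller.  */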
26881 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
26882 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
26883 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
26884 validate_condition_mode (EQ, CCUNSmode);
26885 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
26886 emit_insn (gen_rtx_SET (compare_result,
26887 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
26888 toc_save_done = gen_label_rtx ();
26889 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26890 gen_rtx_EQ (VOIDmode, compare_result,
26891 const0_rtx),
26892 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
26893 pc_rtx);
26894 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26895 JUMP_LABEL (jump) = toc_save_done;
26896 LABEL_NUSES (toc_save_done) += 1;
26898 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
26899 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
26900 sp_off - frame_off);
26902 emit_label (toc_save_done);
26904 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
26905 have a CFG that has different saves along different paths.
26906 Move the note to a dummy blockage insn, which describes that
26907 R2 is unconditionally saved after the label. */
26908 /* ??? An alternate representation might be a special insn pattern
26909 containing both the branch and the store. That might give the
26910 code that minimizes the number of DW_CFA_advance opcodes more
26911 freedom in placing the annotations. */
26912 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
26913 if (note)
26914 remove_note (save_insn, note);
26915 else
26916 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
26917 copy_rtx (PATTERN (save_insn)), NULL_RTX);
26918 RTX_FRAME_RELATED_P (save_insn) = 0;
26920 join_insn = emit_insn (gen_blockage ());
26921 REG_NOTES (join_insn) = note;
26922 RTX_FRAME_RELATED_P (join_insn) = 1;
26924 if (using_static_chain_p)
26926 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
26927 END_USE (0);
26929 else
26930 END_USE (11);
26933 /* Save CR if we use any that must be preserved. */
26934 if (!WORLD_SAVE_P (info) && info->cr_save_p)
26936 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26937 GEN_INT (info->cr_save_offset + frame_off));
26938 rtx mem = gen_frame_mem (SImode, addr);
26940 /* If we didn't copy cr before, do so now using r0. */
26941 if (cr_save_rtx == NULL_RTX)
26943 START_USE (0);
26944 cr_save_rtx = gen_rtx_REG (SImode, 0);
26945 rs6000_emit_move_from_cr (cr_save_rtx);
26948 /* Saving CR requires a two-instruction sequence: one instruction
26949 to move the CR to a general-purpose register, and a second
26950 instruction that stores the GPR to memory.
26952 We do not emit any DWARF CFI records for the first of these,
26953 because we cannot properly represent the fact that CR is saved in
26954 a register. One reason is that we cannot express that multiple
26955 CR fields are saved; another reason is that on 64-bit, the size
26956 of the CR register in DWARF (4 bytes) differs from the size of
26957 a general-purpose register.
26959 This means if any intervening instruction were to clobber one of
26960 the call-saved CR fields, we'd have incorrect CFI. To prevent
26961 this from happening, we mark the store to memory as a use of
26962 those CR fields, which prevents any such instruction from being
26963 scheduled in between the two instructions. */
26964 rtx crsave_v[9];
26965 int n_crsave = 0;
26966 int i;
26968 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
26969 for (i = 0; i < 8; i++)
26970 if (save_reg_p (CR0_REGNO + i))
26971 crsave_v[n_crsave++]
26972 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
26974 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
26975 gen_rtvec_v (n_crsave, crsave_v)));
26976 END_USE (REGNO (cr_save_rtx));
26978 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
26979 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
26980 so we need to construct a frame expression manually. */
26981 RTX_FRAME_RELATED_P (insn) = 1;
26983 /* Update address to be stack-pointer relative, like
26984 rs6000_frame_related would do. */
26985 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26986 GEN_INT (info->cr_save_offset + sp_off));
26987 mem = gen_frame_mem (SImode, addr);
26989 if (DEFAULT_ABI == ABI_ELFv2)
26991 /* In the ELFv2 ABI we generate separate CFI records for each
26992 CR field that was actually saved. They all point to the
26993 same 32-bit stack slot. */
26994 rtx crframe[8];
26995 int n_crframe = 0;
26997 for (i = 0; i < 8; i++)
26998 if (save_reg_p (CR0_REGNO + i))
27000 crframe[n_crframe]
27001 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27003 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27004 n_crframe++;
27007 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27008 gen_rtx_PARALLEL (VOIDmode,
27009 gen_rtvec_v (n_crframe, crframe)));
27011 else
27013 /* In other ABIs, by convention, we use a single CR regnum to
27014 represent the fact that all call-saved CR fields are saved.
27015 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27016 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27017 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27021 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27022 *separate* slots if the routine calls __builtin_eh_return, so
27023 that they can be independently restored by the unwinder. */
27024 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27026 int i, cr_off = info->ehcr_offset;
27027 rtx crsave;
27029 /* ??? We might get better performance by using multiple mfocrf
27030 instructions. */
27031 crsave = gen_rtx_REG (SImode, 0);
27032 emit_insn (gen_movesi_from_cr (crsave));
27034 for (i = 0; i < 8; i++)
27035 if (!call_used_regs[CR0_REGNO + i])
27037 rtvec p = rtvec_alloc (2);
27038 RTVEC_ELT (p, 0)
27039 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27040 RTVEC_ELT (p, 1)
27041 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27043 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27045 RTX_FRAME_RELATED_P (insn) = 1;
27046 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27047 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27048 sp_reg_rtx, cr_off + sp_off));
27050 cr_off += reg_size;
27054 /* Update stack and set back pointer unless this is V.4,
27055 for which it was done previously. */
27056 if (!WORLD_SAVE_P (info) && info->push_p
27057 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27059 rtx ptr_reg = NULL;
27060 int ptr_off = 0;
27062 /* If saving altivec regs we need to be able to address all save
27063 locations using a 16-bit offset. */
27064 if ((strategy & SAVE_INLINE_VRS) == 0
27065 || (info->altivec_size != 0
27066 && (info->altivec_save_offset + info->altivec_size - 16
27067 + info->total_size - frame_off) > 32767)
27068 || (info->vrsave_size != 0
27069 && (info->vrsave_save_offset
27070 + info->total_size - frame_off) > 32767))
27072 int sel = SAVRES_SAVE | SAVRES_VR;
27073 unsigned ptr_regno = ptr_regno_for_savres (sel);
27075 if (using_static_chain_p
27076 && ptr_regno == STATIC_CHAIN_REGNUM)
27077 ptr_regno = 12;
27078 if (REGNO (frame_reg_rtx) != ptr_regno)
27079 START_USE (ptr_regno);
27080 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27081 frame_reg_rtx = ptr_reg;
27082 ptr_off = info->altivec_save_offset + info->altivec_size;
27083 frame_off = -ptr_off;
27085 else if (REGNO (frame_reg_rtx) == 1)
27086 frame_off = info->total_size;
27087 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27088 ptr_reg, ptr_off);
27089 if (REGNO (frame_reg_rtx) == 12)
27090 sp_adjust = 0;
27091 sp_off = info->total_size;
27092 if (frame_reg_rtx != sp_reg_rtx)
27093 rs6000_emit_stack_tie (frame_reg_rtx, false);
27096 /* Set frame pointer, if needed. */
27097 if (frame_pointer_needed)
27099 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27100 sp_reg_rtx);
27101 RTX_FRAME_RELATED_P (insn) = 1;
27104 /* Save AltiVec registers if needed. Save here because the red zone does
27105 not always include AltiVec registers. */
27106 if (!WORLD_SAVE_P (info)
27107 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27109 int end_save = info->altivec_save_offset + info->altivec_size;
27110 int ptr_off;
27111 /* Oddly, the vector save/restore functions point r0 at the end
27112 of the save area, then use r11 or r12 to load offsets for
27113 [reg+reg] addressing. */
27114 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27115 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27116 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27118 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27119 NOT_INUSE (0);
27120 if (scratch_regno == 12)
27121 sp_adjust = 0;
27122 if (end_save + frame_off != 0)
27124 rtx offset = GEN_INT (end_save + frame_off);
27126 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27128 else
27129 emit_move_insn (ptr_reg, frame_reg_rtx);
27131 ptr_off = -end_save;
27132 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27133 info->altivec_save_offset + ptr_off,
27134 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27135 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27136 NULL_RTX, NULL_RTX);
27137 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27139 /* The oddity mentioned above clobbered our frame reg. */
27140 emit_move_insn (frame_reg_rtx, ptr_reg);
27141 frame_off = ptr_off;
27144 else if (!WORLD_SAVE_P (info)
27145 && info->altivec_size != 0)
27147 int i;
27149 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27150 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27152 rtx areg, savereg, mem;
27153 HOST_WIDE_INT offset;
27155 offset = (info->altivec_save_offset + frame_off
27156 + 16 * (i - info->first_altivec_reg_save));
27158 savereg = gen_rtx_REG (V4SImode, i);
27160 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
27162 mem = gen_frame_mem (V4SImode,
27163 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27164 GEN_INT (offset)));
27165 insn = emit_insn (gen_rtx_SET (mem, savereg));
27166 areg = NULL_RTX;
27168 else
27170 NOT_INUSE (0);
27171 areg = gen_rtx_REG (Pmode, 0);
27172 emit_move_insn (areg, GEN_INT (offset));
27174 /* AltiVec addressing mode is [reg+reg]. */
27175 mem = gen_frame_mem (V4SImode,
27176 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27178 /* Rather than emitting a generic move, force use of the stvx
27179 instruction, which we always want on ISA 2.07 (power8) systems.
27180 In particular we don't want xxpermdi/stxvd2x for little
27181 endian. */
27182 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27185 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27186 areg, GEN_INT (offset));
27190 /* VRSAVE is a bit vector representing which AltiVec registers
27191 are used. The OS uses this to determine which vector
27192 registers to save on a context switch. We need to save
27193 VRSAVE on the stack frame, add whatever AltiVec registers we
27194 used in this function, and do the corresponding magic in the
27195 epilogue. */
27197 if (!WORLD_SAVE_P (info)
27198 && info->vrsave_size != 0)
27200 rtx reg, vrsave;
27201 int offset;
27202 int save_regno;
27204 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
27205 be using r12 as frame_reg_rtx and r11 as the static chain
27206 pointer for nested functions. */
27207 save_regno = 12;
27208 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27209 && !using_static_chain_p)
27210 save_regno = 11;
27211 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27213 save_regno = 11;
27214 if (using_static_chain_p)
27215 save_regno = 0;
27218 NOT_INUSE (save_regno);
27219 reg = gen_rtx_REG (SImode, save_regno);
27220 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
27221 if (TARGET_MACHO)
27222 emit_insn (gen_get_vrsave_internal (reg));
27223 else
27224 emit_insn (gen_rtx_SET (reg, vrsave));
27226 /* Save VRSAVE. */
27227 offset = info->vrsave_save_offset + frame_off;
27228 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
27230 /* Include the registers in the mask. */
27231 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
27233 insn = emit_insn (generate_set_vrsave (reg, info, 0));
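/* Illustrative sketch (not part of GCC): how a VRSAVE-style mask is
   built and tested.  This assumes the ALTIVEC_REG_BIT convention used
   above, i.e. vector register N maps to bit (31 - N) of a 32-bit
   mask; the helper below is hypothetical.  */
#if 0
#include <stdio.h>

static unsigned int
vrsave_bit (int vr)			/* vr in 0..31 */
{
  return 1u << (31 - vr);
}

int
main (void)
{
  unsigned int mask = 0;
  mask |= vrsave_bit (20);		/* function uses v20 */
  mask |= vrsave_bit (31);		/* ... and v31 */
  printf ("mask = 0x%08x, v20 used: %d\n",
	  mask, (mask & vrsave_bit (20)) != 0);
  return 0;
}
#endif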
27236 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27237 if (!TARGET_SINGLE_PIC_BASE
27238 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
27239 || (DEFAULT_ABI == ABI_V4
27240 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27241 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27243 /* If emit_load_toc_table will use the link register, we need to save
27244 it. We use R12 for this purpose because emit_load_toc_table
27245 can use register 0. This allows us to use a plain 'blr' to return
27246 from the procedure more often. */
27247 int save_LR_around_toc_setup = (TARGET_ELF
27248 && DEFAULT_ABI == ABI_V4
27249 && flag_pic
27250 && ! info->lr_save_p
27251 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27252 if (save_LR_around_toc_setup)
27254 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27255 rtx tmp = gen_rtx_REG (Pmode, 12);
27257 sp_adjust = 0;
27258 insn = emit_move_insn (tmp, lr);
27259 RTX_FRAME_RELATED_P (insn) = 1;
27261 rs6000_emit_load_toc_table (TRUE);
27263 insn = emit_move_insn (lr, tmp);
27264 add_reg_note (insn, REG_CFA_RESTORE, lr);
27265 RTX_FRAME_RELATED_P (insn) = 1;
27267 else
27268 rs6000_emit_load_toc_table (TRUE);
27271 #if TARGET_MACHO
27272 if (!TARGET_SINGLE_PIC_BASE
27273 && DEFAULT_ABI == ABI_DARWIN
27274 && flag_pic && crtl->uses_pic_offset_table)
27276 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27277 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27279 /* Save and restore LR locally around this call (in R0). */
27280 if (!info->lr_save_p)
27281 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27283 emit_insn (gen_load_macho_picbase (src));
27285 emit_move_insn (gen_rtx_REG (Pmode,
27286 RS6000_PIC_OFFSET_TABLE_REGNUM),
27287 lr);
27289 if (!info->lr_save_p)
27290 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27292 #endif
27294 /* If we need to, save the TOC register after doing the stack setup.
27295 Do not emit eh frame info for this save. The unwinder wants info,
27296 conceptually attached to instructions in this function, about
27297 register values in the caller of this function. This R2 may have
27298 already been changed from the value in the caller.
27299 We don't attempt to write accurate DWARF EH frame info for R2
27300 because code emitted by gcc for a (non-pointer) function call
27301 doesn't save and restore R2. Instead, R2 is managed out-of-line
27302 by a linker generated plt call stub when the function resides in
27303 a shared library. This behavior is costly to describe in DWARF,
27304 both in terms of the size of DWARF info and the time taken in the
27305 unwinder to interpret it. R2 changes, apart from the
27306 calls_eh_return case earlier in this function, are handled by
27307 linux-unwind.h frob_update_context. */
27308 if (rs6000_save_toc_in_prologue_p ())
27310 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27311 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27314 if (using_split_stack && split_stack_arg_pointer_used_p ())
27316 /* Set up the arg pointer (r12) for -fsplit-stack code. If
27317 __morestack was called, it left the arg pointer to the old
27318 stack in r29. Otherwise, the arg pointer is the top of the
27319 current frame. */
27320 cfun->machine->split_stack_argp_used = true;
27321 if (sp_adjust)
27323 rtx r12 = gen_rtx_REG (Pmode, 12);
27324 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
27325 emit_insn_before (set_r12, sp_adjust);
27327 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
27329 rtx r12 = gen_rtx_REG (Pmode, 12);
27330 if (frame_off == 0)
27331 emit_move_insn (r12, frame_reg_rtx);
27332 else
27333 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
27335 if (info->push_p)
27337 rtx r12 = gen_rtx_REG (Pmode, 12);
27338 rtx r29 = gen_rtx_REG (Pmode, 29);
27339 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
27340 rtx not_more = gen_label_rtx ();
27341 rtx jump;
27343 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27344 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
27345 gen_rtx_LABEL_REF (VOIDmode, not_more),
27346 pc_rtx);
27347 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27348 JUMP_LABEL (jump) = not_more;
27349 LABEL_NUSES (not_more) += 1;
27350 emit_move_insn (r12, r29);
27351 emit_label (not_more);
27356 /* Output .extern statements for the save/restore routines we use. */
27358 static void
27359 rs6000_output_savres_externs (FILE *file)
27361 rs6000_stack_t *info = rs6000_stack_info ();
27363 if (TARGET_DEBUG_STACK)
27364 debug_stack_info (info);
27366 /* Write .extern for any function we will call to save and restore
27367 fp values. */
27368 if (info->first_fp_reg_save < 64
27369 && !TARGET_MACHO
27370 && !TARGET_ELF)
27372 char *name;
27373 int regno = info->first_fp_reg_save - 32;
27375 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27377 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27378 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27379 name = rs6000_savres_routine_name (info, regno, sel);
27380 fprintf (file, "\t.extern %s\n", name);
27382 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27384 bool lr = (info->savres_strategy
27385 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27386 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27387 name = rs6000_savres_routine_name (info, regno, sel);
27388 fprintf (file, "\t.extern %s\n", name);
27393 /* Write function prologue. */
27395 static void
27396 rs6000_output_function_prologue (FILE *file,
27397 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
27399 if (!cfun->is_thunk)
27400 rs6000_output_savres_externs (file);
27402 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27403 immediately after the global entry point label. */
27404 if (rs6000_global_entry_point_needed_p ())
27406 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27408 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27410 if (TARGET_CMODEL != CMODEL_LARGE)
27412 /* In the small and medium code models, we assume the TOC is less
27413 than 2 GB away from the text section, so it can be computed via the
27414 following two-instruction sequence. */
27415 char buf[256];
27417 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27418 fprintf (file, "0:\taddis 2,12,.TOC.-");
27419 assemble_name (file, buf);
27420 fprintf (file, "@ha\n");
27421 fprintf (file, "\taddi 2,2,.TOC.-");
27422 assemble_name (file, buf);
27423 fprintf (file, "@l\n");
27425 else
27427 /* In the large code model, we allow arbitrary offsets between the
27428 TOC and the text section, so we have to load the offset from
27429 memory. The data field is emitted directly before the global
27430 entry point in rs6000_elf_declare_function_name. */
27431 char buf[256];
27433 #ifdef HAVE_AS_ENTRY_MARKERS
27434 /* If supported by the linker, emit a marker relocation. If the
27435 total code size of the final executable or shared library
27436 happens to fit into 2 GB after all, the linker will replace
27437 this code sequence with the sequence for the small or medium
27438 code model. */
27439 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27440 #endif
27441 fprintf (file, "\tld 2,");
27442 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27443 assemble_name (file, buf);
27444 fprintf (file, "-");
27445 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27446 assemble_name (file, buf);
27447 fprintf (file, "(12)\n");
27448 fprintf (file, "\tadd 2,2,12\n");
27451 fputs ("\t.localentry\t", file);
27452 assemble_name (file, name);
27453 fputs (",.-", file);
27454 assemble_name (file, name);
27455 fputs ("\n", file);
27458 /* Output -mprofile-kernel code. This needs to be done here instead of
27459 in output_function_profile since it must go after the ELFv2 ABI
27460 local entry point. */
27461 if (TARGET_PROFILE_KERNEL && crtl->profile)
27463 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27464 gcc_assert (!TARGET_32BIT);
27466 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27468 /* In the ELFv2 ABI we have no compiler stack word. It must be
27469 the responsibility of _mcount to preserve the static chain
27470 register if required. */
27471 if (DEFAULT_ABI != ABI_ELFv2
27472 && cfun->static_chain_decl != NULL)
27474 asm_fprintf (file, "\tstd %s,24(%s)\n",
27475 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27476 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27477 asm_fprintf (file, "\tld %s,24(%s)\n",
27478 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27480 else
27481 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27484 rs6000_pic_labelno++;
27487 /* -mprofile-kernel code calls mcount before the function prologue,
27488 so a profiled leaf function should stay a leaf function. */
27489 static bool
27490 rs6000_keep_leaf_when_profiled ()
27492 return TARGET_PROFILE_KERNEL;
27495 /* Non-zero if vmx regs are restored before the frame pop, zero if
27496 we restore after the pop when possible. */
27497 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27499 /* Restoring cr is a two step process: loading a reg from the frame
27500 save, then moving the reg to cr. For ABI_V4 we must let the
27501 unwinder know that the stack location is no longer valid at or
27502 before the stack deallocation, but we can't emit a cfa_restore for
27503 cr at the stack deallocation like we do for other registers.
27504 The trouble is that it is possible for the move to cr to be
27505 scheduled after the stack deallocation. So say exactly where cr
27506 is located on each of the two insns. */
27508 static rtx
27509 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27511 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27512 rtx reg = gen_rtx_REG (SImode, regno);
27513 rtx_insn *insn = emit_move_insn (reg, mem);
27515 if (!exit_func && DEFAULT_ABI == ABI_V4)
27517 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27518 rtx set = gen_rtx_SET (reg, cr);
27520 add_reg_note (insn, REG_CFA_REGISTER, set);
27521 RTX_FRAME_RELATED_P (insn) = 1;
27523 return reg;
27526 /* Reload CR from REG. */
27528 static void
27529 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27531 int count = 0;
27532 int i;
27534 if (using_mfcr_multiple)
27536 for (i = 0; i < 8; i++)
27537 if (save_reg_p (CR0_REGNO + i))
27538 count++;
27539 gcc_assert (count);
27542 if (using_mfcr_multiple && count > 1)
27544 rtx_insn *insn;
27545 rtvec p;
27546 int ndx;
27548 p = rtvec_alloc (count);
27550 ndx = 0;
27551 for (i = 0; i < 8; i++)
27552 if (save_reg_p (CR0_REGNO + i))
27554 rtvec r = rtvec_alloc (2);
27555 RTVEC_ELT (r, 0) = reg;
27556 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27557 RTVEC_ELT (p, ndx) =
27558 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27559 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27560 ndx++;
27562 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27563 gcc_assert (ndx == count);
27565 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27566 CR field separately. */
27567 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27569 for (i = 0; i < 8; i++)
27570 if (save_reg_p (CR0_REGNO + i))
27571 add_reg_note (insn, REG_CFA_RESTORE,
27572 gen_rtx_REG (SImode, CR0_REGNO + i));
27574 RTX_FRAME_RELATED_P (insn) = 1;
27577 else
27578 for (i = 0; i < 8; i++)
27579 if (save_reg_p (CR0_REGNO + i))
27581 rtx insn = emit_insn (gen_movsi_to_cr_one
27582 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27584 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27585 CR field separately, attached to the insn that in fact
27586 restores this particular CR field. */
27587 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27589 add_reg_note (insn, REG_CFA_RESTORE,
27590 gen_rtx_REG (SImode, CR0_REGNO + i));
27592 RTX_FRAME_RELATED_P (insn) = 1;
27596 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27597 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27598 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27600 rtx_insn *insn = get_last_insn ();
27601 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27603 add_reg_note (insn, REG_CFA_RESTORE, cr);
27604 RTX_FRAME_RELATED_P (insn) = 1;
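/* Illustrative sketch (not part of GCC): the CR register holds eight
   4-bit fields, CR0 in the most significant nibble, so the mask
   1 << (7 - i) used above selects field i for an mtcrf-style move.
   A standalone check of which bits each field occupies, under that
   assumption:  */
#if 0
#include <stdio.h>

static unsigned int
cr_field (unsigned int cr_image, int i)	/* i in 0..7 */
{
  return (cr_image >> (4 * (7 - i))) & 0xf;
}

int
main (void)
{
  unsigned int cr = 0x20000008;		/* CR0 = 2, CR7 = 8 */
  printf ("CR0 = %x (mtcrf mask 0x%02x)\n", cr_field (cr, 0), 1 << (7 - 0));
  printf ("CR7 = %x (mtcrf mask 0x%02x)\n", cr_field (cr, 7), 1 << (7 - 7));
  return 0;
}
#endif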
27608 /* Like cr, the move to lr instruction can be scheduled after the
27609 stack deallocation, but unlike cr, its stack frame save is still
27610 valid. So we only need to emit the cfa_restore on the correct
27611 instruction. */
27613 static void
27614 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27616 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27617 rtx reg = gen_rtx_REG (Pmode, regno);
27619 emit_move_insn (reg, mem);
27622 static void
27623 restore_saved_lr (int regno, bool exit_func)
27625 rtx reg = gen_rtx_REG (Pmode, regno);
27626 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27627 rtx_insn *insn = emit_move_insn (lr, reg);
27629 if (!exit_func && flag_shrink_wrap)
27631 add_reg_note (insn, REG_CFA_RESTORE, lr);
27632 RTX_FRAME_RELATED_P (insn) = 1;
27636 static rtx
27637 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27639 if (DEFAULT_ABI == ABI_ELFv2)
27641 int i;
27642 for (i = 0; i < 8; i++)
27643 if (save_reg_p (CR0_REGNO + i))
27645 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27646 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27647 cfa_restores);
27650 else if (info->cr_save_p)
27651 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27652 gen_rtx_REG (SImode, CR2_REGNO),
27653 cfa_restores);
27655 if (info->lr_save_p)
27656 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27657 gen_rtx_REG (Pmode, LR_REGNO),
27658 cfa_restores);
27659 return cfa_restores;
27662 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27663 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
27664 below the stack pointer that are not clobbered by signals. */
27666 static inline bool
27667 offset_below_red_zone_p (HOST_WIDE_INT offset)
27669 return offset < (DEFAULT_ABI == ABI_V4
27670 ? 0
27671 : TARGET_32BIT ? -220 : -288);
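/* Illustrative sketch (not part of GCC): the same red-zone test as a
   standalone function, using the constants from the comment above
   (no cushion on V.4, 220 or 288 bytes on the AIX ABIs).  It returns
   true when a save slot at OFFSET could be clobbered by a signal
   handler.  */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool
below_red_zone (long offset, bool abi_v4, bool is_32bit)
{
  long limit = abi_v4 ? 0 : is_32bit ? -220 : -288;
  return offset < limit;
}

int
main (void)
{
  /* -250 lies inside the 64-bit red zone (288 bytes, so it is safe)
     but below the 32-bit one (220 bytes, so it can be clobbered).  */
  printf ("%d %d\n", below_red_zone (-250, false, false),
	  below_red_zone (-250, false, true));
  return 0;
}
#endif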
27674 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27676 static void
27677 emit_cfa_restores (rtx cfa_restores)
27679 rtx_insn *insn = get_last_insn ();
27680 rtx *loc = &REG_NOTES (insn);
27682 while (*loc)
27683 loc = &XEXP (*loc, 1);
27684 *loc = cfa_restores;
27685 RTX_FRAME_RELATED_P (insn) = 1;
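/* Illustrative sketch (not part of GCC): the pointer-to-pointer walk
   used above to splice notes onto the end of REG_NOTES, shown on a
   generic singly linked list.  */
#if 0
#include <stdio.h>

struct node { int val; struct node *next; };

/* Append CHAIN at the tail of *HEAD with no special empty-list case,
   just like `while (*loc) loc = &XEXP (*loc, 1); *loc = cfa_restores;'
   above.  */
static void
append (struct node **head, struct node *chain)
{
  struct node **loc = head;
  while (*loc)
    loc = &(*loc)->next;
  *loc = chain;
}

int
main (void)
{
  struct node c = { 3, 0 }, b = { 2, &c }, a = { 1, 0 };
  struct node *list = &a;
  append (&list, &b);
  for (struct node *p = list; p; p = p->next)
    printf ("%d ", p->val);		/* prints: 1 2 3 */
  printf ("\n");
  return 0;
}
#endif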
27688 /* Emit function epilogue as insns. */
27690 void
27691 rs6000_emit_epilogue (int sibcall)
27693 rs6000_stack_t *info;
27694 int restoring_GPRs_inline;
27695 int restoring_FPRs_inline;
27696 int using_load_multiple;
27697 int using_mtcr_multiple;
27698 int use_backchain_to_restore_sp;
27699 int restore_lr;
27700 int strategy;
27701 HOST_WIDE_INT frame_off = 0;
27702 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27703 rtx frame_reg_rtx = sp_reg_rtx;
27704 rtx cfa_restores = NULL_RTX;
27705 rtx insn;
27706 rtx cr_save_reg = NULL_RTX;
27707 machine_mode reg_mode = Pmode;
27708 int reg_size = TARGET_32BIT ? 4 : 8;
27709 int i;
27710 bool exit_func;
27711 unsigned ptr_regno;
27713 info = rs6000_stack_info ();
27715 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
27717 reg_mode = V2SImode;
27718 reg_size = 8;
27721 strategy = info->savres_strategy;
27722 using_load_multiple = strategy & REST_MULTIPLE;
27723 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27724 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27725 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
27726 || rs6000_cpu == PROCESSOR_PPC603
27727 || rs6000_cpu == PROCESSOR_PPC750
27728 || optimize_size);
27729 /* Restore via the backchain when we have a large frame, since this
27730 is more efficient than an addis, addi pair. The second condition
27731 here will not trigger at the moment; we don't actually need a
27732 frame pointer for alloca, but the generic parts of the compiler
27733 give us one anyway. */
27734 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27735 ? info->lr_save_offset
27736 : 0) > 32767
27737 || (cfun->calls_alloca
27738 && !frame_pointer_needed));
27739 restore_lr = (info->lr_save_p
27740 && (restoring_FPRs_inline
27741 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27742 && (restoring_GPRs_inline
27743 || info->first_fp_reg_save < 64));
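/* Illustrative sketch (not part of GCC): the `> 32767' tests here and
   in the prologue guard the 16-bit signed displacement field of the
   PowerPC D-form loads and stores; a standalone form of that check,
   under that assumption.  */
#if 0
#include <stdbool.h>

static bool
fits_d_form (long disp)
{
  return disp >= -32768 && disp <= 32767;
}

/* A frame so large that the LR save slot is beyond 32767 bytes from
   r1 cannot be reached by a single lwz/ld, which is why the epilogue
   walks the backchain (or materializes the offset with an addis/addi
   pair) instead.  */
#endif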
27745 if (WORLD_SAVE_P (info))
27747 int i, j;
27748 char rname[30];
27749 const char *alloc_rname;
27750 rtvec p;
27752 /* eh_rest_world_r10 will return to the location saved in the LR
27753 stack slot (which is not likely to be our caller).
27754 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27755 rest_world is similar, except any R10 parameter is ignored.
27756 The exception-handling stuff that was here in 2.95 is no
27757 longer necessary. */
27759 p = rtvec_alloc (9
27761 + 32 - info->first_gp_reg_save
27762 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27763 + 63 + 1 - info->first_fp_reg_save);
27765 strcpy (rname, ((crtl->calls_eh_return) ?
27766 "*eh_rest_world_r10" : "*rest_world"));
27767 alloc_rname = ggc_strdup (rname);
27769 j = 0;
27770 RTVEC_ELT (p, j++) = ret_rtx;
27771 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27772 gen_rtx_REG (Pmode,
27773 LR_REGNO));
27774 RTVEC_ELT (p, j++)
27775 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27776 /* The instruction pattern requires a clobber here;
27777 it is shared with the restVEC helper. */
27778 RTVEC_ELT (p, j++)
27779 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27782 /* CR register traditionally saved as CR2. */
27783 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27784 RTVEC_ELT (p, j++)
27785 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27786 if (flag_shrink_wrap)
27788 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27789 gen_rtx_REG (Pmode, LR_REGNO),
27790 cfa_restores);
27791 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27795 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27797 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27798 RTVEC_ELT (p, j++)
27799 = gen_frame_load (reg,
27800 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27801 if (flag_shrink_wrap)
27802 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27804 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27806 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27807 RTVEC_ELT (p, j++)
27808 = gen_frame_load (reg,
27809 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27810 if (flag_shrink_wrap)
27811 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27813 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27815 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27816 ? DFmode : SFmode),
27817 info->first_fp_reg_save + i);
27818 RTVEC_ELT (p, j++)
27819 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27820 if (flag_shrink_wrap)
27821 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27823 RTVEC_ELT (p, j++)
27824 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27825 RTVEC_ELT (p, j++)
27826 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27827 RTVEC_ELT (p, j++)
27828 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27829 RTVEC_ELT (p, j++)
27830 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27831 RTVEC_ELT (p, j++)
27832 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27833 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27835 if (flag_shrink_wrap)
27837 REG_NOTES (insn) = cfa_restores;
27838 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27839 RTX_FRAME_RELATED_P (insn) = 1;
27841 return;
27844 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27845 if (info->push_p)
27846 frame_off = info->total_size;
27848 /* Restore AltiVec registers if we must do so before adjusting the
27849 stack. */
27850 if (info->altivec_size != 0
27851 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27852 || (DEFAULT_ABI != ABI_V4
27853 && offset_below_red_zone_p (info->altivec_save_offset))))
27855 int i;
27856 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27858 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27859 if (use_backchain_to_restore_sp)
27861 int frame_regno = 11;
27863 if ((strategy & REST_INLINE_VRS) == 0)
27865 /* Of r11 and r12, select the one not clobbered by an
27866 out-of-line restore function for the frame register. */
27867 frame_regno = 11 + 12 - scratch_regno;
27869 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27870 emit_move_insn (frame_reg_rtx,
27871 gen_rtx_MEM (Pmode, sp_reg_rtx));
27872 frame_off = 0;
27874 else if (frame_pointer_needed)
27875 frame_reg_rtx = hard_frame_pointer_rtx;
27877 if ((strategy & REST_INLINE_VRS) == 0)
27879 int end_save = info->altivec_save_offset + info->altivec_size;
27880 int ptr_off;
27881 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27882 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27884 if (end_save + frame_off != 0)
27886 rtx offset = GEN_INT (end_save + frame_off);
27888 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27890 else
27891 emit_move_insn (ptr_reg, frame_reg_rtx);
27893 ptr_off = -end_save;
27894 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27895 info->altivec_save_offset + ptr_off,
27896 0, V4SImode, SAVRES_VR);
27898 else
27900 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27901 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27903 rtx addr, areg, mem, insn;
27904 rtx reg = gen_rtx_REG (V4SImode, i);
27905 HOST_WIDE_INT offset
27906 = (info->altivec_save_offset + frame_off
27907 + 16 * (i - info->first_altivec_reg_save));
27909 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
27911 mem = gen_frame_mem (V4SImode,
27912 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27913 GEN_INT (offset)));
27914 insn = gen_rtx_SET (reg, mem);
27916 else
27918 areg = gen_rtx_REG (Pmode, 0);
27919 emit_move_insn (areg, GEN_INT (offset));
27921 /* AltiVec addressing mode is [reg+reg]. */
27922 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27923 mem = gen_frame_mem (V4SImode, addr);
27925 /* Rather than emitting a generic move, force use of the
27926 lvx instruction, which we always want. In particular we
27927 don't want lxvd2x/xxpermdi for little endian. */
27928 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27931 (void) emit_insn (insn);
27935 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27936 if (((strategy & REST_INLINE_VRS) == 0
27937 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27938 && (flag_shrink_wrap
27939 || (offset_below_red_zone_p
27940 (info->altivec_save_offset
27941 + 16 * (i - info->first_altivec_reg_save)))))
27943 rtx reg = gen_rtx_REG (V4SImode, i);
27944 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27948 /* Restore VRSAVE if we must do so before adjusting the stack. */
27949 if (info->vrsave_size != 0
27950 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27951 || (DEFAULT_ABI != ABI_V4
27952 && offset_below_red_zone_p (info->vrsave_save_offset))))
27954 rtx reg;
27956 if (frame_reg_rtx == sp_reg_rtx)
27958 if (use_backchain_to_restore_sp)
27960 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27961 emit_move_insn (frame_reg_rtx,
27962 gen_rtx_MEM (Pmode, sp_reg_rtx));
27963 frame_off = 0;
27965 else if (frame_pointer_needed)
27966 frame_reg_rtx = hard_frame_pointer_rtx;
27969 reg = gen_rtx_REG (SImode, 12);
27970 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27971 info->vrsave_save_offset + frame_off));
27973 emit_insn (generate_set_vrsave (reg, info, 1));
27976 insn = NULL_RTX;
27977 /* If we have a large stack frame, restore the old stack pointer
27978 using the backchain. */
27979 if (use_backchain_to_restore_sp)
27981 if (frame_reg_rtx == sp_reg_rtx)
27983 /* Under V.4, don't reset the stack pointer until after we're done
27984 loading the saved registers. */
27985 if (DEFAULT_ABI == ABI_V4)
27986 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27988 insn = emit_move_insn (frame_reg_rtx,
27989 gen_rtx_MEM (Pmode, sp_reg_rtx));
27990 frame_off = 0;
27992 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27993 && DEFAULT_ABI == ABI_V4)
27994 /* frame_reg_rtx has been set up by the altivec restore. */
27996 else
27998 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
27999 frame_reg_rtx = sp_reg_rtx;
28002 /* If we have a frame pointer, we can restore the old stack pointer
28003 from it. */
28004 else if (frame_pointer_needed)
28006 frame_reg_rtx = sp_reg_rtx;
28007 if (DEFAULT_ABI == ABI_V4)
28008 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28009 /* Prevent reordering memory accesses against stack pointer restore. */
28010 else if (cfun->calls_alloca
28011 || offset_below_red_zone_p (-info->total_size))
28012 rs6000_emit_stack_tie (frame_reg_rtx, true);
28014 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28015 GEN_INT (info->total_size)));
28016 frame_off = 0;
28018 else if (info->push_p
28019 && DEFAULT_ABI != ABI_V4
28020 && !crtl->calls_eh_return)
28022 /* Prevent reordering memory accesses against stack pointer restore. */
28023 if (cfun->calls_alloca
28024 || offset_below_red_zone_p (-info->total_size))
28025 rs6000_emit_stack_tie (frame_reg_rtx, false);
28026 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28027 GEN_INT (info->total_size)));
28028 frame_off = 0;
28030 if (insn && frame_reg_rtx == sp_reg_rtx)
28032 if (cfa_restores)
28034 REG_NOTES (insn) = cfa_restores;
28035 cfa_restores = NULL_RTX;
28037 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28038 RTX_FRAME_RELATED_P (insn) = 1;
28041 /* Restore AltiVec registers if we have not done so already. */
28042 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28043 && info->altivec_size != 0
28044 && (DEFAULT_ABI == ABI_V4
28045 || !offset_below_red_zone_p (info->altivec_save_offset)))
28047 int i;
28049 if ((strategy & REST_INLINE_VRS) == 0)
28051 int end_save = info->altivec_save_offset + info->altivec_size;
28052 int ptr_off;
28053 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28054 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28055 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28057 if (end_save + frame_off != 0)
28059 rtx offset = GEN_INT (end_save + frame_off);
28061 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28063 else
28064 emit_move_insn (ptr_reg, frame_reg_rtx);
28066 ptr_off = -end_save;
28067 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28068 info->altivec_save_offset + ptr_off,
28069 0, V4SImode, SAVRES_VR);
28070 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28072 /* Frame reg was clobbered by out-of-line save. Restore it
28073 from ptr_reg, and if we are calling out-of-line gpr or
28074 fpr restore set up the correct pointer and offset. */
28075 unsigned newptr_regno = 1;
28076 if (!restoring_GPRs_inline)
28078 bool lr = info->gp_save_offset + info->gp_size == 0;
28079 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28080 newptr_regno = ptr_regno_for_savres (sel);
28081 end_save = info->gp_save_offset + info->gp_size;
28083 else if (!restoring_FPRs_inline)
28085 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28086 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28087 newptr_regno = ptr_regno_for_savres (sel);
28088 end_save = info->fp_save_offset + info->fp_size;
28091 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28092 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28094 if (end_save + ptr_off != 0)
28096 rtx offset = GEN_INT (end_save + ptr_off);
28098 frame_off = -end_save;
28099 if (TARGET_32BIT)
28100 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28101 ptr_reg, offset));
28102 else
28103 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28104 ptr_reg, offset));
28106 else
28108 frame_off = ptr_off;
28109 emit_move_insn (frame_reg_rtx, ptr_reg);
28113 else
28115 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28116 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28118 rtx addr, areg, mem, insn;
28119 rtx reg = gen_rtx_REG (V4SImode, i);
28120 HOST_WIDE_INT offset
28121 = (info->altivec_save_offset + frame_off
28122 + 16 * (i - info->first_altivec_reg_save));
28124 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
28126 mem = gen_frame_mem (V4SImode,
28127 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28128 GEN_INT (offset)));
28129 insn = gen_rtx_SET (reg, mem);
28131 else
28133 areg = gen_rtx_REG (Pmode, 0);
28134 emit_move_insn (areg, GEN_INT (offset));
28136 /* AltiVec addressing mode is [reg+reg]. */
28137 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28138 mem = gen_frame_mem (V4SImode, addr);
28140 /* Rather than emitting a generic move, force use of the
28141 lvx instruction, which we always want. In particular we
28142 don't want lxvd2x/xxpermdi for little endian. */
28143 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28146 (void) emit_insn (insn);
28150 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28151 if (((strategy & REST_INLINE_VRS) == 0
28152 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28153 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28155 rtx reg = gen_rtx_REG (V4SImode, i);
28156 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28160 /* Restore VRSAVE if we have not done so already. */
28161 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28162 && info->vrsave_size != 0
28163 && (DEFAULT_ABI == ABI_V4
28164 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28166 rtx reg;
28168 reg = gen_rtx_REG (SImode, 12);
28169 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28170 info->vrsave_save_offset + frame_off));
28172 emit_insn (generate_set_vrsave (reg, info, 1));
28175 /* If we exit by an out-of-line restore function on ABI_V4 then that
28176 function will deallocate the stack, so we don't need to worry
28177 about the unwinder restoring cr from an invalid stack frame
28178 location. */
28179 exit_func = (!restoring_FPRs_inline
28180 || (!restoring_GPRs_inline
28181 && info->first_fp_reg_save == 64));
28183 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28184 *separate* slots if the routine calls __builtin_eh_return, so
28185 that they can be independently restored by the unwinder. */
28186 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28188 int i, cr_off = info->ehcr_offset;
28190 for (i = 0; i < 8; i++)
28191 if (!call_used_regs[CR0_REGNO + i])
28193 rtx reg = gen_rtx_REG (SImode, 0);
28194 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28195 cr_off + frame_off));
28197 insn = emit_insn (gen_movsi_to_cr_one
28198 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28200 if (!exit_func && flag_shrink_wrap)
28202 add_reg_note (insn, REG_CFA_RESTORE,
28203 gen_rtx_REG (SImode, CR0_REGNO + i));
28205 RTX_FRAME_RELATED_P (insn) = 1;
28208 cr_off += reg_size;
28212 /* Get the old lr if we saved it. If we are restoring registers
28213 out-of-line, then the out-of-line routines can do this for us. */
28214 if (restore_lr && restoring_GPRs_inline)
28215 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28217 /* Get the old cr if we saved it. */
28218 if (info->cr_save_p)
28220 unsigned cr_save_regno = 12;
28222 if (!restoring_GPRs_inline)
28224 /* Ensure we don't use the register used by the out-of-line
28225 gpr register restore below. */
28226 bool lr = info->gp_save_offset + info->gp_size == 0;
28227 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28228 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28230 if (gpr_ptr_regno == 12)
28231 cr_save_regno = 11;
28232 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28234 else if (REGNO (frame_reg_rtx) == 12)
28235 cr_save_regno = 11;
28237 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28238 info->cr_save_offset + frame_off,
28239 exit_func);
28242 /* Set LR here to try to overlap restores below. */
28243 if (restore_lr && restoring_GPRs_inline)
28244 restore_saved_lr (0, exit_func);
28246 /* Load exception handler data registers, if needed. */
28247 if (crtl->calls_eh_return)
28249 unsigned int i, regno;
28251 if (TARGET_AIX)
28253 rtx reg = gen_rtx_REG (reg_mode, 2);
28254 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28255 frame_off + RS6000_TOC_SAVE_SLOT));
28258 for (i = 0; ; ++i)
28260 rtx mem;
28262 regno = EH_RETURN_DATA_REGNO (i);
28263 if (regno == INVALID_REGNUM)
28264 break;
28266 /* Note: possible use of r0 here to address SPE regs. */
28267 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28268 info->ehrd_offset + frame_off
28269 + reg_size * (int) i);
28271 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28275 /* Restore GPRs. This is done as a PARALLEL if we are using
28276 the load-multiple instructions. */
28277 if (TARGET_SPE_ABI
28278 && info->spe_64bit_regs_used
28279 && info->first_gp_reg_save != 32)
28281 /* Determine whether we can address all of the registers that need
28282 to be saved with an offset from frame_reg_rtx that fits in
28283 the small const field for SPE memory instructions. */
28284 int spe_regs_addressable
28285 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
28286 + reg_size * (32 - info->first_gp_reg_save - 1))
28287 && restoring_GPRs_inline);
28289 if (!spe_regs_addressable)
28291 int ool_adjust = 0;
28292 rtx old_frame_reg_rtx = frame_reg_rtx;
28293 /* Make r11 point to the start of the SPE save area. We worried about
28294 not clobbering it when we were saving registers in the prologue.
28295 There's no need to worry here because the static chain is passed
28296 anew to every function. */
28298 if (!restoring_GPRs_inline)
28299 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
28300 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28301 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
28302 GEN_INT (info->spe_gp_save_offset
28303 + frame_off
28304 - ool_adjust)));
28305 /* Keep the invariant that frame_reg_rtx + frame_off points
28306 at the top of the stack frame. */
28307 frame_off = -info->spe_gp_save_offset + ool_adjust;
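/* Illustrative sketch (not part of GCC): a check of the invariant
   kept above, namely that frame_reg_rtx + frame_off still names the
   top of the frame after rebasing onto the SPE save area.  */
#if 0
#include <assert.h>

static void
check_rebase (long old_base, long frame_off,
	      long spe_gp_save_offset, long ool_adjust)
{
  long new_base = old_base + spe_gp_save_offset + frame_off - ool_adjust;
  long new_off = -spe_gp_save_offset + ool_adjust;
  /* Both (base, offset) pairs denote the same address.  */
  assert (new_base + new_off == old_base + frame_off);
}
#endif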
28310 if (restoring_GPRs_inline)
28312 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
28314 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28315 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
28317 rtx offset, addr, mem, reg;
28319 /* We're doing all this to ensure that the immediate offset
28320 fits into the immediate field of 'evldd'. */
28321 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
28323 offset = GEN_INT (spe_offset + reg_size * i);
28324 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
28325 mem = gen_rtx_MEM (V2SImode, addr);
28326 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28328 emit_move_insn (reg, mem);
28331 else
28332 rs6000_emit_savres_rtx (info, frame_reg_rtx,
28333 info->spe_gp_save_offset + frame_off,
28334 info->lr_save_offset + frame_off,
28335 reg_mode,
28336 SAVRES_GPR | SAVRES_LR);
28338 else if (!restoring_GPRs_inline)
28340 /* We are jumping to an out-of-line function. */
28341 rtx ptr_reg;
28342 int end_save = info->gp_save_offset + info->gp_size;
28343 bool can_use_exit = end_save == 0;
28344 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28345 int ptr_off;
28347 /* Emit stack reset code if we need it. */
28348 ptr_regno = ptr_regno_for_savres (sel);
28349 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28350 if (can_use_exit)
28351 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
28352 else if (end_save + frame_off != 0)
28353 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28354 GEN_INT (end_save + frame_off)));
28355 else if (REGNO (frame_reg_rtx) != ptr_regno)
28356 emit_move_insn (ptr_reg, frame_reg_rtx);
28357 if (REGNO (frame_reg_rtx) == ptr_regno)
28358 frame_off = -end_save;
28360 if (can_use_exit && info->cr_save_p)
28361 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28363 ptr_off = -end_save;
28364 rs6000_emit_savres_rtx (info, ptr_reg,
28365 info->gp_save_offset + ptr_off,
28366 info->lr_save_offset + ptr_off,
28367 reg_mode, sel);
28369 else if (using_load_multiple)
28371 rtvec p;
28372 p = rtvec_alloc (32 - info->first_gp_reg_save);
28373 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28374 RTVEC_ELT (p, i)
28375 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28376 frame_reg_rtx,
28377 info->gp_save_offset + frame_off + reg_size * i);
28378 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28380 else
28382 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28383 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
28384 emit_insn (gen_frame_load
28385 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28386 frame_reg_rtx,
28387 info->gp_save_offset + frame_off + reg_size * i));
28390 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28392 /* If the frame pointer was used then we can't delay emitting
28393 a REG_CFA_DEF_CFA note. This must happen on the insn that
28394 restores the frame pointer, r31. We may have already emitted
28395 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28396 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28397 be harmless if emitted. */
28398 if (frame_pointer_needed)
28400 insn = get_last_insn ();
28401 add_reg_note (insn, REG_CFA_DEF_CFA,
28402 plus_constant (Pmode, frame_reg_rtx, frame_off));
28403 RTX_FRAME_RELATED_P (insn) = 1;
28406 /* Set up cfa_restores. We always need these when
28407 shrink-wrapping. If not shrink-wrapping then we only need
28408 the cfa_restore when the stack location is no longer valid.
28409 The cfa_restores must be emitted on or before the insn that
28410 invalidates the stack, and of course must not be emitted
28411 before the insn that actually does the restore. The latter
28412 is why it is a bad idea to emit the cfa_restores as a group
28413 on the last instruction here that actually does a restore:
28414 That insn may be reordered with respect to others doing
28415 restores. */
28416 if (flag_shrink_wrap
28417 && !restoring_GPRs_inline
28418 && info->first_fp_reg_save == 64)
28419 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28421 for (i = info->first_gp_reg_save; i < 32; i++)
28422 if (!restoring_GPRs_inline
28423 || using_load_multiple
28424 || rs6000_reg_live_or_pic_offset_p (i))
28426 rtx reg = gen_rtx_REG (reg_mode, i);
28428 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28432 if (!restoring_GPRs_inline
28433 && info->first_fp_reg_save == 64)
28435 /* We are jumping to an out-of-line function. */
28436 if (cfa_restores)
28437 emit_cfa_restores (cfa_restores);
28438 return;
28441 if (restore_lr && !restoring_GPRs_inline)
28443 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28444 restore_saved_lr (0, exit_func);
28447 /* Restore fpr's if we need to do it without calling a function. */
28448 if (restoring_FPRs_inline)
28449 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28450 if (save_reg_p (info->first_fp_reg_save + i))
28452 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28453 ? DFmode : SFmode),
28454 info->first_fp_reg_save + i);
28455 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28456 info->fp_save_offset + frame_off + 8 * i));
28457 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28458 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28461 /* If we saved cr, restore it here. Just those that were used. */
28462 if (info->cr_save_p)
28463 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28465 /* If this is V.4, unwind the stack pointer after all of the loads
28466 have been done, or set up r11 if we are restoring fp out of line. */
28467 ptr_regno = 1;
28468 if (!restoring_FPRs_inline)
28470 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28471 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28472 ptr_regno = ptr_regno_for_savres (sel);
28475 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
28476 if (REGNO (frame_reg_rtx) == ptr_regno)
28477 frame_off = 0;
28479 if (insn && restoring_FPRs_inline)
28481 if (cfa_restores)
28483 REG_NOTES (insn) = cfa_restores;
28484 cfa_restores = NULL_RTX;
28486 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28487 RTX_FRAME_RELATED_P (insn) = 1;
28490 if (crtl->calls_eh_return)
28492 rtx sa = EH_RETURN_STACKADJ_RTX;
28493 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28496 if (!sibcall)
28498 rtvec p;
28499 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28500 if (! restoring_FPRs_inline)
28502 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
28503 RTVEC_ELT (p, 0) = ret_rtx;
28505 else
28507 if (cfa_restores)
28509 /* We can't hang the cfa_restores off a simple return,
28510 since the shrink-wrap code sometimes uses an existing
28511 return. This means there might be a path from
28512 pre-prologue code to this return, and dwarf2cfi code
28513 wants the eh_frame unwinder state to be the same on
28514 all paths to any point. So we need to emit the
28515 cfa_restores before the return. For -m64 we really
28516 don't need epilogue cfa_restores at all, except for
28517 this irritating dwarf2cfi with shrink-wrap
28518 requirement; the stack red zone means eh_frame info
28519 from the prologue telling the unwinder to restore
28520 from the stack is perfectly good right to the end of
28521 the function. */
28522 emit_insn (gen_blockage ());
28523 emit_cfa_restores (cfa_restores);
28524 cfa_restores = NULL_RTX;
28526 p = rtvec_alloc (2);
28527 RTVEC_ELT (p, 0) = simple_return_rtx;
28530 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
28531 ? gen_rtx_USE (VOIDmode,
28532 gen_rtx_REG (Pmode, LR_REGNO))
28533 : gen_rtx_CLOBBER (VOIDmode,
28534 gen_rtx_REG (Pmode, LR_REGNO)));
28536 /* If we have to restore more than two FP registers, branch to the
28537 restore function. It will return to our caller. */
28538 if (! restoring_FPRs_inline)
28540 int i;
28541 int reg;
28542 rtx sym;
28544 if (flag_shrink_wrap)
28545 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28547 sym = rs6000_savres_routine_sym (info,
28548 SAVRES_FPR | (lr ? SAVRES_LR : 0));
28549 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
28550 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28551 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28553 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28555 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28557 RTVEC_ELT (p, i + 4)
28558 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28559 if (flag_shrink_wrap)
28560 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28561 cfa_restores);
28565 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28568 if (cfa_restores)
28570 if (sibcall)
28571 /* Ensure the cfa_restores are hung off an insn that won't
28572 be reordered above other restores. */
28573 emit_insn (gen_blockage ());
28575 emit_cfa_restores (cfa_restores);
28579 /* Write function epilogue. */
28581 static void
28582 rs6000_output_function_epilogue (FILE *file,
28583 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
28585 #if TARGET_MACHO
28586 macho_branch_islands ();
28587 /* Mach-O doesn't support labels at the end of objects, so if
28588 it looks like we might want one, insert a NOP. */
28590 rtx_insn *insn = get_last_insn ();
28591 rtx_insn *deleted_debug_label = NULL;
28592 while (insn
28593 && NOTE_P (insn)
28594 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28596 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28597 notes; instead set their CODE_LABEL_NUMBER to -1,
28598 otherwise there would be code generation differences
28599 between -g and -g0. */
28600 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28601 deleted_debug_label = insn;
28602 insn = PREV_INSN (insn);
28604 if (insn
28605 && (LABEL_P (insn)
28606 || (NOTE_P (insn)
28607 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
28608 fputs ("\tnop\n", file);
28609 else if (deleted_debug_label)
28610 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28611 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28612 CODE_LABEL_NUMBER (insn) = -1;
28614 #endif
28616 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28617 on its format.
28619 We don't output a traceback table if -finhibit-size-directive was
28620 used. The documentation for -finhibit-size-directive reads
28621 ``don't output a @code{.size} assembler directive, or anything
28622 else that would cause trouble if the function is split in the
28623 middle, and the two halves are placed at locations far apart in
28624 memory.'' The traceback table has this property, since it
28625 includes the offset from the start of the function to the
28626 traceback table itself.
28628 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28629 different traceback table. */
28630 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28631 && ! flag_inhibit_size_directive
28632 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28634 const char *fname = NULL;
28635 const char *language_string = lang_hooks.name;
28636 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28637 int i;
28638 int optional_tbtab;
28639 rs6000_stack_t *info = rs6000_stack_info ();
28641 if (rs6000_traceback == traceback_full)
28642 optional_tbtab = 1;
28643 else if (rs6000_traceback == traceback_part)
28644 optional_tbtab = 0;
28645 else
28646 optional_tbtab = !optimize_size && !TARGET_ELF;
28648 if (optional_tbtab)
28650 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28651 while (*fname == '.') /* V.4 encodes . in the name */
28652 fname++;
28654 /* Need label immediately before tbtab, so we can compute
28655 its offset from the function start. */
28656 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28657 ASM_OUTPUT_LABEL (file, fname);
28660 /* The .tbtab pseudo-op can only be used for the first eight
28661 expressions, since it can't handle the possibly variable
28662 length fields that follow. However, if you omit the optional
28663 fields, the assembler outputs zeros for all optional fields
28664 anyway, giving each variable length field its minimum length
28665 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28666 pseudo-op at all. */
28668 /* An all-zero word flags the start of the tbtab, for debuggers
28669 that have to find it by searching forward from the entry
28670 point or from the current pc. */
28671 fputs ("\t.long 0\n", file);
28673 /* Tbtab format type. Use format type 0. */
28674 fputs ("\t.byte 0,", file);
28676 /* Language type. Unfortunately, there does not seem to be any
28677 official way to discover the language being compiled, so we
28678 use language_string.
28679 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28680 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28681 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28682 either, so for now use 0. */
28683 if (lang_GNU_C ()
28684 || ! strcmp (language_string, "GNU GIMPLE")
28685 || ! strcmp (language_string, "GNU Go")
28686 || ! strcmp (language_string, "libgccjit"))
28687 i = 0;
28688 else if (! strcmp (language_string, "GNU F77")
28689 || lang_GNU_Fortran ())
28690 i = 1;
28691 else if (! strcmp (language_string, "GNU Pascal"))
28692 i = 2;
28693 else if (! strcmp (language_string, "GNU Ada"))
28694 i = 3;
28695 else if (lang_GNU_CXX ()
28696 || ! strcmp (language_string, "GNU Objective-C++"))
28697 i = 9;
28698 else if (! strcmp (language_string, "GNU Java"))
28699 i = 13;
28700 else if (! strcmp (language_string, "GNU Objective-C"))
28701 i = 14;
28702 else
28703 gcc_unreachable ();
28704 fprintf (file, "%d,", i);
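/* Illustrative sketch (not part of GCC): the same mapping as a lookup
   table, using the traceback language codes listed in the comment
   above; the table and helper are hypothetical.  */
#if 0
#include <stdio.h>
#include <string.h>

static const struct { const char *name; int code; } tb_lang[] = {
  { "GNU C",		0 },
  { "GNU Fortran",	1 },
  { "GNU Pascal",	2 },
  { "GNU Ada",		3 },
  { "GNU C++",		9 },
  { "GNU Java",		13 },
  { "GNU Objective-C",	14 },
};

static int
tb_lang_code (const char *name)
{
  for (size_t i = 0; i < sizeof tb_lang / sizeof tb_lang[0]; i++)
    if (strcmp (name, tb_lang[i].name) == 0)
      return tb_lang[i].code;
  return 0;				/* default, as for LTO/Go/JIT */
}

int
main (void)
{
  printf ("%d\n", tb_lang_code ("GNU Ada"));	/* prints 3 */
  return 0;
}
#endif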
28706 /* 8 single bit fields: global linkage (not set for C extern linkage,
28707 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28708 from start of procedure stored in tbtab, internal function, function
28709 has controlled storage, function has no toc, function uses fp,
28710 function logs/aborts fp operations. */
28711 /* Assume that fp operations are used if any fp reg must be saved. */
28712 fprintf (file, "%d,",
28713 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28715 /* 6 bitfields: function is interrupt handler, name present in
28716 proc table, function calls alloca, on condition directives
28717 (controls stack walks, 3 bits), saves condition reg, saves
28718 link reg. */
28719 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28720 set up as a frame pointer, even when there is no alloca call. */
28721 fprintf (file, "%d,",
28722 ((optional_tbtab << 6)
28723 | ((optional_tbtab & frame_pointer_needed) << 5)
28724 | (info->cr_save_p << 1)
28725 | (info->lr_save_p)));
28727 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28728 (6 bits). */
28729 fprintf (file, "%d,",
28730 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28732 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28733 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28735 if (optional_tbtab)
28737 /* Compute the parameter info from the function decl argument
28738 list. */
28739 tree decl;
28740 int next_parm_info_bit = 31;
28742 for (decl = DECL_ARGUMENTS (current_function_decl);
28743 decl; decl = DECL_CHAIN (decl))
28745 rtx parameter = DECL_INCOMING_RTL (decl);
28746 machine_mode mode = GET_MODE (parameter);
28748 if (GET_CODE (parameter) == REG)
28750 if (SCALAR_FLOAT_MODE_P (mode))
28752 int bits;
28754 float_parms++;
28756 switch (mode)
28758 case SFmode:
28759 case SDmode:
28760 bits = 0x2;
28761 break;
28763 case DFmode:
28764 case DDmode:
28765 case TFmode:
28766 case TDmode:
28767 case IFmode:
28768 case KFmode:
28769 bits = 0x3;
28770 break;
28772 default:
28773 gcc_unreachable ();
28776 /* If only one bit will fit, don't OR in this entry. */
28777 if (next_parm_info_bit > 0)
28778 parm_info |= (bits << (next_parm_info_bit - 1));
28779 next_parm_info_bit -= 2;
28781 else
28783 fixed_parms += ((GET_MODE_SIZE (mode)
28784 + (UNITS_PER_WORD - 1))
28785 / UNITS_PER_WORD);
28786 next_parm_info_bit -= 1;
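/* Illustrative sketch (not part of GCC): packing the left-adjusted
   parameter-type bit fields the way the loop above does -- two bits
   per floating point parameter (10 single, 11 double), one zero bit
   per word of fixed point parameter, starting at bit 31.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned int parm_info = 0;
  int next_bit = 31;

  /* A double parameter: field 0x3.  */
  if (next_bit > 0)
    parm_info |= 0x3u << (next_bit - 1);
  next_bit -= 2;

  /* One word of fixed point parameter: a single 0 bit.  */
  next_bit -= 1;

  /* A single float parameter: field 0x2.  */
  if (next_bit > 0)
    parm_info |= 0x2u << (next_bit - 1);
  next_bit -= 2;

  printf ("parm_info = 0x%08x\n", parm_info);	/* 0xd0000000 */
  return 0;
}
#endif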
28792 /* Number of fixed point parameters. */
28793 /* This is actually the number of words of fixed point parameters; thus
28794 an 8-byte struct counts as 2, and the maximum value is 8. */
28795 fprintf (file, "%d,", fixed_parms);
28797 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28798 all on stack. */
28799 /* This is actually the number of fp registers that hold parameters;
28800 and thus the maximum value is 13. */
28801 /* Set parameters on stack bit if parameters are not in their original
28802 registers, regardless of whether they are on the stack? Xlc
28803 seems to set the bit when not optimizing. */
28804 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28806 if (! optional_tbtab)
28807 return;
28809 /* Optional fields follow. Some are variable length. */
28811 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
28812 11 double float. */
28813 /* There is an entry for each parameter in a register, in the order that
28814 they occur in the parameter list. Any intervening arguments on the
28815 stack are ignored. If the list overflows a long (max possible length
28816 34 bits) then completely leave off all elements that don't fit. */
28817 /* Only emit this long if there was at least one parameter. */
28818 if (fixed_parms || float_parms)
28819 fprintf (file, "\t.long %d\n", parm_info);
28821 /* Offset from start of code to tb table. */
28822 fputs ("\t.long ", file);
28823 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28824 RS6000_OUTPUT_BASENAME (file, fname);
28825 putc ('-', file);
28826 rs6000_output_function_entry (file, fname);
28827 putc ('\n', file);
28829 /* Interrupt handler mask. */
28830 /* Omit this long, since we never set the interrupt handler bit
28831 above. */
28833 /* Number of CTL (controlled storage) anchors. */
28834 /* Omit this long, since the has_ctl bit is never set above. */
28836 /* Displacement into stack of each CTL anchor. */
28837 /* Omit this list of longs, because there are no CTL anchors. */
28839 /* Length of function name. */
28840 if (*fname == '*')
28841 ++fname;
28842 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28844 /* Function name. */
28845 assemble_string (fname, strlen (fname));
28847 /* Register for alloca automatic storage; this is always reg 31.
28848 Only emit this if the alloca bit was set above. */
28849 if (frame_pointer_needed)
28850 fputs ("\t.byte 31\n", file);
28852 fputs ("\t.align 2\n", file);
28855 /* Arrange to define .LCTOC1 label, if not already done. */
28856 if (need_toc_init)
28858 need_toc_init = 0;
28859 if (!toc_initialized)
28861 switch_to_section (toc_section);
28862 switch_to_section (current_function_section ());
28867 /* -fsplit-stack support. */
28869 /* A SYMBOL_REF for __morestack. */
28870 static GTY(()) rtx morestack_ref;
28872 static rtx
28873 gen_add3_const (rtx rt, rtx ra, long c)
28875 if (TARGET_64BIT)
28876 return gen_adddi3 (rt, ra, GEN_INT (c));
28877 else
28878 return gen_addsi3 (rt, ra, GEN_INT (c));
28881 /* Emit -fsplit-stack prologue, which goes before the regular function
28882 prologue (at local entry point in the case of ELFv2). */
28884 void
28885 rs6000_expand_split_stack_prologue (void)
28887 rs6000_stack_t *info = rs6000_stack_info ();
28888 unsigned HOST_WIDE_INT allocate;
28889 long alloc_hi, alloc_lo;
28890 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28891 rtx_insn *insn;
28893 gcc_assert (flag_split_stack && reload_completed);
28895 if (!info->push_p)
28896 return;
28898 if (global_regs[29])
28900 error ("-fsplit-stack uses register r29");
28901 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
28902 "conflicts with %qD", global_regs_decl[29]);
28905 allocate = info->total_size;
28906 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
28908 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
28909 return;
28911 if (morestack_ref == NULL_RTX)
28913 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
28914 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
28915 | SYMBOL_FLAG_FUNCTION);
28918 r0 = gen_rtx_REG (Pmode, 0);
28919 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28920 r12 = gen_rtx_REG (Pmode, 12);
28921 emit_insn (gen_load_split_stack_limit (r0));
28922 /* Always emit two insns here to calculate the requested stack,
28923 so that the linker can edit them when adjusting size for calling
28924 non-split-stack code. */
28925 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
28926 alloc_lo = -allocate - alloc_hi;
28927 if (alloc_hi != 0)
28929 emit_insn (gen_add3_const (r12, r1, alloc_hi));
28930 if (alloc_lo != 0)
28931 emit_insn (gen_add3_const (r12, r12, alloc_lo));
28932 else
28933 emit_insn (gen_nop ());
28935 else
28937 emit_insn (gen_add3_const (r12, r1, alloc_lo));
28938 emit_insn (gen_nop ());
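/* Illustrative sketch (not part of GCC): the alloc_hi/alloc_lo split
   above decomposes a constant into a high part suitable for addis
   (which shifts its immediate left by 16) plus a sign-extended 16-bit
   low part, rounding so the low part stays in range.  */
#if 0
#include <assert.h>

static void
check_split (long value)		/* value fits in 32 bits */
{
  long hi = (value + 0x8000) & ~0xffffL;
  long lo = value - hi;
  assert (hi + lo == value);
  assert (lo >= -0x8000 && lo <= 0x7fff);
}

int
main (void)
{
  check_split (-70000);
  check_split (0x12345678);
  return 0;
}
#endif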
28941 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
28942 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
28943 ok_label = gen_label_rtx ();
28944 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28945 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
28946 gen_rtx_LABEL_REF (VOIDmode, ok_label),
28947 pc_rtx);
28948 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28949 JUMP_LABEL (jump) = ok_label;
28950 /* Mark the jump as very likely to be taken. */
28951 add_int_reg_note (jump, REG_BR_PROB,
28952 REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100);
28954 lr = gen_rtx_REG (Pmode, LR_REGNO);
28955 insn = emit_move_insn (r0, lr);
28956 RTX_FRAME_RELATED_P (insn) = 1;
28957 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
28958 RTX_FRAME_RELATED_P (insn) = 1;
28960 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
28961 const0_rtx, const0_rtx));
28962 call_fusage = NULL_RTX;
28963 use_reg (&call_fusage, r12);
28964 /* Say the call uses r0, even though it doesn't, to stop regrename
28965 from twiddling with the insns saving lr, trashing args for cfun.
28966 The insns restoring lr are similarly protected by making
28967 split_stack_return use r0. */
28968 use_reg (&call_fusage, r0);
28969 add_function_usage_to (insn, call_fusage);
28970 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
28971 insn = emit_move_insn (lr, r0);
28972 add_reg_note (insn, REG_CFA_RESTORE, lr);
28973 RTX_FRAME_RELATED_P (insn) = 1;
28974 emit_insn (gen_split_stack_return ());
28976 emit_label (ok_label);
28977 LABEL_NUSES (ok_label) = 1;
28980 /* Return the internal arg pointer used for function incoming
28981 arguments. When -fsplit-stack, the arg pointer is r12 so we need
28982 to copy it to a pseudo in order for it to be preserved over calls
28983 and suchlike. We'd really like to use a pseudo here for the
28984 internal arg pointer but data-flow analysis is not prepared to
28985 accept pseudos as live at the beginning of a function. */
28987 static rtx
28988 rs6000_internal_arg_pointer (void)
28990 if (flag_split_stack
28991 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
28992 == NULL))
28995 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
28997 rtx pat;
28999 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29000 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29002 /* Put the pseudo initialization right after the note at the
29003 beginning of the function. */
29004 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29005 gen_rtx_REG (Pmode, 12));
29006 push_topmost_sequence ();
29007 emit_insn_after (pat, get_insns ());
29008 pop_topmost_sequence ();
29010 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29011 FIRST_PARM_OFFSET (current_function_decl));
29013 return virtual_incoming_args_rtx;
29016 /* We may have to tell the dataflow pass that the split stack prologue
29017 is initializing a register. */
29019 static void
29020 rs6000_live_on_entry (bitmap regs)
29022 if (flag_split_stack)
29023 bitmap_set_bit (regs, 12);
29026 /* Emit -fsplit-stack dynamic stack allocation space check. */
29028 void
29029 rs6000_split_stack_space_check (rtx size, rtx label)
29031 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29032 rtx limit = gen_reg_rtx (Pmode);
29033 rtx requested = gen_reg_rtx (Pmode);
29034 rtx cmp = gen_reg_rtx (CCUNSmode);
29035 rtx jump;
29037 emit_insn (gen_load_split_stack_limit (limit));
29038 if (CONST_INT_P (size))
29039 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29040 else
29042 size = force_reg (Pmode, size);
29043 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29045 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29046 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29047 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29048 gen_rtx_LABEL_REF (VOIDmode, label),
29049 pc_rtx);
29050 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29051 JUMP_LABEL (jump) = label;
29054 /* A C compound statement that outputs the assembler code for a thunk
29055 function, used to implement C++ virtual function calls with
29056 multiple inheritance. The thunk acts as a wrapper around a virtual
29057 function, adjusting the implicit object parameter before handing
29058 control off to the real function.
29060 First, emit code to add the integer DELTA to the location that
29061 contains the incoming first argument. Assume that this argument
29062 contains a pointer, and is the one used to pass the `this' pointer
29063 in C++. This is the incoming argument *before* the function
29064 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29065 values of all other incoming arguments.
29067 After the addition, emit code to jump to FUNCTION, which is a
29068 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29069 not touch the return address. Hence returning from FUNCTION will
29070 return to whoever called the current `thunk'.
29072 The effect must be as if FUNCTION had been called directly with the
29073 adjusted first argument. This macro is responsible for emitting
29074 all of the code for a thunk function; output_function_prologue()
29075 and output_function_epilogue() are not invoked.
29077 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29078 been extracted from it.) It might possibly be useful on some
29079 targets, but probably not.
29081 If you do not define this macro, the target-independent code in the
29082 C++ frontend will generate a less efficient heavyweight thunk that
29083 calls FUNCTION instead of jumping to it. The generic approach does
29084 not support varargs. */
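/* Illustration: with DELTA == 16 and VCALL_OFFSET == 0, the code below
   boils down to (64-bit, sketch only):

	addi r3,r3,16		# adjust the this pointer
	b    <function>		# tail-jump, no LR update

   so a return from FUNCTION returns directly to the thunk's caller.  */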
29086 static void
29087 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29088 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29089 tree function)
29091 rtx this_rtx, funexp;
29092 rtx_insn *insn;
29094 reload_completed = 1;
29095 epilogue_completed = 1;
29097 /* Mark the end of the (empty) prologue. */
29098 emit_note (NOTE_INSN_PROLOGUE_END);
29100 /* Find the "this" pointer. If the function returns a structure,
29101 the structure return pointer is in r3. */
29102 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29103 this_rtx = gen_rtx_REG (Pmode, 4);
29104 else
29105 this_rtx = gen_rtx_REG (Pmode, 3);
29107 /* Apply the constant offset, if required. */
29108 if (delta)
29109 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29111 /* Apply the offset from the vtable, if required. */
29112 if (vcall_offset)
29114 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29115 rtx tmp = gen_rtx_REG (Pmode, 12);
29117 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29118 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29120 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29121 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29123 else
29125 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29127 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29129 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29132 /* Generate a tail call to the target function. */
29133 if (!TREE_USED (function))
29135 assemble_external (function);
29136 TREE_USED (function) = 1;
29138 funexp = XEXP (DECL_RTL (function), 0);
29139 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29141 #if TARGET_MACHO
29142 if (MACHOPIC_INDIRECT)
29143 funexp = machopic_indirect_call_target (funexp);
29144 #endif
29146 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29147 generate sibcall RTL explicitly. */
29148 insn = emit_call_insn (
29149 gen_rtx_PARALLEL (VOIDmode,
29150 gen_rtvec (4,
29151 gen_rtx_CALL (VOIDmode,
29152 funexp, const0_rtx),
29153 gen_rtx_USE (VOIDmode, const0_rtx),
29154 gen_rtx_USE (VOIDmode,
29155 gen_rtx_REG (SImode,
29156 LR_REGNO)),
29157 simple_return_rtx)));
29158 SIBLING_CALL_P (insn) = 1;
29159 emit_barrier ();
29161 /* Run just enough of rest_of_compilation to get the insns emitted.
29162 There's not really enough bulk here to make other passes such as
29163 instruction scheduling worthwhile. Note that use_thunk calls
29164 assemble_start_function and assemble_end_function. */
29165 insn = get_insns ();
29166 shorten_branches (insn);
29167 final_start_function (insn, file, 1);
29168 final (insn, file, 1);
29169 final_end_function ();
29171 reload_completed = 0;
29172 epilogue_completed = 0;
29175 /* A quick summary of the various types of 'constant-pool tables'
29176 under PowerPC:
29178 	Target       Flags            Name             One table per
29179 	AIX          (none)           AIX TOC          object file
29180 	AIX          -mfull-toc       AIX TOC          object file
29181 	AIX          -mminimal-toc    AIX minimal TOC  translation unit
29182 	SVR4/EABI    (none)           SVR4 SDATA       object file
29183 	SVR4/EABI    -fpic            SVR4 pic         object file
29184 	SVR4/EABI    -fPIC            SVR4 PIC         translation unit
29185 	SVR4/EABI    -mrelocatable    EABI TOC         function
29186 	SVR4/EABI    -maix            AIX TOC          object file
29187 	SVR4/EABI    -maix -mminimal-toc
29188 	                              AIX minimal TOC  translation unit
29190 	Name             Reg.  Set by   entries   contains:
29191 	                                made by   addrs?  fp?      sum?
29193 	AIX TOC            2   crt0     as        Y       option   option
29194 	AIX minimal TOC   30   prolog   gcc       Y       Y        option
29195 	SVR4 SDATA        13   crt0     gcc       N       Y        N
29196 	SVR4 pic          30   prolog   ld        Y       not yet  N
29197 	SVR4 PIC          30   prolog   gcc       Y       option   option
29198 	EABI TOC          30   prolog   gcc       Y       option   option
29202 /* Hash functions for the hash table. */
29204 static unsigned
29205 rs6000_hash_constant (rtx k)
29207 enum rtx_code code = GET_CODE (k);
29208 machine_mode mode = GET_MODE (k);
29209 unsigned result = (code << 3) ^ mode;
29210 const char *format;
29211 int flen, fidx;
29213 format = GET_RTX_FORMAT (code);
29214 flen = strlen (format);
29215 fidx = 0;
29217 switch (code)
29219 case LABEL_REF:
29220 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29222 case CONST_WIDE_INT:
29224 int i;
29225 flen = CONST_WIDE_INT_NUNITS (k);
29226 for (i = 0; i < flen; i++)
29227 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29228 return result;
29231 case CONST_DOUBLE:
29232 if (mode != VOIDmode)
29233 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29234 flen = 2;
29235 break;
29237 case CODE_LABEL:
29238 fidx = 3;
29239 break;
29241 default:
29242 break;
29245 for (; fidx < flen; fidx++)
29246 switch (format[fidx])
29248 case 's':
29250 unsigned i, len;
29251 const char *str = XSTR (k, fidx);
29252 len = strlen (str);
29253 result = result * 613 + len;
29254 for (i = 0; i < len; i++)
29255 result = result * 613 + (unsigned) str[i];
29256 break;
29258 case 'u':
29259 case 'e':
29260 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29261 break;
29262 case 'i':
29263 case 'n':
29264 result = result * 613 + (unsigned) XINT (k, fidx);
29265 break;
29266 case 'w':
29267 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29268 result = result * 613 + (unsigned) XWINT (k, fidx);
29269 else
29271 size_t i;
29272 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29273 result = result * 613 + (unsigned) (XWINT (k, fidx)
29274 >> CHAR_BIT * i);
29276 break;
29277 case '0':
29278 break;
29279 default:
29280 gcc_unreachable ();
29283 return result;
29286 hashval_t
29287 toc_hasher::hash (toc_hash_struct *thc)
29289 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29292 /* Compare H1 and H2 for equivalence. */
29294 bool
29295 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29297 rtx r1 = h1->key;
29298 rtx r2 = h2->key;
29300 if (h1->key_mode != h2->key_mode)
29301 return 0;
29303 return rtx_equal_p (r1, r2);
29306 /* These are the names given by the C++ front-end to vtables, and
29307 vtable-like objects. Ideally, this logic should not be here;
29308 instead, there should be some programmatic way of inquiring as
29309 to whether or not an object is a vtable. */
29311 #define VTABLE_NAME_P(NAME) \
29312 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29313 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29314 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29315 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29316 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29318 #ifdef NO_DOLLAR_IN_LABEL
29319 /* Return a GGC-allocated character string translating dollar signs in
29320 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
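/* For example, "foo$bar" becomes "foo_bar"; a name with no '$', or one
   whose first character is '$', is returned unchanged.  */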
29322 const char *
29323 rs6000_xcoff_strip_dollar (const char *name)
29325 char *strip, *p;
29326 const char *q;
29327 size_t len;
29329 q = (const char *) strchr (name, '$');
29331 if (q == 0 || q == name)
29332 return name;
29334 len = strlen (name);
29335 strip = XALLOCAVEC (char, len + 1);
29336 strcpy (strip, name);
29337 p = strip + (q - name);
29338 while (p)
29340 *p = '_';
29341 p = strchr (p + 1, '$');
29344 return ggc_alloc_string (strip, len);
29346 #endif
29348 void
29349 rs6000_output_symbol_ref (FILE *file, rtx x)
29351 /* Currently C++ toc references to vtables can be emitted before it
29352 is decided whether the vtable is public or private. If this is
29353 the case, then the linker will eventually complain that there is
29354 a reference to an unknown section. Thus, for vtables only,
29355 we emit the TOC reference to reference the symbol and not the
29356 section. */
29357 const char *name = XSTR (x, 0);
29359 tree decl = SYMBOL_REF_DECL (x);
29360 if (decl /* sync condition with assemble_external () */
29361 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
29362 && (TREE_CODE (decl) == VAR_DECL
29363 || TREE_CODE (decl) == FUNCTION_DECL)
29364 && name[strlen (name) - 1] != ']')
29366 name = concat (name,
29367 (TREE_CODE (decl) == FUNCTION_DECL
29368 ? "[DS]" : "[UA]"),
29369 NULL);
29370 XSTR (x, 0) = name;
29373 if (VTABLE_NAME_P (name))
29375 RS6000_OUTPUT_BASENAME (file, name);
29377 else
29378 assemble_name (file, name);
29381 /* Output a TOC entry. We derive the entry name from what is being
29382 written. */
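/* As an illustration (label syntax and pseudo-ops vary by target), a
   DFmode constant 1.0 on a 64-bit AIX-style TOC comes out as

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   while TARGET_ELF or -mminimal-toc would emit just the bare value,
   typically via a .quad, under the .LC label.  */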
29384 void
29385 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29387 char buf[256];
29388 const char *name = buf;
29389 rtx base = x;
29390 HOST_WIDE_INT offset = 0;
29392 gcc_assert (!TARGET_NO_TOC);
29394 /* When the linker won't eliminate them, don't output duplicate
29395 TOC entries (this happens on AIX if there is any kind of TOC,
29396 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29397 CODE_LABELs. */
29398 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29400 struct toc_hash_struct *h;
29402 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29403 time because GGC is not initialized at that point. */
29404 if (toc_hash_table == NULL)
29405 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29407 h = ggc_alloc<toc_hash_struct> ();
29408 h->key = x;
29409 h->key_mode = mode;
29410 h->labelno = labelno;
29412 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29413 if (*found == NULL)
29414 *found = h;
29415 else /* This is indeed a duplicate.
29416 Set this label equal to that label. */
29418 fputs ("\t.set ", file);
29419 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29420 fprintf (file, "%d,", labelno);
29421 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29422 fprintf (file, "%d\n", ((*found)->labelno));
29424 #ifdef HAVE_AS_TLS
29425 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29426 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29427 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29429 fputs ("\t.set ", file);
29430 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29431 fprintf (file, "%d,", labelno);
29432 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29433 fprintf (file, "%d\n", ((*found)->labelno));
29435 #endif
29436 return;
29440 /* If we're going to put a double constant in the TOC, make sure it's
29441 aligned properly when strict alignment is on. */
29442 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29443 && STRICT_ALIGNMENT
29444 && GET_MODE_BITSIZE (mode) >= 64
29445 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29446 ASM_OUTPUT_ALIGN (file, 3);
29449 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29451 /* Handle FP constants specially. Note that if we have a minimal
29452 TOC, things we put here aren't actually in the TOC, so we can allow
29453 FP constants. */
29454 if (GET_CODE (x) == CONST_DOUBLE
29455 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29456 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29458 long k[4];
29460 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29461 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29462 else
29463 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29465 if (TARGET_64BIT)
29467 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29468 fputs (DOUBLE_INT_ASM_OP, file);
29469 else
29470 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29471 k[0] & 0xffffffff, k[1] & 0xffffffff,
29472 k[2] & 0xffffffff, k[3] & 0xffffffff);
29473 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29474 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29475 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29476 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29477 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29478 return;
29480 else
29482 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29483 fputs ("\t.long ", file);
29484 else
29485 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29486 k[0] & 0xffffffff, k[1] & 0xffffffff,
29487 k[2] & 0xffffffff, k[3] & 0xffffffff);
29488 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29489 k[0] & 0xffffffff, k[1] & 0xffffffff,
29490 k[2] & 0xffffffff, k[3] & 0xffffffff);
29491 return;
29494 else if (GET_CODE (x) == CONST_DOUBLE
29495 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29497 long k[2];
29499 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29500 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29501 else
29502 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29504 if (TARGET_64BIT)
29506 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29507 fputs (DOUBLE_INT_ASM_OP, file);
29508 else
29509 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29510 k[0] & 0xffffffff, k[1] & 0xffffffff);
29511 fprintf (file, "0x%lx%08lx\n",
29512 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29513 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29514 return;
29516 else
29518 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29519 fputs ("\t.long ", file);
29520 else
29521 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29522 k[0] & 0xffffffff, k[1] & 0xffffffff);
29523 fprintf (file, "0x%lx,0x%lx\n",
29524 k[0] & 0xffffffff, k[1] & 0xffffffff);
29525 return;
29528 else if (GET_CODE (x) == CONST_DOUBLE
29529 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29531 long l;
29533 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29534 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29535 else
29536 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29538 if (TARGET_64BIT)
29540 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29541 fputs (DOUBLE_INT_ASM_OP, file);
29542 else
29543 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29544 if (WORDS_BIG_ENDIAN)
29545 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29546 else
29547 fprintf (file, "0x%lx\n", l & 0xffffffff);
29548 return;
29550 else
29552 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29553 fputs ("\t.long ", file);
29554 else
29555 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29556 fprintf (file, "0x%lx\n", l & 0xffffffff);
29557 return;
29560 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29562 unsigned HOST_WIDE_INT low;
29563 HOST_WIDE_INT high;
29565 low = INTVAL (x) & 0xffffffff;
29566 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29568 /* TOC entries are always Pmode-sized, so when big-endian
29569 smaller integer constants in the TOC need to be padded.
29570 (This is still a win over putting the constants in
29571 a separate constant pool, because then we'd have
29572 to have both a TOC entry _and_ the actual constant.)
29574 For a 32-bit target, CONST_INT values are loaded and shifted
29575 entirely within `low' and can be stored in one TOC entry. */
29577 /* It would be easy to make this work, but it doesn't now. */
29578 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29580 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29582 low |= high << 32;
29583 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29584 high = (HOST_WIDE_INT) low >> 32;
29585 low &= 0xffffffff;
29588 if (TARGET_64BIT)
29590 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29591 fputs (DOUBLE_INT_ASM_OP, file);
29592 else
29593 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29594 (long) high & 0xffffffff, (long) low & 0xffffffff);
29595 fprintf (file, "0x%lx%08lx\n",
29596 (long) high & 0xffffffff, (long) low & 0xffffffff);
29597 return;
29599 else
29601 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29603 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29604 fputs ("\t.long ", file);
29605 else
29606 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29607 (long) high & 0xffffffff, (long) low & 0xffffffff);
29608 fprintf (file, "0x%lx,0x%lx\n",
29609 (long) high & 0xffffffff, (long) low & 0xffffffff);
29611 else
29613 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29614 fputs ("\t.long ", file);
29615 else
29616 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29617 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29619 return;
29623 if (GET_CODE (x) == CONST)
29625 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29626 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29628 base = XEXP (XEXP (x, 0), 0);
29629 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29632 switch (GET_CODE (base))
29634 case SYMBOL_REF:
29635 name = XSTR (base, 0);
29636 break;
29638 case LABEL_REF:
29639 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29640 CODE_LABEL_NUMBER (XEXP (base, 0)));
29641 break;
29643 case CODE_LABEL:
29644 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29645 break;
29647 default:
29648 gcc_unreachable ();
29651 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29652 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29653 else
29655 fputs ("\t.tc ", file);
29656 RS6000_OUTPUT_BASENAME (file, name);
29658 if (offset < 0)
29659 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29660 else if (offset)
29661 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29663 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29664 after other TOC symbols, reducing overflow of small TOC access
29665 to [TC] symbols. */
29666 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29667 ? "[TE]," : "[TC],", file);
29670 /* Currently C++ toc references to vtables can be emitted before it
29671 is decided whether the vtable is public or private. If this is
29672 the case, then the linker will eventually complain that there is
29673 a TOC reference to an unknown section. Thus, for vtables only,
29674 we emit the TOC reference to reference the symbol and not the
29675 section. */
29676 if (VTABLE_NAME_P (name))
29678 RS6000_OUTPUT_BASENAME (file, name);
29679 if (offset < 0)
29680 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29681 else if (offset > 0)
29682 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29684 else
29685 output_addr_const (file, x);
29687 #if HAVE_AS_TLS
29688 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29690 switch (SYMBOL_REF_TLS_MODEL (base))
29692 case 0:
29693 break;
29694 case TLS_MODEL_LOCAL_EXEC:
29695 fputs ("@le", file);
29696 break;
29697 case TLS_MODEL_INITIAL_EXEC:
29698 fputs ("@ie", file);
29699 break;
29700 /* Use global-dynamic for local-dynamic. */
29701 case TLS_MODEL_GLOBAL_DYNAMIC:
29702 case TLS_MODEL_LOCAL_DYNAMIC:
29703 putc ('\n', file);
29704 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29705 fputs ("\t.tc .", file);
29706 RS6000_OUTPUT_BASENAME (file, name);
29707 fputs ("[TC],", file);
29708 output_addr_const (file, x);
29709 fputs ("@m", file);
29710 break;
29711 default:
29712 gcc_unreachable ();
29715 #endif
29717 putc ('\n', file);
29720 /* Output an assembler pseudo-op to write an ASCII string of N characters
29721 starting at P to FILE.
29723 On the RS/6000, we have to do this using the .byte operation and
29724 write out special characters outside the quoted string.
29725 Also, the assembler is broken; very long strings are truncated,
29726 so we must artificially break them up early. */
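/* For instance, output_ascii (file, "hi\"\n", 4) emits

	.byte "hi"""
	.byte 10

   doubling the embedded quote inside the quoted form and switching to
   a decimal .byte for the unprintable newline.  */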
29728 void
29729 output_ascii (FILE *file, const char *p, int n)
29731 char c;
29732 int i, count_string;
29733 const char *for_string = "\t.byte \"";
29734 const char *for_decimal = "\t.byte ";
29735 const char *to_close = NULL;
29737 count_string = 0;
29738 for (i = 0; i < n; i++)
29740 c = *p++;
29741 if (c >= ' ' && c < 0177)
29743 if (for_string)
29744 fputs (for_string, file);
29745 putc (c, file);
29747 /* Write two quotes to get one. */
29748 if (c == '"')
29750 putc (c, file);
29751 ++count_string;
29754 for_string = NULL;
29755 for_decimal = "\"\n\t.byte ";
29756 to_close = "\"\n";
29757 ++count_string;
29759 if (count_string >= 512)
29761 fputs (to_close, file);
29763 for_string = "\t.byte \"";
29764 for_decimal = "\t.byte ";
29765 to_close = NULL;
29766 count_string = 0;
29769 else
29771 if (for_decimal)
29772 fputs (for_decimal, file);
29773 fprintf (file, "%d", c);
29775 for_string = "\n\t.byte \"";
29776 for_decimal = ", ";
29777 to_close = "\n";
29778 count_string = 0;
29782 /* Now close the string if we have written one. Then end the line. */
29783 if (to_close)
29784 fputs (to_close, file);
29787 /* Generate a unique section name for FILENAME for a section type
29788 represented by SECTION_DESC. Output goes into BUF.
29790 SECTION_DESC can be any string, as long as it is different for each
29791 possible section type.
29793 We name the section in the same manner as xlc. The name begins with an
29794 underscore followed by the filename (after stripping any leading directory
29795 names) with the last period replaced by the string SECTION_DESC. If
29796 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29797 the name. */
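/* Worked examples (hypothetical inputs): FILENAME "dir/foo.c" with
   SECTION_DESC ".ro_" gives "_foo.ro_"; FILENAME "bar", which has no
   period, with SECTION_DESC ".bss_" gives "_bar.bss_".  */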
29799 void
29800 rs6000_gen_section_name (char **buf, const char *filename,
29801 const char *section_desc)
29803 const char *q, *after_last_slash, *last_period = 0;
29804 char *p;
29805 int len;
29807 after_last_slash = filename;
29808 for (q = filename; *q; q++)
29810 if (*q == '/')
29811 after_last_slash = q + 1;
29812 else if (*q == '.')
29813 last_period = q;
29816 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29817 *buf = (char *) xmalloc (len);
29819 p = *buf;
29820 *p++ = '_';
29822 for (q = after_last_slash; *q; q++)
29824 if (q == last_period)
29826 strcpy (p, section_desc);
29827 p += strlen (section_desc);
29828 break;
29831 else if (ISALNUM (*q))
29832 *p++ = *q;
29835 if (last_period == 0)
29836 strcpy (p, section_desc);
29837 else
29838 *p = '\0';
29841 /* Emit profile function. */
29843 void
29844 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29846 /* Non-standard profiling for kernels, which just saves LR then calls
29847 _mcount without worrying about arg saves. The idea is to change
29848 the function prologue as little as possible as it isn't easy to
29849 account for arg save/restore code added just for _mcount. */
29850 if (TARGET_PROFILE_KERNEL)
29851 return;
29853 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29855 #ifndef NO_PROFILE_COUNTERS
29856 # define NO_PROFILE_COUNTERS 0
29857 #endif
29858 if (NO_PROFILE_COUNTERS)
29859 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29860 LCT_NORMAL, VOIDmode, 0);
29861 else
29863 char buf[30];
29864 const char *label_name;
29865 rtx fun;
29867 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29868 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29869 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29871 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29872 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
29875 else if (DEFAULT_ABI == ABI_DARWIN)
29877 const char *mcount_name = RS6000_MCOUNT;
29878 int caller_addr_regno = LR_REGNO;
29880 /* Be conservative and always set this, at least for now. */
29881 crtl->uses_pic_offset_table = 1;
29883 #if TARGET_MACHO
29884 /* For PIC code, set up a stub and collect the caller's address
29885 from r0, which is where the prologue puts it. */
29886 if (MACHOPIC_INDIRECT
29887 && crtl->uses_pic_offset_table)
29888 caller_addr_regno = 0;
29889 #endif
29890 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29891 LCT_NORMAL, VOIDmode, 1,
29892 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29896 /* Write function profiler code. */
29898 void
29899 output_function_profiler (FILE *file, int labelno)
29901 char buf[100];
29903 switch (DEFAULT_ABI)
29905 default:
29906 gcc_unreachable ();
29908 case ABI_V4:
29909 if (!TARGET_32BIT)
29911 warning (0, "no profiling of 64-bit code for this ABI");
29912 return;
29914 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29915 fprintf (file, "\tmflr %s\n", reg_names[0]);
29916 if (NO_PROFILE_COUNTERS)
29918 asm_fprintf (file, "\tstw %s,4(%s)\n",
29919 reg_names[0], reg_names[1]);
29921 else if (TARGET_SECURE_PLT && flag_pic)
29923 if (TARGET_LINK_STACK)
29925 char name[32];
29926 get_ppc476_thunk_name (name);
29927 asm_fprintf (file, "\tbl %s\n", name);
29929 else
29930 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
29931 asm_fprintf (file, "\tstw %s,4(%s)\n",
29932 reg_names[0], reg_names[1]);
29933 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29934 asm_fprintf (file, "\taddis %s,%s,",
29935 reg_names[12], reg_names[12]);
29936 assemble_name (file, buf);
29937 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
29938 assemble_name (file, buf);
29939 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
29941 else if (flag_pic == 1)
29943 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
29944 asm_fprintf (file, "\tstw %s,4(%s)\n",
29945 reg_names[0], reg_names[1]);
29946 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29947 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
29948 assemble_name (file, buf);
29949 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
29951 else if (flag_pic > 1)
29953 asm_fprintf (file, "\tstw %s,4(%s)\n",
29954 reg_names[0], reg_names[1]);
29955 /* Now, we need to get the address of the label. */
29956 if (TARGET_LINK_STACK)
29958 char name[32];
29959 get_ppc476_thunk_name (name);
29960 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
29961 assemble_name (file, buf);
29962 fputs ("-.\n1:", file);
29963 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29964 asm_fprintf (file, "\taddi %s,%s,4\n",
29965 reg_names[11], reg_names[11]);
29967 else
29969 fputs ("\tbcl 20,31,1f\n\t.long ", file);
29970 assemble_name (file, buf);
29971 fputs ("-.\n1:", file);
29972 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29974 asm_fprintf (file, "\tlwz %s,0(%s)\n",
29975 reg_names[0], reg_names[11]);
29976 asm_fprintf (file, "\tadd %s,%s,%s\n",
29977 reg_names[0], reg_names[0], reg_names[11]);
29979 else
29981 asm_fprintf (file, "\tlis %s,", reg_names[12]);
29982 assemble_name (file, buf);
29983 fputs ("@ha\n", file);
29984 asm_fprintf (file, "\tstw %s,4(%s)\n",
29985 reg_names[0], reg_names[1]);
29986 asm_fprintf (file, "\tla %s,", reg_names[0]);
29987 assemble_name (file, buf);
29988 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
29991 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
29992 fprintf (file, "\tbl %s%s\n",
29993 RS6000_MCOUNT, flag_pic ? "@plt" : "");
29994 break;
29996 case ABI_AIX:
29997 case ABI_ELFv2:
29998 case ABI_DARWIN:
29999 /* Don't do anything, done in output_profile_hook (). */
30000 break;
30006 /* The following variable holds the last issued insn. */
30008 static rtx_insn *last_scheduled_insn;
30010 /* The following variable helps to balance issuing of load and
30011 store instructions. */
30013 static int load_store_pendulum;
30015 /* The following variable helps pair divide insns during scheduling. */
30016 static int divide_cnt;
30017 /* The following variable helps pair and alternate vector and vector load
30018 insns during scheduling. */
30019 static int vec_load_pendulum;
30022 /* Power4 load update and store update instructions are cracked into a
30023 load or store and an integer insn which are executed in the same cycle.
30024 Branches have their own dispatch slot which does not count against the
30025 GCC issue rate, but it changes the program flow so there are no other
30026 instructions to issue in this cycle. */
30028 static int
30029 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30031 last_scheduled_insn = insn;
30032 if (GET_CODE (PATTERN (insn)) == USE
30033 || GET_CODE (PATTERN (insn)) == CLOBBER)
30035 cached_can_issue_more = more;
30036 return cached_can_issue_more;
30039 if (insn_terminates_group_p (insn, current_group))
30041 cached_can_issue_more = 0;
30042 return cached_can_issue_more;
30045 /* If the insn has no reservation but we reach here anyway, don't charge it an issue slot. */
30046 if (recog_memoized (insn) < 0)
30047 return more;
30049 if (rs6000_sched_groups)
30051 if (is_microcoded_insn (insn))
30052 cached_can_issue_more = 0;
30053 else if (is_cracked_insn (insn))
30054 cached_can_issue_more = more > 2 ? more - 2 : 0;
30055 else
30056 cached_can_issue_more = more - 1;
30058 return cached_can_issue_more;
30061 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30062 return 0;
30064 cached_can_issue_more = more - 1;
30065 return cached_can_issue_more;
30068 static int
30069 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30071 int r = rs6000_variable_issue_1 (insn, more);
30072 if (verbose)
30073 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30074 return r;
30077 /* Adjust the cost of a scheduling dependency. Return the new cost of
30078 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
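/* For example, on most of the cpus handled below a compare feeding its
   dependent branch reports COST + 2 to discourage expensive mispredicts,
   while an anti dependence always reports 0.  */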
30080 static int
30081 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30082 unsigned int)
30084 enum attr_type attr_type;
30086 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30087 return cost;
30089 switch (dep_type)
30091 case REG_DEP_TRUE:
30093 /* Data dependency; DEP_INSN writes a register that INSN reads
30094 some cycles later. */
30096 /* Separate a load from a narrower, dependent store. */
30097 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30098 && GET_CODE (PATTERN (insn)) == SET
30099 && GET_CODE (PATTERN (dep_insn)) == SET
30100 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30101 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30102 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30103 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30104 return cost + 14;
30106 attr_type = get_attr_type (insn);
30108 switch (attr_type)
30110 case TYPE_JMPREG:
30111 /* Tell the first scheduling pass about the latency between
30112 a mtctr and bctr (and mtlr and br/blr). The first
30113 scheduling pass will not know about this latency since
30114 the mtctr instruction, which has the latency associated
30115 to it, will be generated by reload. */
30116 return 4;
30117 case TYPE_BRANCH:
30118 /* Leave some extra cycles between a compare and its
30119 dependent branch, to inhibit expensive mispredicts. */
30120 if ((rs6000_cpu_attr == CPU_PPC603
30121 || rs6000_cpu_attr == CPU_PPC604
30122 || rs6000_cpu_attr == CPU_PPC604E
30123 || rs6000_cpu_attr == CPU_PPC620
30124 || rs6000_cpu_attr == CPU_PPC630
30125 || rs6000_cpu_attr == CPU_PPC750
30126 || rs6000_cpu_attr == CPU_PPC7400
30127 || rs6000_cpu_attr == CPU_PPC7450
30128 || rs6000_cpu_attr == CPU_PPCE5500
30129 || rs6000_cpu_attr == CPU_PPCE6500
30130 || rs6000_cpu_attr == CPU_POWER4
30131 || rs6000_cpu_attr == CPU_POWER5
30132 || rs6000_cpu_attr == CPU_POWER7
30133 || rs6000_cpu_attr == CPU_POWER8
30134 || rs6000_cpu_attr == CPU_POWER9
30135 || rs6000_cpu_attr == CPU_CELL)
30136 && recog_memoized (dep_insn)
30137 && (INSN_CODE (dep_insn) >= 0))
30139 switch (get_attr_type (dep_insn))
30141 case TYPE_CMP:
30142 case TYPE_FPCOMPARE:
30143 case TYPE_CR_LOGICAL:
30144 case TYPE_DELAYED_CR:
30145 return cost + 2;
30146 case TYPE_EXTS:
30147 case TYPE_MUL:
30148 if (get_attr_dot (dep_insn) == DOT_YES)
30149 return cost + 2;
30150 else
30151 break;
30152 case TYPE_SHIFT:
30153 if (get_attr_dot (dep_insn) == DOT_YES
30154 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30155 return cost + 2;
30156 else
30157 break;
30158 default:
30159 break;
30161 break;
30163 case TYPE_STORE:
30164 case TYPE_FPSTORE:
30165 if ((rs6000_cpu == PROCESSOR_POWER6)
30166 && recog_memoized (dep_insn)
30167 && (INSN_CODE (dep_insn) >= 0))
30170 if (GET_CODE (PATTERN (insn)) != SET)
30171 /* If this happens, we have to extend this to schedule
30172 optimally. Return default for now. */
30173 return cost;
30175 /* Adjust the cost for the case where the value written
30176 by a fixed point operation is used as the address
30177 gen value on a store. */
30178 switch (get_attr_type (dep_insn))
30180 case TYPE_LOAD:
30181 case TYPE_CNTLZ:
30183 if (! store_data_bypass_p (dep_insn, insn))
30184 return get_attr_sign_extend (dep_insn)
30185 == SIGN_EXTEND_YES ? 6 : 4;
30186 break;
30188 case TYPE_SHIFT:
30190 if (! store_data_bypass_p (dep_insn, insn))
30191 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES
30192 ? 6 : 3;
30193 break;
30195 case TYPE_INTEGER:
30196 case TYPE_ADD:
30197 case TYPE_LOGICAL:
30198 case TYPE_EXTS:
30199 case TYPE_INSERT:
30201 if (! store_data_bypass_p (dep_insn, insn))
30202 return 3;
30203 break;
30205 case TYPE_STORE:
30206 case TYPE_FPLOAD:
30207 case TYPE_FPSTORE:
30209 if (get_attr_update (dep_insn) == UPDATE_YES
30210 && ! store_data_bypass_p (dep_insn, insn))
30211 return 3;
30212 break;
30214 case TYPE_MUL:
30216 if (! store_data_bypass_p (dep_insn, insn))
30217 return 17;
30218 break;
30220 case TYPE_DIV:
30222 if (! store_data_bypass_p (dep_insn, insn))
30223 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30224 break;
30226 default:
30227 break;
30230 break;
30232 case TYPE_LOAD:
30233 if ((rs6000_cpu == PROCESSOR_POWER6)
30234 && recog_memoized (dep_insn)
30235 && (INSN_CODE (dep_insn) >= 0))
30238 /* Adjust the cost for the case where the value written
30239 by a fixed point instruction is used within the address
30240 gen portion of a subsequent load(u)(x). */
30241 switch (get_attr_type (dep_insn))
30243 case TYPE_LOAD:
30244 case TYPE_CNTLZ:
30246 if (set_to_load_agen (dep_insn, insn))
30247 return get_attr_sign_extend (dep_insn)
30248 == SIGN_EXTEND_YES ? 6 : 4;
30249 break;
30251 case TYPE_SHIFT:
30253 if (set_to_load_agen (dep_insn, insn))
30254 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES
30255 ? 6 : 3;
30256 break;
30258 case TYPE_INTEGER:
30259 case TYPE_ADD:
30260 case TYPE_LOGICAL:
30261 case TYPE_EXTS:
30262 case TYPE_INSERT:
30264 if (set_to_load_agen (dep_insn, insn))
30265 return 3;
30266 break;
30268 case TYPE_STORE:
30269 case TYPE_FPLOAD:
30270 case TYPE_FPSTORE:
30272 if (get_attr_update (dep_insn) == UPDATE_YES
30273 && set_to_load_agen (dep_insn, insn))
30274 return 3;
30275 break;
30277 case TYPE_MUL:
30279 if (set_to_load_agen (dep_insn, insn))
30280 return 17;
30281 break;
30283 case TYPE_DIV:
30285 if (set_to_load_agen (dep_insn, insn))
30286 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30287 break;
30289 default:
30290 break;
30293 break;
30295 case TYPE_FPLOAD:
30296 if ((rs6000_cpu == PROCESSOR_POWER6)
30297 && get_attr_update (insn) == UPDATE_NO
30298 && recog_memoized (dep_insn)
30299 && (INSN_CODE (dep_insn) >= 0)
30300 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30301 return 2;
30303 default:
30304 break;
30307 /* Fall out to return default cost. */
30309 break;
30311 case REG_DEP_OUTPUT:
30312 /* Output dependency; DEP_INSN writes a register that INSN writes some
30313 cycles later. */
30314 if ((rs6000_cpu == PROCESSOR_POWER6)
30315 && recog_memoized (dep_insn)
30316 && (INSN_CODE (dep_insn) >= 0))
30318 attr_type = get_attr_type (insn);
30320 switch (attr_type)
30322 case TYPE_FP:
30323 case TYPE_FPSIMPLE:
30324 if (get_attr_type (dep_insn) == TYPE_FP
30325 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30326 return 1;
30327 break;
30328 case TYPE_FPLOAD:
30329 if (get_attr_update (insn) == UPDATE_NO
30330 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30331 return 2;
30332 break;
30333 default:
30334 break;
30337 /* Fall through, no cost for output dependency. */
30339 case REG_DEP_ANTI:
30340 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30341 cycles later. */
30342 return 0;
30344 default:
30345 gcc_unreachable ();
30348 return cost;
30351 /* Debug version of rs6000_adjust_cost. */
30353 static int
30354 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30355 int cost, unsigned int dw)
30357 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30359 if (ret != cost)
30361 const char *dep;
30363 switch (dep_type)
30365 default: dep = "unknown dependency"; break;
30366 case REG_DEP_TRUE: dep = "data dependency"; break;
30367 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30368 case REG_DEP_ANTI: dep = "anti dependency"; break;
30371 fprintf (stderr,
30372 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30373 "%s, insn:\n", ret, cost, dep);
30375 debug_rtx (insn);
30378 return ret;
30381 /* Return true if INSN is microcoded.
30382 Return false otherwise. */
30384 static bool
30385 is_microcoded_insn (rtx_insn *insn)
30387 if (!insn || !NONDEBUG_INSN_P (insn)
30388 || GET_CODE (PATTERN (insn)) == USE
30389 || GET_CODE (PATTERN (insn)) == CLOBBER)
30390 return false;
30392 if (rs6000_cpu_attr == CPU_CELL)
30393 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30395 if (rs6000_sched_groups
30396 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30398 enum attr_type type = get_attr_type (insn);
30399 if ((type == TYPE_LOAD
30400 && get_attr_update (insn) == UPDATE_YES
30401 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30402 || ((type == TYPE_LOAD || type == TYPE_STORE)
30403 && get_attr_update (insn) == UPDATE_YES
30404 && get_attr_indexed (insn) == INDEXED_YES)
30405 || type == TYPE_MFCR)
30406 return true;
30409 return false;
30412 /* The function returns true if INSN is cracked into 2 instructions
30413 by the processor (and therefore occupies 2 issue slots). */
30415 static bool
30416 is_cracked_insn (rtx_insn *insn)
30418 if (!insn || !NONDEBUG_INSN_P (insn)
30419 || GET_CODE (PATTERN (insn)) == USE
30420 || GET_CODE (PATTERN (insn)) == CLOBBER)
30421 return false;
30423 if (rs6000_sched_groups
30424 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30426 enum attr_type type = get_attr_type (insn);
30427 if ((type == TYPE_LOAD
30428 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30429 && get_attr_update (insn) == UPDATE_NO)
30430 || (type == TYPE_LOAD
30431 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30432 && get_attr_update (insn) == UPDATE_YES
30433 && get_attr_indexed (insn) == INDEXED_NO)
30434 || (type == TYPE_STORE
30435 && get_attr_update (insn) == UPDATE_YES
30436 && get_attr_indexed (insn) == INDEXED_NO)
30437 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30438 && get_attr_update (insn) == UPDATE_YES)
30439 || type == TYPE_DELAYED_CR
30440 || (type == TYPE_EXTS
30441 && get_attr_dot (insn) == DOT_YES)
30442 || (type == TYPE_SHIFT
30443 && get_attr_dot (insn) == DOT_YES
30444 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30445 || (type == TYPE_MUL
30446 && get_attr_dot (insn) == DOT_YES)
30447 || type == TYPE_DIV
30448 || (type == TYPE_INSERT
30449 && get_attr_size (insn) == SIZE_32))
30450 return true;
30453 return false;
30456 /* The function returns true if INSN can be issued only from
30457 the branch slot. */
30459 static bool
30460 is_branch_slot_insn (rtx_insn *insn)
30462 if (!insn || !NONDEBUG_INSN_P (insn)
30463 || GET_CODE (PATTERN (insn)) == USE
30464 || GET_CODE (PATTERN (insn)) == CLOBBER)
30465 return false;
30467 if (rs6000_sched_groups)
30469 enum attr_type type = get_attr_type (insn);
30470 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30471 return true;
30472 return false;
30475 return false;
30478 /* Return true if OUT_INSN sets a value that is
30479 used in the address generation computation of IN_INSN. */
30480 static bool
30481 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30483 rtx out_set, in_set;
30485 /* For performance reasons, only handle the simple case where
30486 both loads are a single_set. */
30487 out_set = single_set (out_insn);
30488 if (out_set)
30490 in_set = single_set (in_insn);
30491 if (in_set)
30492 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30495 return false;
30498 /* Try to determine base/offset/size parts of the given MEM.
30499 Return true if successful, false if the values couldn't
30500 all be determined.
30502 This function only looks for REG or REG+CONST address forms.
30503 REG+REG address form will return false. */
30505 static bool
30506 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30507 HOST_WIDE_INT *size)
30509 rtx addr_rtx;
30510 if (MEM_SIZE_KNOWN_P (mem))
30511 *size = MEM_SIZE (mem);
30512 else
30513 return false;
30515 addr_rtx = (XEXP (mem, 0));
30516 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30517 addr_rtx = XEXP (addr_rtx, 1);
30519 *offset = 0;
30520 while (GET_CODE (addr_rtx) == PLUS
30521 && CONST_INT_P (XEXP (addr_rtx, 1)))
30523 *offset += INTVAL (XEXP (addr_rtx, 1));
30524 addr_rtx = XEXP (addr_rtx, 0);
30526 if (!REG_P (addr_rtx))
30527 return false;
30529 *base = addr_rtx;
30530 return true;
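/* E.g. a 4-byte MEM whose address is (plus (reg 9) (const_int 8)) yields
   *BASE = (reg 9), *OFFSET = 8, *SIZE = 4; an indexed address such as
   (plus (reg 9) (reg 10)) makes the function return false.  */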
30533 /* Return true if the target storage location of
30534 MEM1 is adjacent to the target storage location of MEM2. */
30537 static bool
30538 adjacent_mem_locations (rtx mem1, rtx mem2)
30540 rtx reg1, reg2;
30541 HOST_WIDE_INT off1, size1, off2, size2;
30543 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30544 && get_memref_parts (mem2, &reg2, &off2, &size2))
30545 return ((REGNO (reg1) == REGNO (reg2))
30546 && ((off1 + size1 == off2)
30547 || (off2 + size2 == off1)));
30549 return false;
30552 /* This function returns true if it can be determined that the two MEM
30553 locations overlap by at least 1 byte based on base reg/offset/size. */
30555 static bool
30556 mem_locations_overlap (rtx mem1, rtx mem2)
30558 rtx reg1, reg2;
30559 HOST_WIDE_INT off1, size1, off2, size2;
30561 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30562 && get_memref_parts (mem2, &reg2, &off2, &size2))
30563 return ((REGNO (reg1) == REGNO (reg2))
30564 && (((off1 <= off2) && (off1 + size1 > off2))
30565 || ((off2 <= off1) && (off2 + size2 > off1))));
30567 return false;
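/* Under these definitions, 4-byte accesses at (reg 9)+0 and (reg 9)+4
   are adjacent but do not overlap, while 4-byte accesses at (reg 9)+0
   and (reg 9)+2 overlap but are not adjacent.  */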
30570 /* A C statement (sans semicolon) to update the integer scheduling
30571 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30572 INSN earlier, reduce the priority to execute INSN later. Do not
30573 define this macro if you do not need to adjust the scheduling
30574 priorities of insns. */
30576 static int
30577 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30579 rtx load_mem, str_mem;
30580 /* On machines (like the 750) which have asymmetric integer units,
30581 where one integer unit can do multiplies and divides and the other
30582 can't, reduce the priority of multiply/divide so that other
30583 integer operations are scheduled ahead of it. */
30585 #if 0
30586 if (! INSN_P (insn))
30587 return priority;
30589 if (GET_CODE (PATTERN (insn)) == USE)
30590 return priority;
30592 switch (rs6000_cpu_attr) {
30593 case CPU_PPC750:
30594 switch (get_attr_type (insn))
30596 default:
30597 break;
30599 case TYPE_MUL:
30600 case TYPE_DIV:
30601 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30602 priority, priority);
30603 if (priority >= 0 && priority < 0x01000000)
30604 priority >>= 3;
30605 break;
30608 #endif
30610 if (insn_must_be_first_in_group (insn)
30611 && reload_completed
30612 && current_sched_info->sched_max_insns_priority
30613 && rs6000_sched_restricted_insns_priority)
30616 /* Prioritize insns that can be dispatched only in the first
30617 dispatch slot. */
30618 if (rs6000_sched_restricted_insns_priority == 1)
30619 /* Attach highest priority to insn. This means that in
30620 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30621 precede 'priority' (critical path) considerations. */
30622 return current_sched_info->sched_max_insns_priority;
30623 else if (rs6000_sched_restricted_insns_priority == 2)
30624 /* Increase priority of insn by a minimal amount. This means that in
30625 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30626 considerations precede dispatch-slot restriction considerations. */
30627 return (priority + 1);
30630 if (rs6000_cpu == PROCESSOR_POWER6
30631 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30632 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30633 /* Attach highest priority to insn if the scheduler has just issued two
30634 stores and this instruction is a load, or two loads and this instruction
30635 is a store. Power6 wants loads and stores scheduled alternately
30636 when possible */
30637 return current_sched_info->sched_max_insns_priority;
30639 return priority;
30642 /* Return true if the instruction is nonpipelined on the Cell. */
30643 static bool
30644 is_nonpipeline_insn (rtx_insn *insn)
30646 enum attr_type type;
30647 if (!insn || !NONDEBUG_INSN_P (insn)
30648 || GET_CODE (PATTERN (insn)) == USE
30649 || GET_CODE (PATTERN (insn)) == CLOBBER)
30650 return false;
30652 type = get_attr_type (insn);
30653 if (type == TYPE_MUL
30654 || type == TYPE_DIV
30655 || type == TYPE_SDIV
30656 || type == TYPE_DDIV
30657 || type == TYPE_SSQRT
30658 || type == TYPE_DSQRT
30659 || type == TYPE_MFCR
30660 || type == TYPE_MFCRF
30661 || type == TYPE_MFJMPR)
30663 return true;
30665 return false;
30669 /* Return how many instructions the machine can issue per cycle. */
30671 static int
30672 rs6000_issue_rate (void)
30674 /* Unless scheduling for register pressure, use an issue rate of 1 for
30675 the first scheduling pass to avoid degrading the final schedule. */
30676 if (!reload_completed && !flag_sched_pressure)
30677 return 1;
30679 switch (rs6000_cpu_attr) {
30680 case CPU_RS64A:
30681 case CPU_PPC601: /* ? */
30682 case CPU_PPC7450:
30683 return 3;
30684 case CPU_PPC440:
30685 case CPU_PPC603:
30686 case CPU_PPC750:
30687 case CPU_PPC7400:
30688 case CPU_PPC8540:
30689 case CPU_PPC8548:
30690 case CPU_CELL:
30691 case CPU_PPCE300C2:
30692 case CPU_PPCE300C3:
30693 case CPU_PPCE500MC:
30694 case CPU_PPCE500MC64:
30695 case CPU_PPCE5500:
30696 case CPU_PPCE6500:
30697 case CPU_TITAN:
30698 return 2;
30699 case CPU_PPC476:
30700 case CPU_PPC604:
30701 case CPU_PPC604E:
30702 case CPU_PPC620:
30703 case CPU_PPC630:
30704 return 4;
30705 case CPU_POWER4:
30706 case CPU_POWER5:
30707 case CPU_POWER6:
30708 case CPU_POWER7:
30709 return 5;
30710 case CPU_POWER8:
30711 return 7;
30712 case CPU_POWER9:
30713 return 6;
30714 default:
30715 return 1;
30719 /* Return how many instructions to look ahead for better insn
30720 scheduling. */
30722 static int
30723 rs6000_use_sched_lookahead (void)
30725 switch (rs6000_cpu_attr)
30727 case CPU_PPC8540:
30728 case CPU_PPC8548:
30729 return 4;
30731 case CPU_CELL:
30732 return (reload_completed ? 8 : 0);
30734 default:
30735 return 0;
30739 /* We are choosing insn from the ready queue. Return zero if INSN can be
30740 chosen. */
30741 static int
30742 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30744 if (ready_index == 0)
30745 return 0;
30747 if (rs6000_cpu_attr != CPU_CELL)
30748 return 0;
30750 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30752 if (!reload_completed
30753 || is_nonpipeline_insn (insn)
30754 || is_microcoded_insn (insn))
30755 return 1;
30757 return 0;
30760 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30761 and return true. */
30763 static bool
30764 find_mem_ref (rtx pat, rtx *mem_ref)
30766 const char * fmt;
30767 int i, j;
30769 /* stack_tie does not produce any real memory traffic. */
30770 if (tie_operand (pat, VOIDmode))
30771 return false;
30773 if (GET_CODE (pat) == MEM)
30775 *mem_ref = pat;
30776 return true;
30779 /* Recursively process the pattern. */
30780 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30782 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30784 if (fmt[i] == 'e')
30786 if (find_mem_ref (XEXP (pat, i), mem_ref))
30787 return true;
30789 else if (fmt[i] == 'E')
30790 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30792 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30793 return true;
30797 return false;
30800 /* Determine if PAT is a PATTERN of a load insn. */
30802 static bool
30803 is_load_insn1 (rtx pat, rtx *load_mem)
30805 if (!pat)
30806 return false;
30808 if (GET_CODE (pat) == SET)
30809 return find_mem_ref (SET_SRC (pat), load_mem);
30811 if (GET_CODE (pat) == PARALLEL)
30813 int i;
30815 for (i = 0; i < XVECLEN (pat, 0); i++)
30816 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30817 return true;
30820 return false;
30823 /* Determine if INSN loads from memory. */
30825 static bool
30826 is_load_insn (rtx insn, rtx *load_mem)
30828 if (!insn || !INSN_P (insn))
30829 return false;
30831 if (CALL_P (insn))
30832 return false;
30834 return is_load_insn1 (PATTERN (insn), load_mem);
30837 /* Determine if PAT is a PATTERN of a store insn. */
30839 static bool
30840 is_store_insn1 (rtx pat, rtx *str_mem)
30842 if (!pat)
30843 return false;
30845 if (GET_CODE (pat) == SET)
30846 return find_mem_ref (SET_DEST (pat), str_mem);
30848 if (GET_CODE (pat) == PARALLEL)
30850 int i;
30852 for (i = 0; i < XVECLEN (pat, 0); i++)
30853 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30854 return true;
30857 return false;
30860 /* Determine if INSN stores to memory. */
30862 static bool
30863 is_store_insn (rtx insn, rtx *str_mem)
30865 if (!insn || !INSN_P (insn))
30866 return false;
30868 return is_store_insn1 (PATTERN (insn), str_mem);
30871 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30873 static bool
30874 is_power9_pairable_vec_type (enum attr_type type)
30876 switch (type)
30878 case TYPE_VECSIMPLE:
30879 case TYPE_VECCOMPLEX:
30880 case TYPE_VECDIV:
30881 case TYPE_VECCMP:
30882 case TYPE_VECPERM:
30883 case TYPE_VECFLOAT:
30884 case TYPE_VECFDIV:
30885 case TYPE_VECDOUBLE:
30886 return true;
30887 default:
30888 break;
30890 return false;
30893 /* Returns whether the dependence between INSN and NEXT is considered
30894 costly by the given target. */
30896 static bool
30897 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30899 rtx insn;
30900 rtx next;
30901 rtx load_mem, str_mem;
30903 /* If the flag is not enabled - no dependence is considered costly;
30904 allow all dependent insns in the same group.
30905 This is the most aggressive option. */
30906 if (rs6000_sched_costly_dep == no_dep_costly)
30907 return false;
30909 /* If the flag is set to 1 - a dependence is always considered costly;
30910 do not allow dependent instructions in the same group.
30911 This is the most conservative option. */
30912 if (rs6000_sched_costly_dep == all_deps_costly)
30913 return true;
30915 insn = DEP_PRO (dep);
30916 next = DEP_CON (dep);
30918 if (rs6000_sched_costly_dep == store_to_load_dep_costly
30919 && is_load_insn (next, &load_mem)
30920 && is_store_insn (insn, &str_mem))
30921 /* Prevent load after store in the same group. */
30922 return true;
30924 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
30925 && is_load_insn (next, &load_mem)
30926 && is_store_insn (insn, &str_mem)
30927 && DEP_TYPE (dep) == REG_DEP_TRUE
30928 && mem_locations_overlap (str_mem, load_mem))
30929 /* Prevent load after store in the same group if it is a true
30930 dependence. */
30931 return true;
30933 /* The flag is set to X; dependences with latency >= X are considered costly,
30934 and will not be scheduled in the same group. */
30935 if (rs6000_sched_costly_dep <= max_dep_latency
30936 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
30937 return true;
30939 return false;
30942 /* Return the next insn after INSN that is found before TAIL is reached,
30943 skipping any "non-active" insns - insns that will not actually occupy
30944 an issue slot. Return NULL_RTX if such an insn is not found. */
30946 static rtx_insn *
30947 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
30949 if (insn == NULL_RTX || insn == tail)
30950 return NULL;
30952 while (1)
30954 insn = NEXT_INSN (insn);
30955 if (insn == NULL_RTX || insn == tail)
30956 return NULL;
30958 if (CALL_P (insn)
30959 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
30960 || (NONJUMP_INSN_P (insn)
30961 && GET_CODE (PATTERN (insn)) != USE
30962 && GET_CODE (PATTERN (insn)) != CLOBBER
30963 && INSN_CODE (insn) != CODE_FOR_stack_tie))
30964 break;
30966 return insn;
30969 /* Do Power9 specific sched_reorder2 reordering of ready list. */
30971 static int
30972 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
30974 int pos;
30975 int i;
30976 rtx_insn *tmp;
30977 enum attr_type type;
30979 type = get_attr_type (last_scheduled_insn);
30981 /* Try to issue fixed point divides back-to-back in pairs so they will be
30982 routed to separate execution units and execute in parallel. */
30983 if (type == TYPE_DIV && divide_cnt == 0)
30985 /* First divide has been scheduled. */
30986 divide_cnt = 1;
30988 /* Scan the ready list looking for another divide, if found move it
30989 to the end of the list so it is chosen next. */
30990 pos = lastpos;
30991 while (pos >= 0)
30993 if (recog_memoized (ready[pos]) >= 0
30994 && get_attr_type (ready[pos]) == TYPE_DIV)
30996 tmp = ready[pos];
30997 for (i = pos; i < lastpos; i++)
30998 ready[i] = ready[i + 1];
30999 ready[lastpos] = tmp;
31000 break;
31002 pos--;
31005 else
31007 /* Last insn was the second divide or not a divide; reset the counter. */
31008 divide_cnt = 0;
31010 /* Power9 can execute 2 vector operations and 2 vector loads in a single
31011 cycle. So try to pair up and alternate groups of vector and vector
31012 load instructions.
31014 To aid this formation, a counter is maintained to keep track of
31015 vec/vecload insns issued. The value of vec_load_pendulum maintains
31016 the current state with the following values:
31018 0 : Initial state, no vec/vecload group has been started.
31020 -1 : 1 vector load has been issued and another has been found on
31021 the ready list and moved to the end.
31023 -2 : 2 vector loads have been issued and a vector operation has
31024 been found and moved to the end of the ready list.
31026 -3 : 2 vector loads and a vector insn have been issued and a
31027 vector operation has been found and moved to the end of the
31028 ready list.
31030 1 : 1 vector insn has been issued and another has been found and
31031 moved to the end of the ready list.
31033 2 : 2 vector insns have been issued and a vector load has been
31034 found and moved to the end of the ready list.
31036 3 : 2 vector insns and a vector load have been issued and another
31037 vector load has been found and moved to the end of the ready
31038 list. */
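/* Illustrative trace (an assumption for exposition, not taken from the
   source: it supposes a suitable insn is found on the ready list at each
   step): issuing vecload, vecload, vec, vec walks the pendulum
   0 -> -1 -> -2 -> -3, after which the next vector insn resets it to 0
   below; the mirror sequence vec, vec, vecload, vecload walks it
   0 -> 1 -> 2 -> 3 symmetrically.  */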
31039 if (type == TYPE_VECLOAD)
31041 /* Issued a vecload. */
31042 if (vec_load_pendulum == 0)
31044 /* We issued a single vecload, look for another and move it to
31045 the end of the ready list so it will be scheduled next.
31046 Set pendulum if found. */
31047 pos = lastpos;
31048 while (pos >= 0)
31050 if (recog_memoized (ready[pos]) >= 0
31051 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
31053 tmp = ready[pos];
31054 for (i = pos; i < lastpos; i++)
31055 ready[i] = ready[i + 1];
31056 ready[lastpos] = tmp;
31057 vec_load_pendulum = -1;
31058 return cached_can_issue_more;
31060 pos--;
31063 else if (vec_load_pendulum == -1)
31065 /* This is the second vecload we've issued, search the ready
31066 list for a vector operation so we can try to schedule a
31067 pair of those next. If found move to the end of the ready
31068 list so it is scheduled next and set the pendulum. */
31069 pos = lastpos;
31070 while (pos >= 0)
31072 if (recog_memoized (ready[pos]) >= 0
31073 && is_power9_pairable_vec_type (
31074 get_attr_type (ready[pos])))
31076 tmp = ready[pos];
31077 for (i = pos; i < lastpos; i++)
31078 ready[i] = ready[i + 1];
31079 ready[lastpos] = tmp;
31080 vec_load_pendulum = -2;
31081 return cached_can_issue_more;
31083 pos--;
31086 else if (vec_load_pendulum == 2)
31088 /* Two vector ops have been issued and we've just issued a
31089 vecload, look for another vecload and move to end of ready
31090 list if found. */
31091 pos = lastpos;
31092 while (pos >= 0)
31094 if (recog_memoized (ready[pos]) >= 0
31095 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
31097 tmp = ready[pos];
31098 for (i = pos; i < lastpos; i++)
31099 ready[i] = ready[i + 1];
31100 ready[lastpos] = tmp;
31101 /* Set pendulum so that next vecload will be seen as
31102 finishing a group, not start of one. */
31103 vec_load_pendulum = 3;
31104 return cached_can_issue_more;
31106 pos--;
31110 else if (is_power9_pairable_vec_type (type))
31112 /* Issued a vector operation. */
31113 if (vec_load_pendulum == 0)
31114 /* We issued a single vec op, look for another and move it
31115 to the end of the ready list so it will be scheduled next.
31116 Set pendulum if found. */
31118 pos = lastpos;
31119 while (pos >= 0)
31121 if (recog_memoized (ready[pos]) >= 0
31122 && is_power9_pairable_vec_type (
31123 get_attr_type (ready[pos])))
31125 tmp = ready[pos];
31126 for (i = pos; i < lastpos; i++)
31127 ready[i] = ready[i + 1];
31128 ready[lastpos] = tmp;
31129 vec_load_pendulum = 1;
31130 return cached_can_issue_more;
31132 pos--;
31135 else if (vec_load_pendulum == 1)
31137 /* This is the second vec op we've issued, search the ready
31138 list for a vecload operation so we can try to schedule a
31139 pair of those next. If found move to the end of the ready
31140 list so it is scheduled next and set the pendulum. */
31141 pos = lastpos;
31142 while (pos >= 0)
31144 if (recog_memoized (ready[pos]) >= 0
31145 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
31147 tmp = ready[pos];
31148 for (i = pos; i < lastpos; i++)
31149 ready[i] = ready[i + 1];
31150 ready[lastpos] = tmp;
31151 vec_load_pendulum = 2;
31152 return cached_can_issue_more;
31154 pos--;
31157 else if (vec_load_pendulum == -2)
31159 /* Two vecload ops have been issued and we've just issued a
31160 vec op, look for another vec op and move to end of ready
31161 list if found. */
31162 pos = lastpos;
31163 while (pos >= 0)
31165 if (recog_memoized (ready[pos]) >= 0
31166 && is_power9_pairable_vec_type (
31167 get_attr_type (ready[pos])))
31169 tmp = ready[pos];
31170 for (i = pos; i < lastpos; i++)
31171 ready[i] = ready[i + 1];
31172 ready[lastpos] = tmp;
31173 /* Set pendulum so that next vec op will be seen as
31174 finishing a group, not start of one. */
31175 vec_load_pendulum = -3;
31176 return cached_can_issue_more;
31178 pos--;
31183 /* We've either finished a vec/vecload group, couldn't find an insn to
31184 continue the current group, or the last insn had nothing to do with
31185 a group. In any case, reset the pendulum. */
31186 vec_load_pendulum = 0;
31189 return cached_can_issue_more;
31192 /* We are about to begin issuing insns for this clock cycle. */
31194 static int
31195 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31196 rtx_insn **ready ATTRIBUTE_UNUSED,
31197 int *pn_ready ATTRIBUTE_UNUSED,
31198 int clock_var ATTRIBUTE_UNUSED)
31200 int n_ready = *pn_ready;
31202 if (sched_verbose)
31203 fprintf (dump, "// rs6000_sched_reorder :\n");
31205 /* Reorder the ready list, if the second to last ready insn
31206 is a nonpipeline insn. */
31207 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31209 if (is_nonpipeline_insn (ready[n_ready - 1])
31210 && (recog_memoized (ready[n_ready - 2]) > 0))
31211 /* Simply swap first two insns. */
31212 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31215 if (rs6000_cpu == PROCESSOR_POWER6)
31216 load_store_pendulum = 0;
31218 return rs6000_issue_rate ();
31221 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31223 static int
31224 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31225 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31227 if (sched_verbose)
31228 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31230 /* For Power6, we need to handle some special cases to try and keep the
31231 store queue from overflowing and triggering expensive flushes.
31233 This code monitors how load and store instructions are being issued
31234 and skews the ready list one way or the other to increase the likelihood
31235 that a desired instruction is issued at the proper time.
31237 A couple of things are done. First, we maintain a "load_store_pendulum"
31238 to track the current state of load/store issue.
31240 - If the pendulum is at zero, then no loads or stores have been
31241 issued in the current cycle so we do nothing.
31243 - If the pendulum is 1, then a single load has been issued in this
31244 cycle and we attempt to locate another load in the ready list to
31245 issue with it.
31247 - If the pendulum is -2, then two stores have already been
31248 issued in this cycle, so we increase the priority of the first load
31249 in the ready list to increase its likelihood of being chosen first
31250 in the next cycle.
31252 - If the pendulum is -1, then a single store has been issued in this
31253 cycle and we attempt to locate another store in the ready list to
31254 issue with it, preferring a store to an adjacent memory location to
31255 facilitate store pairing in the store queue.
31257 - If the pendulum is 2, then two loads have already been
31258 issued in this cycle, so we increase the priority of the first store
31259 in the ready list to increase its likelihood of being chosen first
31260 in the next cycle.
31262 - If the pendulum < -2 or > 2, then do nothing.
31264 Note: This code covers the most common scenarios. There exist
31265 non-load/store instructions which make use of the LSU and which
31266 would need to be accounted for to strictly model the behavior
31267 of the machine. Those instructions are currently unaccounted
31268 for to help minimize compile time overhead of this code.
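      Example (illustrative restatement of the states above): issuing two
      loads in one cycle moves the pendulum to 2, at which point the first
      store on the ready list gets a priority boost so that stores are
      favored on the next cycle; two stores likewise move it to -2 and
      boost the first ready load.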
31270 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31272 int pos;
31273 int i;
31274 rtx_insn *tmp;
31275 rtx load_mem, str_mem;
31277 if (is_store_insn (last_scheduled_insn, &str_mem))
31278 /* Issuing a store, swing the load_store_pendulum to the left */
31279 load_store_pendulum--;
31280 else if (is_load_insn (last_scheduled_insn, &load_mem))
31281 /* Issuing a load, swing the load_store_pendulum to the right */
31282 load_store_pendulum++;
31283 else
31284 return cached_can_issue_more;
31286 /* If the pendulum is balanced, or there is only one instruction on
31287 the ready list, then all is well, so return. */
31288 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31289 return cached_can_issue_more;
31291 if (load_store_pendulum == 1)
31293 /* A load has been issued in this cycle. Scan the ready list
31294 for another load to issue with it */
31295 pos = *pn_ready - 1;
31297 while (pos >= 0)
31299 if (is_load_insn (ready[pos], &load_mem))
31301 /* Found a load. Move it to the head of the ready list,
31302 and adjust its priority so that it is more likely to
31303 stay there */
31304 tmp = ready[pos];
31305 for (i = pos; i < *pn_ready - 1; i++)
31306 ready[i] = ready[i + 1];
31307 ready[*pn_ready - 1] = tmp;
31309 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31310 INSN_PRIORITY (tmp)++;
31311 break;
31313 pos--;
31316 else if (load_store_pendulum == -2)
31318 /* Two stores have been issued in this cycle. Increase the
31319 priority of the first load in the ready list to favor it for
31320 issuing in the next cycle. */
31321 pos = *pn_ready - 1;
31323 while (pos >= 0)
31325 if (is_load_insn (ready[pos], &load_mem)
31326 && !sel_sched_p ()
31327 && INSN_PRIORITY_KNOWN (ready[pos]))
31329 INSN_PRIORITY (ready[pos])++;
31331 /* Adjust the pendulum to account for the fact that a load
31332 was found and increased in priority. This is to prevent
31333 increasing the priority of multiple loads */
31334 load_store_pendulum--;
31336 break;
31338 pos--;
31341 else if (load_store_pendulum == -1)
31343 /* A store has been issued in this cycle. Scan the ready list for
31344 another store to issue with it, preferring a store to an adjacent
31345 memory location */
31346 int first_store_pos = -1;
31348 pos = *pn_ready - 1;
31350 while (pos >= 0)
31352 if (is_store_insn (ready[pos], &str_mem))
31354 rtx str_mem2;
31355 /* Maintain the index of the first store found on the
31356 list */
31357 if (first_store_pos == -1)
31358 first_store_pos = pos;
31360 if (is_store_insn (last_scheduled_insn, &str_mem2)
31361 && adjacent_mem_locations (str_mem, str_mem2))
31363 /* Found an adjacent store. Move it to the head of the
31364 ready list, and adjust its priority so that it is
31365 more likely to stay there */
31366 tmp = ready[pos];
31367 for (i = pos; i < *pn_ready - 1; i++)
31368 ready[i] = ready[i + 1];
31369 ready[*pn_ready - 1] = tmp;
31371 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31372 INSN_PRIORITY (tmp)++;
31374 first_store_pos = -1;
31376 break;
31379 pos--;
31382 if (first_store_pos >= 0)
31384 /* An adjacent store wasn't found, but a non-adjacent store was,
31385 so move the non-adjacent store to the front of the ready
31386 list, and adjust its priority so that it is more likely to
31387 stay there. */
31388 tmp = ready[first_store_pos];
31389 for (i = first_store_pos; i < *pn_ready - 1; i++)
31390 ready[i] = ready[i + 1];
31391 ready[*pn_ready - 1] = tmp;
31392 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31393 INSN_PRIORITY (tmp)++;
31396 else if (load_store_pendulum == 2)
31398 /* Two loads have been issued in this cycle. Increase the priority
31399 of the first store in the ready list to favor it for issuing in
31400 the next cycle. */
31401 pos = *pn_ready - 1;
31403 while (pos >= 0)
31405 if (is_store_insn (ready[pos], &str_mem)
31406 && !sel_sched_p ()
31407 && INSN_PRIORITY_KNOWN (ready[pos]))
31409 INSN_PRIORITY (ready[pos])++;
31411 /* Adjust the pendulum to account for the fact that a store
31412 was found and increased in priority. This is to prevent
31413 increasing the priority of multiple stores */
31414 load_store_pendulum++;
31416 break;
31418 pos--;
31423 /* Do Power9 dependent reordering if necessary. */
31424 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31425 && recog_memoized (last_scheduled_insn) >= 0)
31426 return power9_sched_reorder2 (ready, *pn_ready - 1);
31428 return cached_can_issue_more;
31431 /* Return whether the presence of INSN causes a dispatch group termination
31432 of group WHICH_GROUP.
31434 If WHICH_GROUP == current_group, this function will return true if INSN
31435 causes the termination of the current group (i.e., the dispatch group to
31436 which INSN belongs). This means that INSN will be the last insn in the
31437 group it belongs to.
31439 If WHICH_GROUP == previous_group, this function will return true if INSN
31440 causes the termination of the previous group (i.e., the dispatch group that
31441 precedes the group to which INSN belongs). This means that INSN will be
31442 the first insn in the group it belongs to. */
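/* For example (illustrative): on POWER6, TYPE_ISYNC appears in both the
   must-be-first and must-be-last tables below, so an isync terminates
   both the previous and the current group and effectively dispatches
   alone.  */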
31444 static bool
31445 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31447 bool first, last;
31449 if (! insn)
31450 return false;
31452 first = insn_must_be_first_in_group (insn);
31453 last = insn_must_be_last_in_group (insn);
31455 if (first && last)
31456 return true;
31458 if (which_group == current_group)
31459 return last;
31460 else if (which_group == previous_group)
31461 return first;
31463 return false;
31467 static bool
31468 insn_must_be_first_in_group (rtx_insn *insn)
31470 enum attr_type type;
31472 if (!insn
31473 || NOTE_P (insn)
31474 || DEBUG_INSN_P (insn)
31475 || GET_CODE (PATTERN (insn)) == USE
31476 || GET_CODE (PATTERN (insn)) == CLOBBER)
31477 return false;
31479 switch (rs6000_cpu)
31481 case PROCESSOR_POWER5:
31482 if (is_cracked_insn (insn))
31483 return true;
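      /* FALLTHRU: POWER5 also performs the POWER4 checks below.  */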
31484 case PROCESSOR_POWER4:
31485 if (is_microcoded_insn (insn))
31486 return true;
31488 if (!rs6000_sched_groups)
31489 return false;
31491 type = get_attr_type (insn);
31493 switch (type)
31495 case TYPE_MFCR:
31496 case TYPE_MFCRF:
31497 case TYPE_MTCR:
31498 case TYPE_DELAYED_CR:
31499 case TYPE_CR_LOGICAL:
31500 case TYPE_MTJMPR:
31501 case TYPE_MFJMPR:
31502 case TYPE_DIV:
31503 case TYPE_LOAD_L:
31504 case TYPE_STORE_C:
31505 case TYPE_ISYNC:
31506 case TYPE_SYNC:
31507 return true;
31508 default:
31509 break;
31511 break;
31512 case PROCESSOR_POWER6:
31513 type = get_attr_type (insn);
31515 switch (type)
31517 case TYPE_EXTS:
31518 case TYPE_CNTLZ:
31519 case TYPE_TRAP:
31520 case TYPE_MUL:
31521 case TYPE_INSERT:
31522 case TYPE_FPCOMPARE:
31523 case TYPE_MFCR:
31524 case TYPE_MTCR:
31525 case TYPE_MFJMPR:
31526 case TYPE_MTJMPR:
31527 case TYPE_ISYNC:
31528 case TYPE_SYNC:
31529 case TYPE_LOAD_L:
31530 case TYPE_STORE_C:
31531 return true;
31532 case TYPE_SHIFT:
31533 if (get_attr_dot (insn) == DOT_NO
31534 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31535 return true;
31536 else
31537 break;
31538 case TYPE_DIV:
31539 if (get_attr_size (insn) == SIZE_32)
31540 return true;
31541 else
31542 break;
31543 case TYPE_LOAD:
31544 case TYPE_STORE:
31545 case TYPE_FPLOAD:
31546 case TYPE_FPSTORE:
31547 if (get_attr_update (insn) == UPDATE_YES)
31548 return true;
31549 else
31550 break;
31551 default:
31552 break;
31554 break;
31555 case PROCESSOR_POWER7:
31556 type = get_attr_type (insn);
31558 switch (type)
31560 case TYPE_CR_LOGICAL:
31561 case TYPE_MFCR:
31562 case TYPE_MFCRF:
31563 case TYPE_MTCR:
31564 case TYPE_DIV:
31565 case TYPE_ISYNC:
31566 case TYPE_LOAD_L:
31567 case TYPE_STORE_C:
31568 case TYPE_MFJMPR:
31569 case TYPE_MTJMPR:
31570 return true;
31571 case TYPE_MUL:
31572 case TYPE_SHIFT:
31573 case TYPE_EXTS:
31574 if (get_attr_dot (insn) == DOT_YES)
31575 return true;
31576 else
31577 break;
31578 case TYPE_LOAD:
31579 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31580 || get_attr_update (insn) == UPDATE_YES)
31581 return true;
31582 else
31583 break;
31584 case TYPE_STORE:
31585 case TYPE_FPLOAD:
31586 case TYPE_FPSTORE:
31587 if (get_attr_update (insn) == UPDATE_YES)
31588 return true;
31589 else
31590 break;
31591 default:
31592 break;
31594 break;
31595 case PROCESSOR_POWER8:
31596 type = get_attr_type (insn);
31598 switch (type)
31600 case TYPE_CR_LOGICAL:
31601 case TYPE_DELAYED_CR:
31602 case TYPE_MFCR:
31603 case TYPE_MFCRF:
31604 case TYPE_MTCR:
31605 case TYPE_SYNC:
31606 case TYPE_ISYNC:
31607 case TYPE_LOAD_L:
31608 case TYPE_STORE_C:
31609 case TYPE_VECSTORE:
31610 case TYPE_MFJMPR:
31611 case TYPE_MTJMPR:
31612 return true;
31613 case TYPE_SHIFT:
31614 case TYPE_EXTS:
31615 case TYPE_MUL:
31616 if (get_attr_dot (insn) == DOT_YES)
31617 return true;
31618 else
31619 break;
31620 case TYPE_LOAD:
31621 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31622 || get_attr_update (insn) == UPDATE_YES)
31623 return true;
31624 else
31625 break;
31626 case TYPE_STORE:
31627 if (get_attr_update (insn) == UPDATE_YES
31628 && get_attr_indexed (insn) == INDEXED_YES)
31629 return true;
31630 else
31631 break;
31632 default:
31633 break;
31635 break;
31636 default:
31637 break;
31640 return false;
31643 static bool
31644 insn_must_be_last_in_group (rtx_insn *insn)
31646 enum attr_type type;
31648 if (!insn
31649 || NOTE_P (insn)
31650 || DEBUG_INSN_P (insn)
31651 || GET_CODE (PATTERN (insn)) == USE
31652 || GET_CODE (PATTERN (insn)) == CLOBBER)
31653 return false;
31655 switch (rs6000_cpu) {
31656 case PROCESSOR_POWER4:
31657 case PROCESSOR_POWER5:
31658 if (is_microcoded_insn (insn))
31659 return true;
31661 if (is_branch_slot_insn (insn))
31662 return true;
31664 break;
31665 case PROCESSOR_POWER6:
31666 type = get_attr_type (insn);
31668 switch (type)
31670 case TYPE_EXTS:
31671 case TYPE_CNTLZ:
31672 case TYPE_TRAP:
31673 case TYPE_MUL:
31674 case TYPE_FPCOMPARE:
31675 case TYPE_MFCR:
31676 case TYPE_MTCR:
31677 case TYPE_MFJMPR:
31678 case TYPE_MTJMPR:
31679 case TYPE_ISYNC:
31680 case TYPE_SYNC:
31681 case TYPE_LOAD_L:
31682 case TYPE_STORE_C:
31683 return true;
31684 case TYPE_SHIFT:
31685 if (get_attr_dot (insn) == DOT_NO
31686 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31687 return true;
31688 else
31689 break;
31690 case TYPE_DIV:
31691 if (get_attr_size (insn) == SIZE_32)
31692 return true;
31693 else
31694 break;
31695 default:
31696 break;
31698 break;
31699 case PROCESSOR_POWER7:
31700 type = get_attr_type (insn);
31702 switch (type)
31704 case TYPE_ISYNC:
31705 case TYPE_SYNC:
31706 case TYPE_LOAD_L:
31707 case TYPE_STORE_C:
31708 return true;
31709 case TYPE_LOAD:
31710 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31711 && get_attr_update (insn) == UPDATE_YES)
31712 return true;
31713 else
31714 break;
31715 case TYPE_STORE:
31716 if (get_attr_update (insn) == UPDATE_YES
31717 && get_attr_indexed (insn) == INDEXED_YES)
31718 return true;
31719 else
31720 break;
31721 default:
31722 break;
31724 break;
31725 case PROCESSOR_POWER8:
31726 type = get_attr_type (insn);
31728 switch (type)
31730 case TYPE_MFCR:
31731 case TYPE_MTCR:
31732 case TYPE_ISYNC:
31733 case TYPE_SYNC:
31734 case TYPE_LOAD_L:
31735 case TYPE_STORE_C:
31736 return true;
31737 case TYPE_LOAD:
31738 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31739 && get_attr_update (insn) == UPDATE_YES)
31740 return true;
31741 else
31742 break;
31743 case TYPE_STORE:
31744 if (get_attr_update (insn) == UPDATE_YES
31745 && get_attr_indexed (insn) == INDEXED_YES)
31746 return true;
31747 else
31748 break;
31749 default:
31750 break;
31752 break;
31753 default:
31754 break;
31757 return false;
31760 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31761 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31763 static bool
31764 is_costly_group (rtx *group_insns, rtx next_insn)
31766 int i;
31767 int issue_rate = rs6000_issue_rate ();
31769 for (i = 0; i < issue_rate; i++)
31771 sd_iterator_def sd_it;
31772 dep_t dep;
31773 rtx insn = group_insns[i];
31775 if (!insn)
31776 continue;
31778 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31780 rtx next = DEP_CON (dep);
31782 if (next == next_insn
31783 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31784 return true;
31788 return false;
31791 /* Utility of the function redefine_groups.
31792 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31793 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31794 to keep it "far" (in a separate group) from GROUP_INSNS, following
31795 one of the following schemes, depending on the value of the flag
31796 -minsert-sched-nops=X:
31797 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31798 in order to force NEXT_INSN into a separate group.
31799 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31800 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31801 insertion (has a group just ended, how many vacant issue slots remain in the
31802 last group, and how many dispatch groups were encountered so far). */
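/* For example (illustrative, assuming the option value spelling
   regroup_exact): -minsert-sched-nops=2 emits exactly two nops before
   NEXT_INSN, while -minsert-sched-nops=regroup_exact emits however many
   nops are needed to force NEXT_INSN into a new dispatch group.  */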
31804 static int
31805 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31806 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31807 int *group_count)
31809 rtx nop;
31810 bool force;
31811 int issue_rate = rs6000_issue_rate ();
31812 bool end = *group_end;
31813 int i;
31815 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31816 return can_issue_more;
31818 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31819 return can_issue_more;
31821 force = is_costly_group (group_insns, next_insn);
31822 if (!force)
31823 return can_issue_more;
31825 if (sched_verbose > 6)
31826 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
31827 *group_count, can_issue_more);
31829 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31831 if (*group_end)
31832 can_issue_more = 0;
31834 /* Since only a branch can be issued in the last issue_slot, it is
31835 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31836 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31837 in this case the last nop will start a new group and the branch
31838 will be forced to the new group. */
31839 if (can_issue_more && !is_branch_slot_insn (next_insn))
31840 can_issue_more--;
31842 /* Do we have a special group ending nop? */
31843 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
31844 || rs6000_cpu_attr == CPU_POWER8)
31846 nop = gen_group_ending_nop ();
31847 emit_insn_before (nop, next_insn);
31848 can_issue_more = 0;
31850 else
31851 while (can_issue_more > 0)
31853 nop = gen_nop ();
31854 emit_insn_before (nop, next_insn);
31855 can_issue_more--;
31858 *group_end = true;
31859 return 0;
31862 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31864 int n_nops = rs6000_sched_insert_nops;
31866 /* Nops can't be issued from the branch slot, so the effective
31867 issue_rate for nops is 'issue_rate - 1'. */
31868 if (can_issue_more == 0)
31869 can_issue_more = issue_rate;
31870 can_issue_more--;
31871 if (can_issue_more == 0)
31873 can_issue_more = issue_rate - 1;
31874 (*group_count)++;
31875 end = true;
31876 for (i = 0; i < issue_rate; i++)
31878 group_insns[i] = 0;
31882 while (n_nops > 0)
31884 nop = gen_nop ();
31885 emit_insn_before (nop, next_insn);
31886 if (can_issue_more == issue_rate - 1) /* new group begins */
31887 end = false;
31888 can_issue_more--;
31889 if (can_issue_more == 0)
31891 can_issue_more = issue_rate - 1;
31892 (*group_count)++;
31893 end = true;
31894 for (i = 0; i < issue_rate; i++)
31896 group_insns[i] = 0;
31899 n_nops--;
31902 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31903 can_issue_more++;
31905 /* Is next_insn going to start a new group? */
31906 *group_end
31907 = (end
31908 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31909 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31910 || (can_issue_more < issue_rate &&
31911 insn_terminates_group_p (next_insn, previous_group)));
31912 if (*group_end && end)
31913 (*group_count)--;
31915 if (sched_verbose > 6)
31916 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31917 *group_count, can_issue_more);
31918 return can_issue_more;
31921 return can_issue_more;
31924 /* This function tries to synchronize the dispatch groups that the compiler "sees"
31925 with the dispatch groups that the processor dispatcher is expected to
31926 form in practice. It tries to achieve this synchronization by forcing the
31927 estimated processor grouping on the compiler (as opposed to the function
31928 'pad_groups' which tries to force the scheduler's grouping on the processor).
31930 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31931 examines the (estimated) dispatch groups that will be formed by the processor
31932 dispatcher. It marks these group boundaries to reflect the estimated
31933 processor grouping, overriding the grouping that the scheduler had marked.
31934 Depending on the value of the flag '-minsert-sched-nops' this function can
31935 force certain insns into separate groups or force a certain distance between
31936 them by inserting nops, for example, if there exists a "costly dependence"
31937 between the insns.
31939 The function estimates the group boundaries that the processor will form as
31940 follows: It keeps track of how many vacant issue slots are available after
31941 each insn. A subsequent insn will start a new group if one of the following
31942 4 cases applies:
31943 - no more vacant issue slots remain in the current dispatch group.
31944 - only the last issue slot, which is the branch slot, is vacant, but the next
31945 insn is not a branch.
31946 - at most the last two issue slots, including the branch slot, are vacant,
31947 which means that a cracked insn (which occupies two issue slots) can't be
31948 issued in this group.
31949 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31950 start a new group. */
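/* Illustrative example (assuming an issue rate of 5, i.e. four non-branch
   slots plus the branch slot, as on POWER4/5): a cracked insn reached when
   only two slots remain vacant, or a non-branch insn reached when only the
   branch slot remains, starts a new group under the rules above.  */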
31952 static int
31953 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31954 rtx_insn *tail)
31956 rtx_insn *insn, *next_insn;
31957 int issue_rate;
31958 int can_issue_more;
31959 int slot, i;
31960 bool group_end;
31961 int group_count = 0;
31962 rtx *group_insns;
31964 /* Initialize. */
31965 issue_rate = rs6000_issue_rate ();
31966 group_insns = XALLOCAVEC (rtx, issue_rate);
31967 for (i = 0; i < issue_rate; i++)
31969 group_insns[i] = 0;
31971 can_issue_more = issue_rate;
31972 slot = 0;
31973 insn = get_next_active_insn (prev_head_insn, tail);
31974 group_end = false;
31976 while (insn != NULL_RTX)
31978 slot = (issue_rate - can_issue_more);
31979 group_insns[slot] = insn;
31980 can_issue_more =
31981 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31982 if (insn_terminates_group_p (insn, current_group))
31983 can_issue_more = 0;
31985 next_insn = get_next_active_insn (insn, tail);
31986 if (next_insn == NULL_RTX)
31987 return group_count + 1;
31989 /* Is next_insn going to start a new group? */
31990 group_end
31991 = (can_issue_more == 0
31992 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31993 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31994 || (can_issue_more < issue_rate &&
31995 insn_terminates_group_p (next_insn, previous_group)));
31997 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
31998 next_insn, &group_end, can_issue_more,
31999 &group_count);
32001 if (group_end)
32003 group_count++;
32004 can_issue_more = 0;
32005 for (i = 0; i < issue_rate; i++)
32007 group_insns[i] = 0;
32011 if (GET_MODE (next_insn) == TImode && can_issue_more)
32012 PUT_MODE (next_insn, VOIDmode);
32013 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32014 PUT_MODE (next_insn, TImode);
32016 insn = next_insn;
32017 if (can_issue_more == 0)
32018 can_issue_more = issue_rate;
32019 } /* while */
32021 return group_count;
32024 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32025 dispatch group boundaries that the scheduler had marked. Pad with nops
32026 any dispatch groups which have vacant issue slots, in order to force the
32027 scheduler's grouping on the processor dispatcher. The function
32028 returns the number of dispatch groups found. */
32030 static int
32031 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32032 rtx_insn *tail)
32034 rtx_insn *insn, *next_insn;
32035 rtx nop;
32036 int issue_rate;
32037 int can_issue_more;
32038 int group_end;
32039 int group_count = 0;
32041 /* Initialize issue_rate. */
32042 issue_rate = rs6000_issue_rate ();
32043 can_issue_more = issue_rate;
32045 insn = get_next_active_insn (prev_head_insn, tail);
32046 next_insn = get_next_active_insn (insn, tail);
32048 while (insn != NULL_RTX)
32050 can_issue_more =
32051 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32053 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32055 if (next_insn == NULL_RTX)
32056 break;
32058 if (group_end)
32060 /* If the scheduler had marked group termination at this location
32061 (between insn and next_insn), and neither insn nor next_insn will
32062 force group termination, pad the group with nops to force group
32063 termination. */
32064 if (can_issue_more
32065 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32066 && !insn_terminates_group_p (insn, current_group)
32067 && !insn_terminates_group_p (next_insn, previous_group))
32069 if (!is_branch_slot_insn (next_insn))
32070 can_issue_more--;
32072 while (can_issue_more)
32074 nop = gen_nop ();
32075 emit_insn_before (nop, next_insn);
32076 can_issue_more--;
32080 can_issue_more = issue_rate;
32081 group_count++;
32084 insn = next_insn;
32085 next_insn = get_next_active_insn (insn, tail);
32088 return group_count;
32091 /* We're beginning a new block. Initialize data structures as necessary. */
32093 static void
32094 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32095 int sched_verbose ATTRIBUTE_UNUSED,
32096 int max_ready ATTRIBUTE_UNUSED)
32098 last_scheduled_insn = NULL;
32099 load_store_pendulum = 0;
32100 divide_cnt = 0;
32101 vec_load_pendulum = 0;
32104 /* The following function is called at the end of scheduling BB.
32105 After reload, it inserts nops to enforce insn group bundling. */
32107 static void
32108 rs6000_sched_finish (FILE *dump, int sched_verbose)
32110 int n_groups;
32112 if (sched_verbose)
32113 fprintf (dump, "=== Finishing schedule.\n");
32115 if (reload_completed && rs6000_sched_groups)
32117 /* Do not run sched_finish hook when selective scheduling enabled. */
32118 if (sel_sched_p ())
32119 return;
32121 if (rs6000_sched_insert_nops == sched_finish_none)
32122 return;
32124 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32125 n_groups = pad_groups (dump, sched_verbose,
32126 current_sched_info->prev_head,
32127 current_sched_info->next_tail);
32128 else
32129 n_groups = redefine_groups (dump, sched_verbose,
32130 current_sched_info->prev_head,
32131 current_sched_info->next_tail);
32133 if (sched_verbose >= 6)
32135 fprintf (dump, "ngroups = %d\n", n_groups);
32136 print_rtl (dump, current_sched_info->prev_head);
32137 fprintf (dump, "Done finish_sched\n");
32142 struct rs6000_sched_context
32144 short cached_can_issue_more;
32145 rtx_insn *last_scheduled_insn;
32146 int load_store_pendulum;
32147 int divide_cnt;
32148 int vec_load_pendulum;
32151 typedef struct rs6000_sched_context rs6000_sched_context_def;
32152 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32154 /* Allocate store for new scheduling context. */
32155 static void *
32156 rs6000_alloc_sched_context (void)
32158 return xmalloc (sizeof (rs6000_sched_context_def));
32161 /* If CLEAN_P is true, initialize _SC with clean data;
32162 otherwise initialize it from the global context. */
32163 static void
32164 rs6000_init_sched_context (void *_sc, bool clean_p)
32166 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32168 if (clean_p)
32170 sc->cached_can_issue_more = 0;
32171 sc->last_scheduled_insn = NULL;
32172 sc->load_store_pendulum = 0;
32173 sc->divide_cnt = 0;
32174 sc->vec_load_pendulum = 0;
32176 else
32178 sc->cached_can_issue_more = cached_can_issue_more;
32179 sc->last_scheduled_insn = last_scheduled_insn;
32180 sc->load_store_pendulum = load_store_pendulum;
32181 sc->divide_cnt = divide_cnt;
32182 sc->vec_load_pendulum = vec_load_pendulum;
32186 /* Sets the global scheduling context to the one pointed to by _SC. */
32187 static void
32188 rs6000_set_sched_context (void *_sc)
32190 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32192 gcc_assert (sc != NULL);
32194 cached_can_issue_more = sc->cached_can_issue_more;
32195 last_scheduled_insn = sc->last_scheduled_insn;
32196 load_store_pendulum = sc->load_store_pendulum;
32197 divide_cnt = sc->divide_cnt;
32198 vec_load_pendulum = sc->vec_load_pendulum;
32201 /* Free _SC. */
32202 static void
32203 rs6000_free_sched_context (void *_sc)
32205 gcc_assert (_sc != NULL);
32207 free (_sc);
32211 /* Length in units (bytes) of the trampoline for entering a nested function. */
32214 rs6000_trampoline_size (void)
32216 int ret = 0;
32218 switch (DEFAULT_ABI)
32220 default:
32221 gcc_unreachable ();
32223 case ABI_AIX:
32224 ret = (TARGET_32BIT) ? 12 : 24;
32225 break;
32227 case ABI_ELFv2:
32228 gcc_assert (!TARGET_32BIT);
32229 ret = 32;
32230 break;
32232 case ABI_DARWIN:
32233 case ABI_V4:
32234 ret = (TARGET_32BIT) ? 40 : 48;
32235 break;
32238 return ret;
32241 /* Emit RTL insns to initialize the variable parts of a trampoline.
32242 FNADDR is an RTX for the address of the function's pure code.
32243 CXT is an RTX for the static chain value for the function. */
32245 static void
32246 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32248 int regsize = (TARGET_32BIT) ? 4 : 8;
32249 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32250 rtx ctx_reg = force_reg (Pmode, cxt);
32251 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32253 switch (DEFAULT_ABI)
32255 default:
32256 gcc_unreachable ();
32258 /* Under AIX, just build the 3-word function descriptor. */
32259 case ABI_AIX:
32261 rtx fnmem, fn_reg, toc_reg;
32263 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32264 error ("You cannot take the address of a nested function if you use "
32265 "the -mno-pointers-to-nested-functions option.");
32267 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32268 fn_reg = gen_reg_rtx (Pmode);
32269 toc_reg = gen_reg_rtx (Pmode);
32271 /* Macro to shorten the code expansions below. */
32272 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32274 m_tramp = replace_equiv_address (m_tramp, addr);
32276 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32277 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32278 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32279 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32280 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32282 # undef MEM_PLUS
32284 break;
32286 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32287 case ABI_ELFv2:
32288 case ABI_DARWIN:
32289 case ABI_V4:
32290 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32291 LCT_NORMAL, VOIDmode, 4,
32292 addr, Pmode,
32293 GEN_INT (rs6000_trampoline_size ()), SImode,
32294 fnaddr, Pmode,
32295 ctx_reg, Pmode);
32296 break;
32301 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32302 identifier as an argument, so the front end shouldn't look it up. */
32304 static bool
32305 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32307 return is_attribute_p ("altivec", attr_id);
32310 /* Handle the "altivec" attribute. The attribute may have
32311 arguments as follows:
32313 __attribute__((altivec(vector__)))
32314 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32315 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32317 and may appear more than once (e.g., 'vector bool char') in a
32318 given declaration. */
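/* For example (a direct instantiation of the forms listed above): the
   source type 'vector unsigned short' reaches this handler as
   '__attribute__((altivec(vector__))) unsigned short', and 'vector pixel'
   as '__attribute__((altivec(pixel__))) unsigned short'.  */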
32320 static tree
32321 rs6000_handle_altivec_attribute (tree *node,
32322 tree name ATTRIBUTE_UNUSED,
32323 tree args,
32324 int flags ATTRIBUTE_UNUSED,
32325 bool *no_add_attrs)
32327 tree type = *node, result = NULL_TREE;
32328 machine_mode mode;
32329 int unsigned_p;
32330 char altivec_type
32331 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32332 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32333 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32334 : '?');
32336 while (POINTER_TYPE_P (type)
32337 || TREE_CODE (type) == FUNCTION_TYPE
32338 || TREE_CODE (type) == METHOD_TYPE
32339 || TREE_CODE (type) == ARRAY_TYPE)
32340 type = TREE_TYPE (type);
32342 mode = TYPE_MODE (type);
32344 /* Check for invalid AltiVec type qualifiers. */
32345 if (type == long_double_type_node)
32346 error ("use of %<long double%> in AltiVec types is invalid");
32347 else if (type == boolean_type_node)
32348 error ("use of boolean types in AltiVec types is invalid");
32349 else if (TREE_CODE (type) == COMPLEX_TYPE)
32350 error ("use of %<complex%> in AltiVec types is invalid");
32351 else if (DECIMAL_FLOAT_MODE_P (mode))
32352 error ("use of decimal floating point types in AltiVec types is invalid");
32353 else if (!TARGET_VSX)
32355 if (type == long_unsigned_type_node || type == long_integer_type_node)
32357 if (TARGET_64BIT)
32358 error ("use of %<long%> in AltiVec types is invalid for "
32359 "64-bit code without -mvsx");
32360 else if (rs6000_warn_altivec_long)
32361 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32362 "use %<int%>");
32364 else if (type == long_long_unsigned_type_node
32365 || type == long_long_integer_type_node)
32366 error ("use of %<long long%> in AltiVec types is invalid without "
32367 "-mvsx");
32368 else if (type == double_type_node)
32369 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
32372 switch (altivec_type)
32374 case 'v':
32375 unsigned_p = TYPE_UNSIGNED (type);
32376 switch (mode)
32378 case TImode:
32379 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32380 break;
32381 case DImode:
32382 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32383 break;
32384 case SImode:
32385 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32386 break;
32387 case HImode:
32388 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32389 break;
32390 case QImode:
32391 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32392 break;
32393 case SFmode: result = V4SF_type_node; break;
32394 case DFmode: result = V2DF_type_node; break;
32395 /* If the user says 'vector int bool', we may be handed the 'bool'
32396 attribute _before_ the 'vector' attribute, and so select the
32397 proper type in the 'b' case below. */
32398 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
32399 case V2DImode: case V2DFmode:
32400 result = type;
32401 default: break;
32403 break;
32404 case 'b':
32405 switch (mode)
32407 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
32408 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
32409 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
32410 case QImode: case V16QImode: result = bool_V16QI_type_node;
32411 default: break;
32413 break;
32414 case 'p':
32415 switch (mode)
32417 case V8HImode: result = pixel_V8HI_type_node;
32418 default: break;
32420 default: break;
32423 /* Propagate qualifiers attached to the element type
32424 onto the vector type. */
32425 if (result && result != type && TYPE_QUALS (type))
32426 result = build_qualified_type (result, TYPE_QUALS (type));
32428 *no_add_attrs = true; /* No need to hang on to the attribute. */
32430 if (result)
32431 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32433 return NULL_TREE;
32436 /* AltiVec defines four built-in scalar types that serve as vector
32437 elements; we must teach the compiler how to mangle them. */
32439 static const char *
32440 rs6000_mangle_type (const_tree type)
32442 type = TYPE_MAIN_VARIANT (type);
32444 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32445 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32446 return NULL;
32448 if (type == bool_char_type_node) return "U6__boolc";
32449 if (type == bool_short_type_node) return "U6__bools";
32450 if (type == pixel_type_node) return "u7__pixel";
32451 if (type == bool_int_type_node) return "U6__booli";
32452 if (type == bool_long_type_node) return "U6__booll";
32454 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32455 "g" for IBM extended double, no matter whether it is long double (using
32456 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32457 if (TARGET_FLOAT128)
32459 if (type == ieee128_float_type_node)
32460 return "U10__float128";
32462 if (type == ibm128_float_type_node)
32463 return "g";
32465 if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
32466 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32469 /* Mangle IBM extended float long double as `g' (__float128) on
32470 powerpc*-linux where long-double-64 previously was the default. */
32471 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32472 && TARGET_ELF
32473 && TARGET_LONG_DOUBLE_128
32474 && !TARGET_IEEEQUAD)
32475 return "g";
32477 /* For all other types, use normal C++ mangling. */
32478 return NULL;
32481 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32482 struct attribute_spec.handler. */
32484 static tree
32485 rs6000_handle_longcall_attribute (tree *node, tree name,
32486 tree args ATTRIBUTE_UNUSED,
32487 int flags ATTRIBUTE_UNUSED,
32488 bool *no_add_attrs)
32490 if (TREE_CODE (*node) != FUNCTION_TYPE
32491 && TREE_CODE (*node) != FIELD_DECL
32492 && TREE_CODE (*node) != TYPE_DECL)
32494 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32495 name);
32496 *no_add_attrs = true;
32499 return NULL_TREE;
32502 /* Set longcall attributes on all functions declared when
32503 rs6000_default_long_calls is true. */
32504 static void
32505 rs6000_set_default_type_attributes (tree type)
32507 if (rs6000_default_long_calls
32508 && (TREE_CODE (type) == FUNCTION_TYPE
32509 || TREE_CODE (type) == METHOD_TYPE))
32510 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32511 NULL_TREE,
32512 TYPE_ATTRIBUTES (type));
32514 #if TARGET_MACHO
32515 darwin_set_default_type_attributes (type);
32516 #endif
32519 /* Return a reference suitable for calling a function with the
32520 longcall attribute. */
32523 rs6000_longcall_ref (rtx call_ref)
32525 const char *call_name;
32526 tree node;
32528 if (GET_CODE (call_ref) != SYMBOL_REF)
32529 return call_ref;
32531 /* System V adds '.' to the internal name, so skip them. */
32532 call_name = XSTR (call_ref, 0);
32533 if (*call_name == '.')
32535 while (*call_name == '.')
32536 call_name++;
32538 node = get_identifier (call_name);
32539 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32542 return force_reg (Pmode, call_ref);
32545 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32546 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32547 #endif
32549 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32550 struct attribute_spec.handler. */
32551 static tree
32552 rs6000_handle_struct_attribute (tree *node, tree name,
32553 tree args ATTRIBUTE_UNUSED,
32554 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32556 tree *type = NULL;
32557 if (DECL_P (*node))
32559 if (TREE_CODE (*node) == TYPE_DECL)
32560 type = &TREE_TYPE (*node);
32562 else
32563 type = node;
32565 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32566 || TREE_CODE (*type) == UNION_TYPE)))
32568 warning (OPT_Wattributes, "%qE attribute ignored", name);
32569 *no_add_attrs = true;
32572 else if ((is_attribute_p ("ms_struct", name)
32573 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32574 || ((is_attribute_p ("gcc_struct", name)
32575 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32577 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32578 name);
32579 *no_add_attrs = true;
32582 return NULL_TREE;
32585 static bool
32586 rs6000_ms_bitfield_layout_p (const_tree record_type)
32588 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32589 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32590 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32593 #ifdef USING_ELFOS_H
32595 /* A get_unnamed_section callback, used for switching to toc_section. */
32597 static void
32598 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32600 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32601 && TARGET_MINIMAL_TOC)
32603 if (!toc_initialized)
32605 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32606 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32607 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32608 fprintf (asm_out_file, "\t.tc ");
32609 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32610 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32611 fprintf (asm_out_file, "\n");
32613 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32614 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32615 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32616 fprintf (asm_out_file, " = .+32768\n");
32617 toc_initialized = 1;
32619 else
32620 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32622 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32624 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32625 if (!toc_initialized)
32627 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32628 toc_initialized = 1;
32631 else
32633 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32634 if (!toc_initialized)
32636 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32637 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32638 fprintf (asm_out_file, " = .+32768\n");
32639 toc_initialized = 1;
32644 /* Implement TARGET_ASM_INIT_SECTIONS. */
32646 static void
32647 rs6000_elf_asm_init_sections (void)
32649 toc_section
32650 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32652 sdata2_section
32653 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32654 SDATA2_SECTION_ASM_OP);
32657 /* Implement TARGET_SELECT_RTX_SECTION. */
32659 static section *
32660 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32661 unsigned HOST_WIDE_INT align)
32663 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32664 return toc_section;
32665 else
32666 return default_elf_select_rtx_section (mode, x, align);
32669 /* For a SYMBOL_REF, set generic flags and then perform some
32670 target-specific processing.
32672 When the AIX ABI is requested on a non-AIX system, replace the
32673 function name with the real name (with a leading .) rather than the
32674 function descriptor name. This saves a lot of overriding code to
32675 read the prefixes. */
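/* For example (illustrative): a function named 'foo' compiled for the AIX
   ABI on a non-AIX system has its symbol renamed to '.foo' by the code
   below.  */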
32677 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32678 static void
32679 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32681 default_encode_section_info (decl, rtl, first);
32683 if (first
32684 && TREE_CODE (decl) == FUNCTION_DECL
32685 && !TARGET_AIX
32686 && DEFAULT_ABI == ABI_AIX)
32688 rtx sym_ref = XEXP (rtl, 0);
32689 size_t len = strlen (XSTR (sym_ref, 0));
32690 char *str = XALLOCAVEC (char, len + 2);
32691 str[0] = '.';
32692 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32693 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32697 static inline bool
32698 compare_section_name (const char *section, const char *templ)
32700 int len;
32702 len = strlen (templ);
32703 return (strncmp (section, templ, len) == 0
32704 && (section[len] == 0 || section[len] == '.'));
32707 bool
32708 rs6000_elf_in_small_data_p (const_tree decl)
32710 if (rs6000_sdata == SDATA_NONE)
32711 return false;
32713 /* We want to merge strings, so we never consider them small data. */
32714 if (TREE_CODE (decl) == STRING_CST)
32715 return false;
32717 /* Functions are never in the small data area. */
32718 if (TREE_CODE (decl) == FUNCTION_DECL)
32719 return false;
32721 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32723 const char *section = DECL_SECTION_NAME (decl);
32724 if (compare_section_name (section, ".sdata")
32725 || compare_section_name (section, ".sdata2")
32726 || compare_section_name (section, ".gnu.linkonce.s")
32727 || compare_section_name (section, ".sbss")
32728 || compare_section_name (section, ".sbss2")
32729 || compare_section_name (section, ".gnu.linkonce.sb")
32730 || strcmp (section, ".PPC.EMB.sdata0") == 0
32731 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32732 return true;
32734 else
32736 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32738 if (size > 0
32739 && size <= g_switch_value
32740 /* If it's not public, and we're not going to reference it there,
32741 there's no need to put it in the small data section. */
32742 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32743 return true;
32746 return false;
32749 #endif /* USING_ELFOS_H */
32751 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32753 static bool
32754 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32756 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32759 /* Do not place thread-local symbols refs in the object blocks. */
32761 static bool
32762 rs6000_use_blocks_for_decl_p (const_tree decl)
32764 return !DECL_THREAD_LOCAL_P (decl);
32767 /* Return a REG that occurs in ADDR with coefficient 1.
32768 ADDR can be effectively incremented by incrementing REG.
32770 r0 is special and we must not select it as an address
32771 register by this routine since our caller will try to
32772 increment the returned register via an "la" instruction. */
32775 find_addr_reg (rtx addr)
32777 while (GET_CODE (addr) == PLUS)
32779 if (GET_CODE (XEXP (addr, 0)) == REG
32780 && REGNO (XEXP (addr, 0)) != 0)
32781 addr = XEXP (addr, 0);
32782 else if (GET_CODE (XEXP (addr, 1)) == REG
32783 && REGNO (XEXP (addr, 1)) != 0)
32784 addr = XEXP (addr, 1);
32785 else if (CONSTANT_P (XEXP (addr, 0)))
32786 addr = XEXP (addr, 1);
32787 else if (CONSTANT_P (XEXP (addr, 1)))
32788 addr = XEXP (addr, 0);
32789 else
32790 gcc_unreachable ();
32792 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32793 return addr;
32796 void
32797 rs6000_fatal_bad_address (rtx op)
32799 fatal_insn ("bad address", op);
32802 #if TARGET_MACHO
32804 typedef struct branch_island_d {
32805 tree function_name;
32806 tree label_name;
32807 int line_number;
32808 } branch_island;
32811 static vec<branch_island, va_gc> *branch_islands;
32813 /* Remember to generate a branch island for far calls to the given
32814 function. */
32816 static void
32817 add_compiler_branch_island (tree label_name, tree function_name,
32818 int line_number)
32820 branch_island bi = {function_name, label_name, line_number};
32821 vec_safe_push (branch_islands, bi);
32824 /* Generate far-jump branch islands for everything recorded in
32825 branch_islands. Invoked immediately after the last instruction of
32826 the epilogue has been emitted; the branch islands must be appended
32827 to, and contiguous with, the function body. Mach-O stubs are
32828 generated in machopic_output_stub(). */
32830 static void
32831 macho_branch_islands (void)
32833 char tmp_buf[512];
32835 while (!vec_safe_is_empty (branch_islands))
32837 branch_island *bi = &branch_islands->last ();
32838 const char *label = IDENTIFIER_POINTER (bi->label_name);
32839 const char *name = IDENTIFIER_POINTER (bi->function_name);
32840 char name_buf[512];
32841 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32842 if (name[0] == '*' || name[0] == '&')
32843 strcpy (name_buf, name+1);
32844 else
32846 name_buf[0] = '_';
32847 strcpy (name_buf+1, name);
32849 strcpy (tmp_buf, "\n");
32850 strcat (tmp_buf, label);
32851 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32852 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32853 dbxout_stabd (N_SLINE, bi->line_number);
32854 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32855 if (flag_pic)
32857 if (TARGET_LINK_STACK)
32859 char name[32];
32860 get_ppc476_thunk_name (name);
32861 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32862 strcat (tmp_buf, name);
32863 strcat (tmp_buf, "\n");
32864 strcat (tmp_buf, label);
32865 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32867 else
32869 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32870 strcat (tmp_buf, label);
32871 strcat (tmp_buf, "_pic\n");
32872 strcat (tmp_buf, label);
32873 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32876 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32877 strcat (tmp_buf, name_buf);
32878 strcat (tmp_buf, " - ");
32879 strcat (tmp_buf, label);
32880 strcat (tmp_buf, "_pic)\n");
32882 strcat (tmp_buf, "\tmtlr r0\n");
32884 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32885 strcat (tmp_buf, name_buf);
32886 strcat (tmp_buf, " - ");
32887 strcat (tmp_buf, label);
32888 strcat (tmp_buf, "_pic)\n");
32890 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32892 else
32894 strcat (tmp_buf, ":\nlis r12,hi16(");
32895 strcat (tmp_buf, name_buf);
32896 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32897 strcat (tmp_buf, name_buf);
32898 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32900 output_asm_insn (tmp_buf, 0);
32901 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32902 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32903 dbxout_stabd (N_SLINE, bi->line_number);
32904 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32905 branch_islands->pop ();
32909 /* NO_PREVIOUS_DEF checks whether the function name is already in the
32910 branch island list. */
32912 static int
32913 no_previous_def (tree function_name)
32915 branch_island *bi;
32916 unsigned ix;
32918 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32919 if (function_name == bi->function_name)
32920 return 0;
32921 return 1;
32924 /* GET_PREV_LABEL gets the label name from the previous definition of
32925 the function. */
32927 static tree
32928 get_prev_label (tree function_name)
32930 branch_island *bi;
32931 unsigned ix;
32933 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32934 if (function_name == bi->function_name)
32935 return bi->label_name;
32936 return NULL_TREE;
32939 /* INSN is either a function call or a millicode call. It may have an
32940 unconditional jump in its delay slot.
32942 CALL_DEST is the routine we are calling. */
32944 char *
32945 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
32946 int cookie_operand_number)
32948 static char buf[256];
32949 if (darwin_emit_branch_islands
32950 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
32951 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
32953 tree labelname;
32954 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
32956 if (no_previous_def (funname))
32958 rtx label_rtx = gen_label_rtx ();
32959 char *label_buf, temp_buf[256];
32960 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32961 CODE_LABEL_NUMBER (label_rtx));
32962 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32963 labelname = get_identifier (label_buf);
32964 add_compiler_branch_island (labelname, funname, insn_line (insn));
32966 else
32967 labelname = get_prev_label (funname);
32969 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32970 instruction will reach 'foo', otherwise link as 'bl L42'".
32971 "L42" should be a 'branch island', that will do a far jump to
32972 'foo'. Branch islands are generated in
32973 macho_branch_islands(). */
32974 sprintf (buf, "jbsr %%z%d,%.246s",
32975 dest_operand_number, IDENTIFIER_POINTER (labelname));
32977 else
32978 sprintf (buf, "bl %%z%d", dest_operand_number);
32979 return buf;
32982 /* Generate PIC and indirect symbol stubs. */
32984 void
32985 machopic_output_stub (FILE *file, const char *symb, const char *stub)
32987 unsigned int length;
32988 char *symbol_name, *lazy_ptr_name;
32989 char *local_label_0;
32990 static int label = 0;
32992 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
32993 symb = (*targetm.strip_name_encoding) (symb);
32996 length = strlen (symb);
32997 symbol_name = XALLOCAVEC (char, length + 32);
32998 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33000 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33001 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33003 if (flag_pic == 2)
33004 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33005 else
33006 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33008 if (flag_pic == 2)
33010 fprintf (file, "\t.align 5\n");
33012 fprintf (file, "%s:\n", stub);
33013 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33015 label++;
33016 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33017 sprintf (local_label_0, "\"L%011d$spb\"", label);
33019 fprintf (file, "\tmflr r0\n");
33020 if (TARGET_LINK_STACK)
33022 char name[32];
33023 get_ppc476_thunk_name (name);
33024 fprintf (file, "\tbl %s\n", name);
33025 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33027 else
33029 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33030 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33032 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33033 lazy_ptr_name, local_label_0);
33034 fprintf (file, "\tmtlr r0\n");
33035 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33036 (TARGET_64BIT ? "ldu" : "lwzu"),
33037 lazy_ptr_name, local_label_0);
33038 fprintf (file, "\tmtctr r12\n");
33039 fprintf (file, "\tbctr\n");
33041 else
33043 fprintf (file, "\t.align 4\n");
33045 fprintf (file, "%s:\n", stub);
33046 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33048 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33049 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33050 (TARGET_64BIT ? "ldu" : "lwzu"),
33051 lazy_ptr_name);
33052 fprintf (file, "\tmtctr r12\n");
33053 fprintf (file, "\tbctr\n");
33056 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33057 fprintf (file, "%s:\n", lazy_ptr_name);
33058 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33059 fprintf (file, "%sdyld_stub_binding_helper\n",
33060 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
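/* Editorial example (illustrative symbol names, not emitted verbatim by
   any one configuration): for a 32-bit non-PIC reference to _bar, the
   non-PIC branch above would produce roughly

       L_bar$stub:
               .indirect_symbol _bar
               lis r11,ha16(L_bar$lazy_ptr)
               lwzu r12,lo16(L_bar$lazy_ptr)(r11)
               mtctr r12
               bctr

   with the matching L_bar$lazy_ptr entry emitted just below.  */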
33063 /* Legitimize PIC addresses. If the address is already
33064 position-independent, we return ORIG. Newly generated
33065 position-independent addresses go into a reg. This is REG if non
33066 zero, otherwise we allocate register(s) as necessary. */
33068 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
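/* A quick worked check of SMALL_INT (editorial): it accepts exactly the
   signed 16-bit range.  For X = 32767, 0x7fff + 0x8000 = 0xffff < 0x10000
   (accepted); for X = 32768 the sum is exactly 0x10000 (rejected); for
   X = -32768 the unsigned sum wraps around to 0 (accepted).  */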
33070 rtx
33071 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33072 rtx reg)
33074 rtx base, offset;
33076 if (reg == NULL && ! reload_in_progress && ! reload_completed)
33077 reg = gen_reg_rtx (Pmode);
33079 if (GET_CODE (orig) == CONST)
33081 rtx reg_temp;
33083 if (GET_CODE (XEXP (orig, 0)) == PLUS
33084 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33085 return orig;
33087 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33089 /* Use a different reg for the intermediate value, as
33090 it will be marked UNCHANGING. */
33091 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33092 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33093 Pmode, reg_temp);
33094 offset =
33095 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33096 Pmode, reg);
33098 if (GET_CODE (offset) == CONST_INT)
33100 if (SMALL_INT (offset))
33101 return plus_constant (Pmode, base, INTVAL (offset));
33102 else if (! reload_in_progress && ! reload_completed)
33103 offset = force_reg (Pmode, offset);
33104 else
33106 rtx mem = force_const_mem (Pmode, orig);
33107 return machopic_legitimize_pic_address (mem, Pmode, reg);
33110 return gen_rtx_PLUS (Pmode, base, offset);
33113 /* Fall back on generic machopic code. */
33114 return machopic_legitimize_pic_address (orig, mode, reg);
33117 /* Output a .machine directive for the Darwin assembler, and call
33118 the generic start_file routine. */
33120 static void
33121 rs6000_darwin_file_start (void)
33123 static const struct
33125 const char *arg;
33126 const char *name;
33127 HOST_WIDE_INT if_set;
33128 } mapping[] = {
33129 { "ppc64", "ppc64", MASK_64BIT },
33130 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33131 { "power4", "ppc970", 0 },
33132 { "G5", "ppc970", 0 },
33133 { "7450", "ppc7450", 0 },
33134 { "7400", "ppc7400", MASK_ALTIVEC },
33135 { "G4", "ppc7400", 0 },
33136 { "750", "ppc750", 0 },
33137 { "740", "ppc750", 0 },
33138 { "G3", "ppc750", 0 },
33139 { "604e", "ppc604e", 0 },
33140 { "604", "ppc604", 0 },
33141 { "603e", "ppc603", 0 },
33142 { "603", "ppc603", 0 },
33143 { "601", "ppc601", 0 },
33144 { NULL, "ppc", 0 } };
33145 const char *cpu_id = "";
33146 size_t i;
33148 rs6000_file_start ();
33149 darwin_file_start ();
33151 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33153 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33154 cpu_id = rs6000_default_cpu;
33156 if (global_options_set.x_rs6000_cpu_index)
33157 cpu_id = processor_target_table[rs6000_cpu_index].name;
33159 /* Look through the mapping array. Pick the first name that either
33160 matches the argument, has a bit set in IF_SET that is also set
33161 in the target flags, or has a NULL name. */
33163 i = 0;
33164 while (mapping[i].arg != NULL
33165 && strcmp (mapping[i].arg, cpu_id) != 0
33166 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33167 i++;
33169 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
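/* Editorial examples of the lookup above: "-mcpu=G5" matches the "G5"
   row and emits ".machine ppc970"; with no -mcpu but 64-bit code in
   effect, MASK_64BIT is set in rs6000_isa_flags, so the first row wins
   and ".machine ppc64" is emitted; anything unrecognized falls through
   to the final { NULL, "ppc", 0 } row.  */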
33172 #endif /* TARGET_MACHO */
33174 #if TARGET_ELF
33175 static int
33176 rs6000_elf_reloc_rw_mask (void)
33178 if (flag_pic)
33179 return 3;
33180 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33181 return 2;
33182 else
33183 return 0;
33186 /* Record an element in the table of global constructors. SYMBOL is
33187 a SYMBOL_REF of the function to be called; PRIORITY is a number
33188 between 0 and MAX_INIT_PRIORITY.
33190 This differs from default_named_section_asm_out_constructor in
33191 that we have special handling for -mrelocatable. */
33193 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33194 static void
33195 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33197 const char *section = ".ctors";
33198 char buf[16];
33200 if (priority != DEFAULT_INIT_PRIORITY)
33202 sprintf (buf, ".ctors.%.5u",
33203 /* Invert the numbering so the linker puts us in the proper
33204 order; constructors are run from right to left, and the
33205 linker sorts in increasing order. */
33206 MAX_INIT_PRIORITY - priority);
33207 section = buf;
33210 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33211 assemble_align (POINTER_SIZE);
33213 if (DEFAULT_ABI == ABI_V4
33214 && (TARGET_RELOCATABLE || flag_pic > 1))
33216 fputs ("\t.long (", asm_out_file);
33217 output_addr_const (asm_out_file, symbol);
33218 fputs (")@fixup\n", asm_out_file);
33220 else
33221 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
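/* Editorial example, assuming the usual MAX_INIT_PRIORITY of 65535:
   priority 101 yields section ".ctors.65434".  The linker sorts these
   subsections in increasing order and the runtime walks .ctors from
   right to left, so the inversion makes numerically lower (earlier)
   priorities actually run first.  */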
33224 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33225 static void
33226 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33228 const char *section = ".dtors";
33229 char buf[16];
33231 if (priority != DEFAULT_INIT_PRIORITY)
33233 sprintf (buf, ".dtors.%.5u",
33234 /* Invert the numbering so the linker puts us in the proper
33235 order; constructors are run from right to left, and the
33236 linker sorts in increasing order. */
33237 MAX_INIT_PRIORITY - priority);
33238 section = buf;
33241 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33242 assemble_align (POINTER_SIZE);
33244 if (DEFAULT_ABI == ABI_V4
33245 && (TARGET_RELOCATABLE || flag_pic > 1))
33247 fputs ("\t.long (", asm_out_file);
33248 output_addr_const (asm_out_file, symbol);
33249 fputs (")@fixup\n", asm_out_file);
33251 else
33252 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33255 void
33256 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33258 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33260 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33261 ASM_OUTPUT_LABEL (file, name);
33262 fputs (DOUBLE_INT_ASM_OP, file);
33263 rs6000_output_function_entry (file, name);
33264 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33265 if (DOT_SYMBOLS)
33267 fputs ("\t.size\t", file);
33268 assemble_name (file, name);
33269 fputs (",24\n\t.type\t.", file);
33270 assemble_name (file, name);
33271 fputs (",@function\n", file);
33272 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33274 fputs ("\t.globl\t.", file);
33275 assemble_name (file, name);
33276 putc ('\n', file);
33279 else
33280 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33281 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33282 rs6000_output_function_entry (file, name);
33283 fputs (":\n", file);
33284 return;
33287 if (DEFAULT_ABI == ABI_V4
33288 && (TARGET_RELOCATABLE || flag_pic > 1)
33289 && !TARGET_SECURE_PLT
33290 && (get_pool_size () != 0 || crtl->profile)
33291 && uses_TOC ())
33293 char buf[256];
33295 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33297 fprintf (file, "\t.long ");
33298 assemble_name (file, toc_label_name);
33299 need_toc_init = 1;
33300 putc ('-', file);
33301 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33302 assemble_name (file, buf);
33303 putc ('\n', file);
33306 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33307 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33309 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33311 char buf[256];
33313 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33315 fprintf (file, "\t.quad .TOC.-");
33316 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33317 assemble_name (file, buf);
33318 putc ('\n', file);
33321 if (DEFAULT_ABI == ABI_AIX)
33323 const char *desc_name, *orig_name;
33325 orig_name = (*targetm.strip_name_encoding) (name);
33326 desc_name = orig_name;
33327 while (*desc_name == '.')
33328 desc_name++;
33330 if (TREE_PUBLIC (decl))
33331 fprintf (file, "\t.globl %s\n", desc_name);
33333 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33334 fprintf (file, "%s:\n", desc_name);
33335 fprintf (file, "\t.long %s\n", orig_name);
33336 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33337 fputs ("\t.long 0\n", file);
33338 fprintf (file, "\t.previous\n");
33340 ASM_OUTPUT_LABEL (file, name);
33343 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33344 static void
33345 rs6000_elf_file_end (void)
33347 #ifdef HAVE_AS_GNU_ATTRIBUTE
33348 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33350 if (rs6000_passes_float)
33351 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
33352 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
33353 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
33354 : 2));
33355 if (rs6000_passes_vector)
33356 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33357 (TARGET_ALTIVEC_ABI ? 2
33358 : TARGET_SPE_ABI ? 3
33359 : 1));
33360 if (rs6000_returns_struct)
33361 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33362 aix_struct_return ? 2 : 1);
33364 #endif
33365 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33366 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33367 file_end_indicate_exec_stack ();
33368 #endif
33370 if (flag_split_stack)
33371 file_end_indicate_split_stack ();
33373 if (cpu_builtin_p)
33375 /* We have expanded a CPU builtin, so we need to emit a reference to
33376 the special symbol that LIBC uses to declare it supports the
33377 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33378 switch_to_section (data_section);
33379 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33380 fprintf (asm_out_file, "\t%s %s\n",
33381 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33384 #endif
33386 #if TARGET_XCOFF
33388 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33389 #define HAVE_XCOFF_DWARF_EXTRAS 0
33390 #endif
33392 static enum unwind_info_type
33393 rs6000_xcoff_debug_unwind_info (void)
33395 return UI_NONE;
33398 static void
33399 rs6000_xcoff_asm_output_anchor (rtx symbol)
33401 char buffer[100];
33403 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33404 SYMBOL_REF_BLOCK_OFFSET (symbol));
33405 fprintf (asm_out_file, "%s", SET_ASM_OP);
33406 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33407 fprintf (asm_out_file, ",");
33408 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33409 fprintf (asm_out_file, "\n");
33412 static void
33413 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33415 fputs (GLOBAL_ASM_OP, stream);
33416 RS6000_OUTPUT_BASENAME (stream, name);
33417 putc ('\n', stream);
33420 /* A get_unnamed_decl callback, used for read-only sections. PTR
33421 points to the section string variable. */
33423 static void
33424 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33426 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33427 *(const char *const *) directive,
33428 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33431 /* Likewise for read-write sections. */
33433 static void
33434 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33436 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33437 *(const char *const *) directive,
33438 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33441 static void
33442 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33444 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33445 *(const char *const *) directive,
33446 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33449 /* A get_unnamed_section callback, used for switching to toc_section. */
33451 static void
33452 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33454 if (TARGET_MINIMAL_TOC)
33456 /* toc_section is always selected at least once from
33457 rs6000_xcoff_file_start, so this is guaranteed to
33458 always be defined once and only once in each file. */
33459 if (!toc_initialized)
33461 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33462 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33463 toc_initialized = 1;
33465 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33466 (TARGET_32BIT ? "" : ",3"));
33468 else
33469 fputs ("\t.toc\n", asm_out_file);
33472 /* Implement TARGET_ASM_INIT_SECTIONS. */
33474 static void
33475 rs6000_xcoff_asm_init_sections (void)
33477 read_only_data_section
33478 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33479 &xcoff_read_only_section_name);
33481 private_data_section
33482 = get_unnamed_section (SECTION_WRITE,
33483 rs6000_xcoff_output_readwrite_section_asm_op,
33484 &xcoff_private_data_section_name);
33486 tls_data_section
33487 = get_unnamed_section (SECTION_TLS,
33488 rs6000_xcoff_output_tls_section_asm_op,
33489 &xcoff_tls_data_section_name);
33491 tls_private_data_section
33492 = get_unnamed_section (SECTION_TLS,
33493 rs6000_xcoff_output_tls_section_asm_op,
33494 &xcoff_private_data_section_name);
33496 read_only_private_data_section
33497 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33498 &xcoff_private_data_section_name);
33500 toc_section
33501 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33503 readonly_data_section = read_only_data_section;
33506 static int
33507 rs6000_xcoff_reloc_rw_mask (void)
33509 return 3;
33512 static void
33513 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33514 tree decl ATTRIBUTE_UNUSED)
33516 int smclass;
33517 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33519 if (flags & SECTION_EXCLUDE)
33520 smclass = 4;
33521 else if (flags & SECTION_DEBUG)
33523 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33524 return;
33526 else if (flags & SECTION_CODE)
33527 smclass = 0;
33528 else if (flags & SECTION_TLS)
33529 smclass = 3;
33530 else if (flags & SECTION_WRITE)
33531 smclass = 2;
33532 else
33533 smclass = 1;
33535 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33536 (flags & SECTION_CODE) ? "." : "",
33537 name, suffix[smclass], flags & SECTION_ENTSIZE);
33540 #define IN_NAMED_SECTION(DECL) \
33541 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33542 && DECL_SECTION_NAME (DECL) != NULL)
33544 static section *
33545 rs6000_xcoff_select_section (tree decl, int reloc,
33546 unsigned HOST_WIDE_INT align)
33548 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33549 named section. */
33550 if (align > BIGGEST_ALIGNMENT)
33552 resolve_unique_section (decl, reloc, true);
33553 if (IN_NAMED_SECTION (decl))
33554 return get_named_section (decl, NULL, reloc);
33557 if (decl_readonly_section (decl, reloc))
33559 if (TREE_PUBLIC (decl))
33560 return read_only_data_section;
33561 else
33562 return read_only_private_data_section;
33564 else
33566 #if HAVE_AS_TLS
33567 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33569 if (TREE_PUBLIC (decl))
33570 return tls_data_section;
33571 else if (bss_initializer_p (decl))
33573 /* Convert to COMMON to emit in BSS. */
33574 DECL_COMMON (decl) = 1;
33575 return tls_comm_section;
33577 else
33578 return tls_private_data_section;
33580 else
33581 #endif
33582 if (TREE_PUBLIC (decl))
33583 return data_section;
33584 else
33585 return private_data_section;
33589 static void
33590 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33592 const char *name;
33594 /* Use select_section for private data and uninitialized data with
33595 alignment <= BIGGEST_ALIGNMENT. */
33596 if (!TREE_PUBLIC (decl)
33597 || DECL_COMMON (decl)
33598 || (DECL_INITIAL (decl) == NULL_TREE
33599 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33600 || DECL_INITIAL (decl) == error_mark_node
33601 || (flag_zero_initialized_in_bss
33602 && initializer_zerop (DECL_INITIAL (decl))))
33603 return;
33605 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33606 name = (*targetm.strip_name_encoding) (name);
33607 set_decl_section_name (decl, name);
33610 /* Select section for constant in constant pool.
33612 On RS/6000, all constants are in the private read-only data area.
33613 However, if this is being placed in the TOC it must be output as a
33614 toc entry. */
33616 static section *
33617 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33618 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33620 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33621 return toc_section;
33622 else
33623 return read_only_private_data_section;
33626 /* Remove any trailing [DS] or the like from the symbol name. */
33628 static const char *
33629 rs6000_xcoff_strip_name_encoding (const char *name)
33631 size_t len;
33632 if (*name == '*')
33633 name++;
33634 len = strlen (name);
33635 if (name[len - 1] == ']')
33636 return ggc_alloc_string (name, len - 4);
33637 else
33638 return name;
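/* Editorial examples of the stripping above: "foo[DS]" and "*foo[DS]"
   both yield "foo" (the leading '*' is skipped, then the last four
   characters are dropped), while a plain "foo" is returned untouched.
   This relies on every bracketed suffix having the two-letter "[XX]"
   form.  */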
33641 /* Section attributes. AIX is always PIC. */
33643 static unsigned int
33644 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33646 unsigned int align;
33647 unsigned int flags = default_section_type_flags (decl, name, reloc);
33649 /* Align to at least UNIT size. */
33650 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33651 align = MIN_UNITS_PER_WORD;
33652 else
33653 /* Increase alignment of large objects if not already stricter. */
33654 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33655 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33656 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33658 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
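/* Editorial example: for a 16-byte object with DECL_ALIGN of 64 bits on
   a 32-bit target (assuming MIN_UNITS_PER_WORD of 4 and
   UNITS_PER_FP_WORD of 8), the size exceeds a word, so align becomes
   MAX (8, 8) = 8 and exact_log2 (8) = 3 is folded into the
   SECTION_ENTSIZE bits of the returned flags.  */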
33661 /* Output at beginning of assembler file.
33663 Initialize the section names for the RS/6000 at this point.
33665 Specify filename, including full path, to assembler.
33667 We want to go into the TOC section so at least one .toc will be emitted.
33668 Also, in order to output proper .bs/.es pairs, we need at least one static
33669 [RW] section emitted.
33671 Finally, declare mcount when profiling to make the assembler happy. */
33673 static void
33674 rs6000_xcoff_file_start (void)
33676 rs6000_gen_section_name (&xcoff_bss_section_name,
33677 main_input_filename, ".bss_");
33678 rs6000_gen_section_name (&xcoff_private_data_section_name,
33679 main_input_filename, ".rw_");
33680 rs6000_gen_section_name (&xcoff_read_only_section_name,
33681 main_input_filename, ".ro_");
33682 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33683 main_input_filename, ".tls_");
33684 rs6000_gen_section_name (&xcoff_tbss_section_name,
33685 main_input_filename, ".tbss_[UL]");
33687 fputs ("\t.file\t", asm_out_file);
33688 output_quoted_string (asm_out_file, main_input_filename);
33689 fputc ('\n', asm_out_file);
33690 if (write_symbols != NO_DEBUG)
33691 switch_to_section (private_data_section);
33692 switch_to_section (toc_section);
33693 switch_to_section (text_section);
33694 if (profile_flag)
33695 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33696 rs6000_file_start ();
33699 /* Output at end of assembler file.
33700 On the RS/6000, referencing data should automatically pull in text. */
33702 static void
33703 rs6000_xcoff_file_end (void)
33705 switch_to_section (text_section);
33706 fputs ("_section_.text:\n", asm_out_file);
33707 switch_to_section (data_section);
33708 fputs (TARGET_32BIT
33709 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33710 asm_out_file);
33713 struct declare_alias_data
33715 FILE *file;
33716 bool function_descriptor;
33719 /* Declare alias N.  A helper function for call_for_symbol_and_aliases. */
33721 static bool
33722 rs6000_declare_alias (struct symtab_node *n, void *d)
33724 struct declare_alias_data *data = (struct declare_alias_data *)d;
33725 /* Main symbol is output specially, because varasm machinery does part of
33726 the job for us - we do not need to declare .globl/lglobs and such. */
33727 if (!n->alias || n->weakref)
33728 return false;
33730 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33731 return false;
33733 /* Prevent assemble_alias from trying to use .set pseudo operation
33734 that does not behave as expected by the middle-end. */
33735 TREE_ASM_WRITTEN (n->decl) = true;
33737 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33738 char *buffer = (char *) alloca (strlen (name) + 2);
33739 char *p;
33740 int dollar_inside = 0;
33742 strcpy (buffer, name);
33743 p = strchr (buffer, '$');
33744 while (p) {
33745 *p = '_';
33746 dollar_inside++;
33747 p = strchr (p + 1, '$');
33749 if (TREE_PUBLIC (n->decl))
33751 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33753 if (dollar_inside) {
33754 if (data->function_descriptor)
33755 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33756 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33758 if (data->function_descriptor)
33760 fputs ("\t.globl .", data->file);
33761 RS6000_OUTPUT_BASENAME (data->file, buffer);
33762 putc ('\n', data->file);
33764 fputs ("\t.globl ", data->file);
33765 RS6000_OUTPUT_BASENAME (data->file, buffer);
33766 putc ('\n', data->file);
33768 #ifdef ASM_WEAKEN_DECL
33769 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33770 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33771 #endif
33773 else
33775 if (dollar_inside)
33777 if (data->function_descriptor)
33778 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33779 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33781 if (data->function_descriptor)
33783 fputs ("\t.lglobl .", data->file);
33784 RS6000_OUTPUT_BASENAME (data->file, buffer);
33785 putc ('\n', data->file);
33787 fputs ("\t.lglobl ", data->file);
33788 RS6000_OUTPUT_BASENAME (data->file, buffer);
33789 putc ('\n', data->file);
33791 if (data->function_descriptor)
33792 fputs (".", data->file);
33793 RS6000_OUTPUT_BASENAME (data->file, buffer);
33794 fputs (":\n", data->file);
33795 return false;
33798 /* This macro produces the initial definition of a function name.
33799 On the RS/6000, we need to place an extra '.' in the function name and
33800 output the function descriptor.
33801 Dollar signs are converted to underscores.
33803 The csect for the function will have already been created when
33804 text_section was selected. We do have to go back to that csect, however.
33806 The third and fourth parameters to the .function pseudo-op (16 and 044)
33807 are placeholders which no longer have any use.
33809 Because AIX assembler's .set command has unexpected semantics, we output
33810 all aliases as alternative labels in front of the definition. */
33812 void
33813 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33815 char *buffer = (char *) alloca (strlen (name) + 1);
33816 char *p;
33817 int dollar_inside = 0;
33818 struct declare_alias_data data = {file, false};
33820 strcpy (buffer, name);
33821 p = strchr (buffer, '$');
33822 while (p) {
33823 *p = '_';
33824 dollar_inside++;
33825 p = strchr (p + 1, '$');
33827 if (TREE_PUBLIC (decl))
33829 if (!RS6000_WEAK || !DECL_WEAK (decl))
33831 if (dollar_inside) {
33832 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33833 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33835 fputs ("\t.globl .", file);
33836 RS6000_OUTPUT_BASENAME (file, buffer);
33837 putc ('\n', file);
33840 else
33842 if (dollar_inside) {
33843 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33844 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33846 fputs ("\t.lglobl .", file);
33847 RS6000_OUTPUT_BASENAME (file, buffer);
33848 putc ('\n', file);
33850 fputs ("\t.csect ", file);
33851 RS6000_OUTPUT_BASENAME (file, buffer);
33852 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33853 RS6000_OUTPUT_BASENAME (file, buffer);
33854 fputs (":\n", file);
33855 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
33856 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33857 RS6000_OUTPUT_BASENAME (file, buffer);
33858 fputs (", TOC[tc0], 0\n", file);
33859 in_section = NULL;
33860 switch_to_section (function_section (decl));
33861 putc ('.', file);
33862 RS6000_OUTPUT_BASENAME (file, buffer);
33863 fputs (":\n", file);
33864 data.function_descriptor = true;
33865 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
33866 if (!DECL_IGNORED_P (decl))
33868 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33869 xcoffout_declare_function (file, decl, buffer);
33870 else if (write_symbols == DWARF2_DEBUG)
33872 name = (*targetm.strip_name_encoding) (name);
33873 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33876 return;
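/* Editorial example: for a public function whose assembler name is
   "a$b" (an illustrative name), the loop above rewrites the label
   buffer to "a_b" and the code emits

       .rename .a_b,".a$b"
       .rename a_b,"a$b"

   so the csect and entry labels use only underscores while the
   assembler still sees the original dollar-sign name.  */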
33879 /* This macro produces the initial definition of an object (variable) name.
33880 Because AIX assembler's .set command has unexpected semantics, we output
33881 all aliases as alternative labels in front of the definition. */
33883 void
33884 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
33886 struct declare_alias_data data = {file, false};
33887 RS6000_OUTPUT_BASENAME (file, name);
33888 fputs (":\n", file);
33889 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
33892 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
33894 void
33895 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
33897 fputs (integer_asm_op (size, FALSE), file);
33898 assemble_name (file, label);
33899 fputs ("-$", file);
33902 /* Output a symbol offset relative to the dbase for the current object.
33903 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
33904 signed offsets.
33906 __gcc_unwind_dbase is embedded in all executables/libraries through
33907 libgcc/config/rs6000/crtdbase.S. */
33909 void
33910 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
33912 fputs (integer_asm_op (size, FALSE), file);
33913 assemble_name (file, label);
33914 fputs("-__gcc_unwind_dbase", file);
33917 #ifdef HAVE_AS_TLS
33918 static void
33919 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
33921 rtx symbol;
33922 int flags;
33924 default_encode_section_info (decl, rtl, first);
33926 /* Careful not to prod global register variables. */
33927 if (!MEM_P (rtl))
33928 return;
33929 symbol = XEXP (rtl, 0);
33930 if (GET_CODE (symbol) != SYMBOL_REF)
33931 return;
33933 flags = SYMBOL_REF_FLAGS (symbol);
33935 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33936 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
33938 SYMBOL_REF_FLAGS (symbol) = flags;
33940 #endif /* HAVE_AS_TLS */
33941 #endif /* TARGET_XCOFF */
33943 /* Return true if INSN should not be copied. */
33945 static bool
33946 rs6000_cannot_copy_insn_p (rtx_insn *insn)
33948 return recog_memoized (insn) >= 0
33949 && get_attr_cannot_copy (insn);
33952 /* Compute a (partial) cost for rtx X. Return true if the complete
33953 cost has been computed, and false if subexpressions should be
33954 scanned. In either case, *TOTAL contains the cost result. */
33956 static bool
33957 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
33958 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
33960 int code = GET_CODE (x);
33962 switch (code)
33964 /* On the RS/6000, if it is valid in the insn, it is free. */
33965 case CONST_INT:
33966 if (((outer_code == SET
33967 || outer_code == PLUS
33968 || outer_code == MINUS)
33969 && (satisfies_constraint_I (x)
33970 || satisfies_constraint_L (x)))
33971 || (outer_code == AND
33972 && (satisfies_constraint_K (x)
33973 || (mode == SImode
33974 ? satisfies_constraint_L (x)
33975 : satisfies_constraint_J (x))))
33976 || ((outer_code == IOR || outer_code == XOR)
33977 && (satisfies_constraint_K (x)
33978 || (mode == SImode
33979 ? satisfies_constraint_L (x)
33980 : satisfies_constraint_J (x))))
33981 || outer_code == ASHIFT
33982 || outer_code == ASHIFTRT
33983 || outer_code == LSHIFTRT
33984 || outer_code == ROTATE
33985 || outer_code == ROTATERT
33986 || outer_code == ZERO_EXTRACT
33987 || (outer_code == MULT
33988 && satisfies_constraint_I (x))
33989 || ((outer_code == DIV || outer_code == UDIV
33990 || outer_code == MOD || outer_code == UMOD)
33991 && exact_log2 (INTVAL (x)) >= 0)
33992 || (outer_code == COMPARE
33993 && (satisfies_constraint_I (x)
33994 || satisfies_constraint_K (x)))
33995 || ((outer_code == EQ || outer_code == NE)
33996 && (satisfies_constraint_I (x)
33997 || satisfies_constraint_K (x)
33998 || (mode == SImode
33999 ? satisfies_constraint_L (x)
34000 : satisfies_constraint_J (x))))
34001 || (outer_code == GTU
34002 && satisfies_constraint_I (x))
34003 || (outer_code == LTU
34004 && satisfies_constraint_P (x)))
34006 *total = 0;
34007 return true;
34009 else if ((outer_code == PLUS
34010 && reg_or_add_cint_operand (x, VOIDmode))
34011 || (outer_code == MINUS
34012 && reg_or_sub_cint_operand (x, VOIDmode))
34013 || ((outer_code == SET
34014 || outer_code == IOR
34015 || outer_code == XOR)
34016 && (INTVAL (x)
34017 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34019 *total = COSTS_N_INSNS (1);
34020 return true;
34022 /* FALLTHRU */
34024 case CONST_DOUBLE:
34025 case CONST_WIDE_INT:
34026 case CONST:
34027 case HIGH:
34028 case SYMBOL_REF:
34029 case MEM:
34030 /* When optimizing for size, MEM should be slightly more expensive
34031 than generating an address, e.g., (plus (reg) (const)).
34032 L1 cache latency is about two instructions. */
34033 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34034 return true;
34036 case LABEL_REF:
34037 *total = 0;
34038 return true;
34040 case PLUS:
34041 case MINUS:
34042 if (FLOAT_MODE_P (mode))
34043 *total = rs6000_cost->fp;
34044 else
34045 *total = COSTS_N_INSNS (1);
34046 return false;
34048 case MULT:
34049 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34050 && satisfies_constraint_I (XEXP (x, 1)))
34052 if (INTVAL (XEXP (x, 1)) >= -256
34053 && INTVAL (XEXP (x, 1)) <= 255)
34054 *total = rs6000_cost->mulsi_const9;
34055 else
34056 *total = rs6000_cost->mulsi_const;
34058 else if (mode == SFmode)
34059 *total = rs6000_cost->fp;
34060 else if (FLOAT_MODE_P (mode))
34061 *total = rs6000_cost->dmul;
34062 else if (mode == DImode)
34063 *total = rs6000_cost->muldi;
34064 else
34065 *total = rs6000_cost->mulsi;
34066 return false;
34068 case FMA:
34069 if (mode == SFmode)
34070 *total = rs6000_cost->fp;
34071 else
34072 *total = rs6000_cost->dmul;
34073 break;
34075 case DIV:
34076 case MOD:
34077 if (FLOAT_MODE_P (mode))
34079 *total = mode == DFmode ? rs6000_cost->ddiv
34080 : rs6000_cost->sdiv;
34081 return false;
34083 /* FALLTHRU */
34085 case UDIV:
34086 case UMOD:
34087 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34088 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34090 if (code == DIV || code == MOD)
34091 /* Shift, addze */
34092 *total = COSTS_N_INSNS (2);
34093 else
34094 /* Shift */
34095 *total = COSTS_N_INSNS (1);
34097 else
34099 if (GET_MODE (XEXP (x, 1)) == DImode)
34100 *total = rs6000_cost->divdi;
34101 else
34102 *total = rs6000_cost->divsi;
34104 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34105 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34106 *total += COSTS_N_INSNS (2);
34107 return false;
34109 case CTZ:
34110 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34111 return false;
34113 case FFS:
34114 *total = COSTS_N_INSNS (4);
34115 return false;
34117 case POPCOUNT:
34118 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34119 return false;
34121 case PARITY:
34122 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34123 return false;
34125 case NOT:
34126 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34127 *total = 0;
34128 else
34129 *total = COSTS_N_INSNS (1);
34130 return false;
34132 case AND:
34133 if (CONST_INT_P (XEXP (x, 1)))
34135 rtx left = XEXP (x, 0);
34136 rtx_code left_code = GET_CODE (left);
34138 /* rotate-and-mask: 1 insn. */
34139 if ((left_code == ROTATE
34140 || left_code == ASHIFT
34141 || left_code == LSHIFTRT)
34142 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34144 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34145 if (!CONST_INT_P (XEXP (left, 1)))
34146 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34147 *total += COSTS_N_INSNS (1);
34148 return true;
34151 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34152 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34153 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34154 || (val & 0xffff) == val
34155 || (val & 0xffff0000) == val
34156 || ((val & 0xffff) == 0 && mode == SImode))
34158 *total = rtx_cost (left, mode, AND, 0, speed);
34159 *total += COSTS_N_INSNS (1);
34160 return true;
34163 /* 2 insns. */
34164 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34166 *total = rtx_cost (left, mode, AND, 0, speed);
34167 *total += COSTS_N_INSNS (2);
34168 return true;
34172 *total = COSTS_N_INSNS (1);
34173 return false;
34175 case IOR:
34176 /* FIXME */
34177 *total = COSTS_N_INSNS (1);
34178 return true;
34180 case CLZ:
34181 case XOR:
34182 case ZERO_EXTRACT:
34183 *total = COSTS_N_INSNS (1);
34184 return false;
34186 case ASHIFT:
34187 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34188 the sign extend and shift separately within the insn. */
34189 if (TARGET_EXTSWSLI && mode == DImode
34190 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34191 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34193 *total = 0;
34194 return false;
34196 /* fall through */
34198 case ASHIFTRT:
34199 case LSHIFTRT:
34200 case ROTATE:
34201 case ROTATERT:
34202 /* Handle mul_highpart. */
34203 if (outer_code == TRUNCATE
34204 && GET_CODE (XEXP (x, 0)) == MULT)
34206 if (mode == DImode)
34207 *total = rs6000_cost->muldi;
34208 else
34209 *total = rs6000_cost->mulsi;
34210 return true;
34212 else if (outer_code == AND)
34213 *total = 0;
34214 else
34215 *total = COSTS_N_INSNS (1);
34216 return false;
34218 case SIGN_EXTEND:
34219 case ZERO_EXTEND:
34220 if (GET_CODE (XEXP (x, 0)) == MEM)
34221 *total = 0;
34222 else
34223 *total = COSTS_N_INSNS (1);
34224 return false;
34226 case COMPARE:
34227 case NEG:
34228 case ABS:
34229 if (!FLOAT_MODE_P (mode))
34231 *total = COSTS_N_INSNS (1);
34232 return false;
34234 /* FALLTHRU */
34236 case FLOAT:
34237 case UNSIGNED_FLOAT:
34238 case FIX:
34239 case UNSIGNED_FIX:
34240 case FLOAT_TRUNCATE:
34241 *total = rs6000_cost->fp;
34242 return false;
34244 case FLOAT_EXTEND:
34245 if (mode == DFmode)
34246 *total = rs6000_cost->sfdf_convert;
34247 else
34248 *total = rs6000_cost->fp;
34249 return false;
34251 case UNSPEC:
34252 switch (XINT (x, 1))
34254 case UNSPEC_FRSP:
34255 *total = rs6000_cost->fp;
34256 return true;
34258 default:
34259 break;
34261 break;
34263 case CALL:
34264 case IF_THEN_ELSE:
34265 if (!speed)
34267 *total = COSTS_N_INSNS (1);
34268 return true;
34270 else if (FLOAT_MODE_P (mode)
34271 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
34273 *total = rs6000_cost->fp;
34274 return false;
34276 break;
34278 case NE:
34279 case EQ:
34280 case GTU:
34281 case LTU:
34282 /* Carry bit requires mode == Pmode.
34283 NEG or PLUS already counted so only add one. */
34284 if (mode == Pmode
34285 && (outer_code == NEG || outer_code == PLUS))
34287 *total = COSTS_N_INSNS (1);
34288 return true;
34290 if (outer_code == SET)
34292 if (XEXP (x, 1) == const0_rtx)
34294 if (TARGET_ISEL && !TARGET_MFCRF)
34295 *total = COSTS_N_INSNS (8);
34296 else
34297 *total = COSTS_N_INSNS (2);
34298 return true;
34300 else
34302 *total = COSTS_N_INSNS (3);
34303 return false;
34306 /* FALLTHRU */
34308 case GT:
34309 case LT:
34310 case UNORDERED:
34311 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
34313 if (TARGET_ISEL && !TARGET_MFCRF)
34314 *total = COSTS_N_INSNS (8);
34315 else
34316 *total = COSTS_N_INSNS (2);
34317 return true;
34319 /* CC COMPARE. */
34320 if (outer_code == COMPARE)
34322 *total = 0;
34323 return true;
34325 break;
34327 default:
34328 break;
34331 return false;
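/* Editorial usage note: the middle end reaches this hook through
   rtx_cost (), e.g.

       cost = rtx_cost (x, SImode, SET, 1, /*speed=*/true);

   so a *total of 0 for a CONST_INT above means "this constant rides
   along in the surrounding instruction for free", while returning
   false asks the caller to recurse into the subexpressions.  */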
34334 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34336 static bool
34337 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34338 int opno, int *total, bool speed)
34340 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34342 fprintf (stderr,
34343 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34344 "opno = %d, total = %d, speed = %s, x:\n",
34345 ret ? "complete" : "scan inner",
34346 GET_MODE_NAME (mode),
34347 GET_RTX_NAME (outer_code),
34348 opno,
34349 *total,
34350 speed ? "true" : "false");
34352 debug_rtx (x);
34354 return ret;
34357 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34359 static int
34360 rs6000_debug_address_cost (rtx x, machine_mode mode,
34361 addr_space_t as, bool speed)
34363 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34365 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34366 ret, speed ? "true" : "false");
34367 debug_rtx (x);
34369 return ret;
34373 /* A C expression returning the cost of moving data from a register of class
34374 CLASS1 to one of CLASS2. */
34376 static int
34377 rs6000_register_move_cost (machine_mode mode,
34378 reg_class_t from, reg_class_t to)
34380 int ret;
34382 if (TARGET_DEBUG_COST)
34383 dbg_cost_ctrl++;
34385 /* Moves from/to GENERAL_REGS. */
34386 if (reg_classes_intersect_p (to, GENERAL_REGS)
34387 || reg_classes_intersect_p (from, GENERAL_REGS))
34389 reg_class_t rclass = from;
34391 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34392 rclass = to;
34394 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34395 ret = (rs6000_memory_move_cost (mode, rclass, false)
34396 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34398 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34399 shift. */
34400 else if (rclass == CR_REGS)
34401 ret = 4;
34403 /* For those processors that have slow LR/CTR moves, make them more
34404 expensive than memory in order to bias spills to memory. */
34405 else if ((rs6000_cpu == PROCESSOR_POWER6
34406 || rs6000_cpu == PROCESSOR_POWER7
34407 || rs6000_cpu == PROCESSOR_POWER8
34408 || rs6000_cpu == PROCESSOR_POWER9)
34409 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34410 ret = 6 * hard_regno_nregs[0][mode];
34412 else
34413 /* A move will cost one instruction per GPR moved. */
34414 ret = 2 * hard_regno_nregs[0][mode];
34417 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34418 else if (VECTOR_MEM_VSX_P (mode)
34419 && reg_classes_intersect_p (to, VSX_REGS)
34420 && reg_classes_intersect_p (from, VSX_REGS))
34421 ret = 2 * hard_regno_nregs[32][mode];
34423 /* Moving between two similar registers is just one instruction. */
34424 else if (reg_classes_intersect_p (to, from))
34425 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34427 /* Everything else has to go through GENERAL_REGS. */
34428 else
34429 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34430 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34432 if (TARGET_DEBUG_COST)
34434 if (dbg_cost_ctrl == 1)
34435 fprintf (stderr,
34436 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34437 ret, GET_MODE_NAME (mode), reg_class_names[from],
34438 reg_class_names[to]);
34439 dbg_cost_ctrl--;
34442 return ret;
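/* Editorial example: a GPR-to-GPR move of a single-register mode costs
   2 * hard_regno_nregs[0][mode] = 2, whereas a GPR<->FPR move is priced
   as a store plus a load via rs6000_memory_move_cost, deliberately
   biasing the register allocator away from cross-file moves.  */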
34445 /* A C expression returning the cost of moving data of MODE from a register to
34446 or from memory. */
34448 static int
34449 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34450 bool in ATTRIBUTE_UNUSED)
34452 int ret;
34454 if (TARGET_DEBUG_COST)
34455 dbg_cost_ctrl++;
34457 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34458 ret = 4 * hard_regno_nregs[0][mode];
34459 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34460 || reg_classes_intersect_p (rclass, VSX_REGS)))
34461 ret = 4 * hard_regno_nregs[32][mode];
34462 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34463 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
34464 else
34465 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34467 if (TARGET_DEBUG_COST)
34469 if (dbg_cost_ctrl == 1)
34470 fprintf (stderr,
34471 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34472 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34473 dbg_cost_ctrl--;
34476 return ret;
34479 /* Returns the decl of a target-specific builtin that implements the
34480    reciprocal of the function, or NULL_TREE if not available. */
34482 static tree
34483 rs6000_builtin_reciprocal (tree fndecl)
34485 switch (DECL_FUNCTION_CODE (fndecl))
34487 case VSX_BUILTIN_XVSQRTDP:
34488 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34489 return NULL_TREE;
34491 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34493 case VSX_BUILTIN_XVSQRTSP:
34494 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34495 return NULL_TREE;
34497 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34499 default:
34500 return NULL_TREE;
34504 /* Load up a constant. If the mode is a vector mode, splat the value across
34505 all of the vector elements. */
34507 static rtx
34508 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34510 rtx reg;
34512 if (mode == SFmode || mode == DFmode)
34514 rtx d = const_double_from_real_value (dconst, mode);
34515 reg = force_reg (mode, d);
34517 else if (mode == V4SFmode)
34519 rtx d = const_double_from_real_value (dconst, SFmode);
34520 rtvec v = gen_rtvec (4, d, d, d, d);
34521 reg = gen_reg_rtx (mode);
34522 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34524 else if (mode == V2DFmode)
34526 rtx d = const_double_from_real_value (dconst, DFmode);
34527 rtvec v = gen_rtvec (2, d, d);
34528 reg = gen_reg_rtx (mode);
34529 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34531 else
34532 gcc_unreachable ();
34534 return reg;
34537 /* Generate an FMA instruction. */
34539 static void
34540 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34542 machine_mode mode = GET_MODE (target);
34543 rtx dst;
34545 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34546 gcc_assert (dst != NULL);
34548 if (dst != target)
34549 emit_move_insn (target, dst);
34552 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34554 static void
34555 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34557 machine_mode mode = GET_MODE (dst);
34558 rtx r;
34560 /* This is a tad more complicated, since the fnma_optab is for
34561 a different expression: fma(-m1, m2, a), which is the same
34562 thing except in the case of signed zeros.
34564 Fortunately we know that if FMA is supported that FNMSUB is
34565 also supported in the ISA. Just expand it directly. */
34567 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34569 r = gen_rtx_NEG (mode, a);
34570 r = gen_rtx_FMA (mode, m1, m2, r);
34571 r = gen_rtx_NEG (mode, r);
34572 emit_insn (gen_rtx_SET (dst, r));
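/* Editorial note on the identity used above:
   -fma (m1, m2, -a) = -(m1*m2 - a) = a - m1*m2, which is FNMSUB,
   while fnma's fma (-m1, m2, a) = a - m1*m2 agrees except possibly in
   the sign of a zero result (e.g. with m1*m2 == +0.0 and a == +0.0,
   FNMSUB gives -0.0 but fnma gives +0.0).  */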
34575 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34576 add a reg_note saying that this was a division. Support both scalar and
34577 vector divide. Assumes no trapping math and finite arguments. */
34579 void
34580 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34582 machine_mode mode = GET_MODE (dst);
34583 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34584 int i;
34586 /* Low precision estimates guarantee 5 bits of accuracy. High
34587 precision estimates guarantee 14 bits of accuracy. SFmode
34588 requires 23 bits of accuracy. DFmode requires 52 bits of
34589 accuracy. Each pass at least doubles the accuracy, leading
34590 to the following. */
34591 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34592 if (mode == DFmode || mode == V2DFmode)
34593 passes++;
34595 enum insn_code code = optab_handler (smul_optab, mode);
34596 insn_gen_fn gen_mul = GEN_FCN (code);
34598 gcc_assert (code != CODE_FOR_nothing);
34600 one = rs6000_load_constant_and_splat (mode, dconst1);
34602 /* x0 = 1./d estimate */
34603 x0 = gen_reg_rtx (mode);
34604 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34605 UNSPEC_FRES)));
34607 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34608 if (passes > 1) {
34610 /* e0 = 1. - d * x0 */
34611 e0 = gen_reg_rtx (mode);
34612 rs6000_emit_nmsub (e0, d, x0, one);
34614 /* x1 = x0 + e0 * x0 */
34615 x1 = gen_reg_rtx (mode);
34616 rs6000_emit_madd (x1, e0, x0, x0);
34618 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34619 ++i, xprev = xnext, eprev = enext) {
34621 /* enext = eprev * eprev */
34622 enext = gen_reg_rtx (mode);
34623 emit_insn (gen_mul (enext, eprev, eprev));
34625 /* xnext = xprev + enext * xprev */
34626 xnext = gen_reg_rtx (mode);
34627 rs6000_emit_madd (xnext, enext, xprev, xprev);
34630 } else
34631 xprev = x0;
34633 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34635 /* u = n * xprev */
34636 u = gen_reg_rtx (mode);
34637 emit_insn (gen_mul (u, n, xprev));
34639 /* v = n - (d * u) */
34640 v = gen_reg_rtx (mode);
34641 rs6000_emit_nmsub (v, d, u, n);
34643 /* dst = (v * xprev) + u */
34644 rs6000_emit_madd (dst, v, xprev, u);
34646 if (note_p)
34647 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
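/* Editorial sketch of the single-pass (TARGET_RECIP_PRECISION, SFmode)
   case in scalar form:

       x0  = fres (d)          ;  ~14-bit reciprocal estimate
       u   = n * x0
       v   = n - d * u         ;  one FNMSUB
       dst = v * x0 + u        ;  one FMA

   i.e. the final Newton-Raphson step is folded into the multiply by N.  */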
34650 /* Goldschmidt's Algorithm for single/double-precision floating point
34651 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34653 void
34654 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34656 machine_mode mode = GET_MODE (src);
34657 rtx e = gen_reg_rtx (mode);
34658 rtx g = gen_reg_rtx (mode);
34659 rtx h = gen_reg_rtx (mode);
34661 /* Low precision estimates guarantee 5 bits of accuracy. High
34662 precision estimates guarantee 14 bits of accuracy. SFmode
34663 requires 23 bits of accuracy. DFmode requires 52 bits of
34664 accuracy. Each pass at least doubles the accuracy, leading
34665 to the following. */
34666 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34667 if (mode == DFmode || mode == V2DFmode)
34668 passes++;
34670 int i;
34671 rtx mhalf;
34672 enum insn_code code = optab_handler (smul_optab, mode);
34673 insn_gen_fn gen_mul = GEN_FCN (code);
34675 gcc_assert (code != CODE_FOR_nothing);
34677 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34679 /* e = rsqrt estimate */
34680 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34681 UNSPEC_RSQRT)));
34683 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34684 if (!recip)
34686 rtx zero = force_reg (mode, CONST0_RTX (mode));
34688 if (mode == SFmode)
34690 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34691 e, zero, mode, 0);
34692 if (target != e)
34693 emit_move_insn (e, target);
34695 else
34697 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34698 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34702 /* g = sqrt estimate. */
34703 emit_insn (gen_mul (g, e, src));
34704 /* h = 1/(2*sqrt) estimate. */
34705 emit_insn (gen_mul (h, e, mhalf));
34707 if (recip)
34709 if (passes == 1)
34711 rtx t = gen_reg_rtx (mode);
34712 rs6000_emit_nmsub (t, g, h, mhalf);
34713 /* Apply correction directly to 1/rsqrt estimate. */
34714 rs6000_emit_madd (dst, e, t, e);
34716 else
34718 for (i = 0; i < passes; i++)
34720 rtx t1 = gen_reg_rtx (mode);
34721 rtx g1 = gen_reg_rtx (mode);
34722 rtx h1 = gen_reg_rtx (mode);
34724 rs6000_emit_nmsub (t1, g, h, mhalf);
34725 rs6000_emit_madd (g1, g, t1, g);
34726 rs6000_emit_madd (h1, h, t1, h);
34728 g = g1;
34729 h = h1;
34731 /* Multiply by 2 for 1/rsqrt. */
34732 emit_insn (gen_add3_insn (dst, h, h));
34735 else
34737 rtx t = gen_reg_rtx (mode);
34738 rs6000_emit_nmsub (t, g, h, mhalf);
34739 rs6000_emit_madd (dst, g, t, g);
34742 return;
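/* Editorial sketch of one Goldschmidt pass in scalar form, with
   g ~ sqrt(src) and h ~ 1/(2*sqrt(src)) from the initial estimates:

       t  = 0.5 - g * h        ;  FNMSUB against mhalf
       g' = g + g * t          ;  refined sqrt
       h' = h + h * t          ;  refined 1/(2*sqrt)

   The rsqrt result is then 2*h (the final add3), and the sqrt result
   applies the last correction directly as dst = g + g*t.  */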
34745 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34746 (Power7) targets. DST is the target, and SRC is the argument operand. */
34748 void
34749 rs6000_emit_popcount (rtx dst, rtx src)
34751 machine_mode mode = GET_MODE (dst);
34752 rtx tmp1, tmp2;
34754 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34755 if (TARGET_POPCNTD)
34757 if (mode == SImode)
34758 emit_insn (gen_popcntdsi2 (dst, src));
34759 else
34760 emit_insn (gen_popcntddi2 (dst, src));
34761 return;
34764 tmp1 = gen_reg_rtx (mode);
34766 if (mode == SImode)
34768 emit_insn (gen_popcntbsi2 (tmp1, src));
34769 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34770 NULL_RTX, 0);
34771 tmp2 = force_reg (SImode, tmp2);
34772 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34774 else
34776 emit_insn (gen_popcntbdi2 (tmp1, src));
34777 tmp2 = expand_mult (DImode, tmp1,
34778 GEN_INT ((HOST_WIDE_INT)
34779 0x01010101 << 32 | 0x01010101),
34780 NULL_RTX, 0);
34781 tmp2 = force_reg (DImode, tmp2);
34782 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
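/* Editorial sketch: a scalar model of the SImode popcntb path above,
   assuming 32-bit unsigned arithmetic; not part of GCC itself.  */
#if 0
static unsigned int
popcount_si_model (unsigned int per_byte_counts)
{
  /* After popcntb each byte holds the population count (<= 8) of the
     corresponding source byte, so the byte-wise additions below cannot
     carry.  Multiplying by 0x01010101 sums all four counts into the
     most significant byte; the shift extracts it.  */
  return (per_byte_counts * 0x01010101u) >> 24;
}
#endif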
34787 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34788 target, and SRC is the argument operand. */
34790 void
34791 rs6000_emit_parity (rtx dst, rtx src)
34793 machine_mode mode = GET_MODE (dst);
34794 rtx tmp;
34796 tmp = gen_reg_rtx (mode);
34798 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34799 if (TARGET_CMPB)
34801 if (mode == SImode)
34803 emit_insn (gen_popcntbsi2 (tmp, src));
34804 emit_insn (gen_paritysi2_cmpb (dst, tmp));
34806 else
34808 emit_insn (gen_popcntbdi2 (tmp, src));
34809 emit_insn (gen_paritydi2_cmpb (dst, tmp));
34811 return;
34814 if (mode == SImode)
34816 /* Is mult+shift >= shift+xor+shift+xor? */
34817 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
34819 rtx tmp1, tmp2, tmp3, tmp4;
34821 tmp1 = gen_reg_rtx (SImode);
34822 emit_insn (gen_popcntbsi2 (tmp1, src));
34824 tmp2 = gen_reg_rtx (SImode);
34825 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
34826 tmp3 = gen_reg_rtx (SImode);
34827 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
34829 tmp4 = gen_reg_rtx (SImode);
34830 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
34831 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
34833 else
34834 rs6000_emit_popcount (tmp, src);
34835 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
34837 else
34839 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
34840 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
34842 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
34844 tmp1 = gen_reg_rtx (DImode);
34845 emit_insn (gen_popcntbdi2 (tmp1, src));
34847 tmp2 = gen_reg_rtx (DImode);
34848 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
34849 tmp3 = gen_reg_rtx (DImode);
34850 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
34852 tmp4 = gen_reg_rtx (DImode);
34853 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
34854 tmp5 = gen_reg_rtx (DImode);
34855 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
34857 tmp6 = gen_reg_rtx (DImode);
34858 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
34859 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
34861 else
34862 rs6000_emit_popcount (tmp, src);
34863 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
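/* Editorial note: the shift+xor ladder above folds the per-byte counts
   from popcntb pairwise -- for SImode, t = c ^ (c >> 16) and then
   t ^ (t >> 8) leave the xor of all four byte counts in the low byte,
   and the final "and 1" extracts its parity; the mult+shift alternative
   reuses the 0x01010101 summation from rs6000_emit_popcount.  */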
34867 /* Expand an Altivec constant permutation for little endian mode.
34868 There are two issues: First, the two input operands must be
34869 swapped so that together they form a double-wide array in LE
34870 order. Second, the vperm instruction has surprising behavior
34871 in LE mode: it interprets the elements of the source vectors
34872 in BE mode ("left to right") and interprets the elements of
34873 the destination vector in LE mode ("right to left"). To
34874 correct for this, we must subtract each element of the permute
34875 control vector from 31.
34877 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
34878 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
34879 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
34880 serve as the permute control vector. Then, in BE mode,
34882 vperm 9,10,11,12
34884 places the desired result in vr9. However, in LE mode the
34885 vector contents will be
34887 vr10 = 00000003 00000002 00000001 00000000
34888 vr11 = 00000007 00000006 00000005 00000004
34890 The result of the vperm using the same permute control vector is
34892 vr9 = 05000000 07000000 01000000 03000000
34894 That is, the leftmost 4 bytes of vr10 are interpreted as the
34895 source for the rightmost 4 bytes of vr9, and so on.
34897 If we change the permute control vector to
34899 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
34901 and issue
34903 vperm 9,11,10,12
34905 we get the desired
34907 vr9 = 00000006 00000004 00000002 00000000. */
34909 void
34910 altivec_expand_vec_perm_const_le (rtx operands[4])
34912 unsigned int i;
34913 rtx perm[16];
34914 rtx constv, unspec;
34915 rtx target = operands[0];
34916 rtx op0 = operands[1];
34917 rtx op1 = operands[2];
34918 rtx sel = operands[3];
34920 /* Unpack and adjust the constant selector. */
34921 for (i = 0; i < 16; ++i)
34923 rtx e = XVECEXP (sel, 0, i);
34924 unsigned int elt = 31 - (INTVAL (e) & 31);
34925 perm[i] = GEN_INT (elt);
34928 /* Expand to a permute, swapping the inputs and using the
34929 adjusted selector. */
34930 if (!REG_P (op0))
34931 op0 = force_reg (V16QImode, op0);
34932 if (!REG_P (op1))
34933 op1 = force_reg (V16QImode, op1);
34935 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
34936 constv = force_reg (V16QImode, constv);
34937 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
34938 UNSPEC_VPERM);
34939 if (!REG_P (target))
34941 rtx tmp = gen_reg_rtx (V16QImode);
34942 emit_move_insn (tmp, unspec);
34943 unspec = tmp;
34946 emit_move_insn (target, unspec);
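/* Editorial note: since the selector elements are taken mod 32,
   31 - (e & 31) is the same as (~e) & 31, i.e. the adjustment is a
   bitwise complement of the low five bits; the non-constant case in
   altivec_expand_vec_perm_le below exploits exactly this.  */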
34949 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
34950 permute control vector. But here it's not a constant, so we must
34951 generate a vector NAND or NOR to do the adjustment. */
34953 void
34954 altivec_expand_vec_perm_le (rtx operands[4])
34956 rtx notx, iorx, unspec;
34957 rtx target = operands[0];
34958 rtx op0 = operands[1];
34959 rtx op1 = operands[2];
34960 rtx sel = operands[3];
34961 rtx tmp = target;
34962 rtx norreg = gen_reg_rtx (V16QImode);
34963 machine_mode mode = GET_MODE (target);
34965 /* Get everything in regs so the pattern matches. */
34966 if (!REG_P (op0))
34967 op0 = force_reg (mode, op0);
34968 if (!REG_P (op1))
34969 op1 = force_reg (mode, op1);
34970 if (!REG_P (sel))
34971 sel = force_reg (V16QImode, sel);
34972 if (!REG_P (target))
34973 tmp = gen_reg_rtx (mode);
34975 if (TARGET_P9_VECTOR)
34977 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
34978 UNSPEC_VPERMR);
34980 else
34982 /* Invert the selector with a VNAND if available, else a VNOR.
34983 The VNAND is preferred for future fusion opportunities. */
34984 notx = gen_rtx_NOT (V16QImode, sel);
34985 iorx = (TARGET_P8_VECTOR
34986 ? gen_rtx_IOR (V16QImode, notx, notx)
34987 : gen_rtx_AND (V16QImode, notx, notx));
34988 emit_insn (gen_rtx_SET (norreg, iorx));
34990 /* Permute with operands reversed and adjusted selector. */
34991 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
34992 UNSPEC_VPERM);
34995 /* Copy into target, possibly by way of a register. */
34996 if (!REG_P (target))
34998 emit_move_insn (tmp, unspec);
34999 unspec = tmp;
35002 emit_move_insn (target, unspec);
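/* For illustration: feeding the selector to itself makes both forms
   compute the complement, since ~x | ~x == ~x (vnand sel,sel) and
   ~x & ~x == ~x (vnor sel,sel); the RTL above encodes these as
   (ior (not sel) (not sel)) and (and (not sel) (not sel)).  */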
35005 /* Expand an Altivec constant permutation. Return true if we match
35006 an efficient implementation; false to fall back to VPERM. */
35008 bool
35009 altivec_expand_vec_perm_const (rtx operands[4])
35011 struct altivec_perm_insn {
35012 HOST_WIDE_INT mask;
35013 enum insn_code impl;
35014 unsigned char perm[16];
35016 static const struct altivec_perm_insn patterns[] = {
35017 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35018 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35019 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35020 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35021 { OPTION_MASK_ALTIVEC,
35022 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35023 : CODE_FOR_altivec_vmrglb_direct),
35024 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35025 { OPTION_MASK_ALTIVEC,
35026 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35027 : CODE_FOR_altivec_vmrglh_direct),
35028 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35029 { OPTION_MASK_ALTIVEC,
35030 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35031 : CODE_FOR_altivec_vmrglw_direct),
35032 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35033 { OPTION_MASK_ALTIVEC,
35034 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35035 : CODE_FOR_altivec_vmrghb_direct),
35036 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35037 { OPTION_MASK_ALTIVEC,
35038 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35039 : CODE_FOR_altivec_vmrghh_direct),
35040 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35041 { OPTION_MASK_ALTIVEC,
35042 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35043 : CODE_FOR_altivec_vmrghw_direct),
35044 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35045 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
35046 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35047 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
35048 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35051 unsigned int i, j, elt, which;
35052 unsigned char perm[16];
35053 rtx target, op0, op1, sel, x;
35054 bool one_vec;
35056 target = operands[0];
35057 op0 = operands[1];
35058 op1 = operands[2];
35059 sel = operands[3];
35061 /* Unpack the constant selector. */
35062 for (i = which = 0; i < 16; ++i)
35064 rtx e = XVECEXP (sel, 0, i);
35065 elt = INTVAL (e) & 31;
35066 which |= (elt < 16 ? 1 : 2);
35067 perm[i] = elt;
35070 /* Simplify the constant selector based on operands. */
35071 switch (which)
35073 default:
35074 gcc_unreachable ();
35076 case 3:
35077 one_vec = false;
35078 if (!rtx_equal_p (op0, op1))
35079 break;
35080 /* FALLTHRU */
35082 case 2:
35083 for (i = 0; i < 16; ++i)
35084 perm[i] &= 15;
35085 op0 = op1;
35086 one_vec = true;
35087 break;
35089 case 1:
35090 op1 = op0;
35091 one_vec = true;
35092 break;
35095 /* Look for splat patterns. */
35096 if (one_vec)
35098 elt = perm[0];
35100 for (i = 0; i < 16; ++i)
35101 if (perm[i] != elt)
35102 break;
35103 if (i == 16)
35105 if (!BYTES_BIG_ENDIAN)
35106 elt = 15 - elt;
35107 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35108 return true;
35111 if (elt % 2 == 0)
35113 for (i = 0; i < 16; i += 2)
35114 if (perm[i] != elt || perm[i + 1] != elt + 1)
35115 break;
35116 if (i == 16)
35118 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35119 x = gen_reg_rtx (V8HImode);
35120 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35121 GEN_INT (field)));
35122 emit_move_insn (target, gen_lowpart (V16QImode, x));
35123 return true;
35127 if (elt % 4 == 0)
35129 for (i = 0; i < 16; i += 4)
35130 if (perm[i] != elt
35131 || perm[i + 1] != elt + 1
35132 || perm[i + 2] != elt + 2
35133 || perm[i + 3] != elt + 3)
35134 break;
35135 if (i == 16)
35137 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35138 x = gen_reg_rtx (V4SImode);
35139 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35140 GEN_INT (field)));
35141 emit_move_insn (target, gen_lowpart (V16QImode, x));
35142 return true;
35147 /* Look for merge and pack patterns. */
35148 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35150 bool swapped;
35152 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35153 continue;
35155 elt = patterns[j].perm[0];
35156 if (perm[0] == elt)
35157 swapped = false;
35158 else if (perm[0] == elt + 16)
35159 swapped = true;
35160 else
35161 continue;
35162 for (i = 1; i < 16; ++i)
35164 elt = patterns[j].perm[i];
35165 if (swapped)
35166 elt = (elt >= 16 ? elt - 16 : elt + 16);
35167 else if (one_vec && elt >= 16)
35168 elt -= 16;
35169 if (perm[i] != elt)
35170 break;
35172 if (i == 16)
35174 enum insn_code icode = patterns[j].impl;
35175 machine_mode omode = insn_data[icode].operand[0].mode;
35176 machine_mode imode = insn_data[icode].operand[1].mode;
35178 /* For little-endian, don't use vpkuwum and vpkuhum if the
35179 underlying vector type is not V4SI and V8HI, respectively.
35180 For example, using vpkuwum with a V8HI picks up the even
35181 halfwords (BE numbering) when the even halfwords (LE
35182 numbering) are what we need. */
35183 if (!BYTES_BIG_ENDIAN
35184 && icode == CODE_FOR_altivec_vpkuwum_direct
35185 && ((GET_CODE (op0) == REG
35186 && GET_MODE (op0) != V4SImode)
35187 || (GET_CODE (op0) == SUBREG
35188 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35189 continue;
35190 if (!BYTES_BIG_ENDIAN
35191 && icode == CODE_FOR_altivec_vpkuhum_direct
35192 && ((GET_CODE (op0) == REG
35193 && GET_MODE (op0) != V8HImode)
35194 || (GET_CODE (op0) == SUBREG
35195 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35196 continue;
35198 /* For little-endian, the two input operands must be swapped
35199 (or swapped back) to ensure proper right-to-left numbering
35200 from 0 to 2N-1. */
35201 if (swapped ^ !BYTES_BIG_ENDIAN)
35202 std::swap (op0, op1);
35203 if (imode != V16QImode)
35205 op0 = gen_lowpart (imode, op0);
35206 op1 = gen_lowpart (imode, op1);
35208 if (omode == V16QImode)
35209 x = target;
35210 else
35211 x = gen_reg_rtx (omode);
35212 emit_insn (GEN_FCN (icode) (x, op0, op1));
35213 if (omode != V16QImode)
35214 emit_move_insn (target, gen_lowpart (V16QImode, x));
35215 return true;
35219 if (!BYTES_BIG_ENDIAN)
35221 altivec_expand_vec_perm_const_le (operands);
35222 return true;
35225 return false;
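/* As a worked example (hypothetical selector): the constant
   { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 }
   matches the third table entry above, so a big-endian target
   expands it to a single vmrghb instead of a vperm with a loaded
   control vector.  */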
35228 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
35229 Return true if we match an efficient implementation. */
35231 static bool
35232 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35233 unsigned char perm0, unsigned char perm1)
35235 rtx x;
35237 /* If both selectors come from the same operand, fold to single op. */
35238 if ((perm0 & 2) == (perm1 & 2))
35240 if (perm0 & 2)
35241 op0 = op1;
35242 else
35243 op1 = op0;
35245 /* If both operands are equal, fold to simpler permutation. */
35246 if (rtx_equal_p (op0, op1))
35248 perm0 = perm0 & 1;
35249 perm1 = (perm1 & 1) + 2;
35251 /* If the first selector comes from the second operand, swap. */
35252 else if (perm0 & 2)
35254 if (perm1 & 2)
35255 return false;
35256 perm0 -= 2;
35257 perm1 += 2;
35258 std::swap (op0, op1);
35260 /* If the second selector does not come from the second operand, fail. */
35261 else if ((perm1 & 2) == 0)
35262 return false;
35264 /* Success! */
35265 if (target != NULL)
35267 machine_mode vmode, dmode;
35268 rtvec v;
35270 vmode = GET_MODE (target);
35271 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35272 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
35273 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35274 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35275 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35276 emit_insn (gen_rtx_SET (target, x));
35278 return true;
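/* For illustration (hypothetical selectors): perm0 = 3, perm1 = 0
   asks for element 1 of op1 followed by element 0 of op0.  The code
   above swaps the operands (perm0 -= 2, perm1 += 2), giving
   perm0 = 1, perm1 = 2, so the emitted vec_select reads lane 1 of
   the new op0 and lane 0 of the new op1 from the concatenation.  */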
35281 bool
35282 rs6000_expand_vec_perm_const (rtx operands[4])
35284 rtx target, op0, op1, sel;
35285 unsigned char perm0, perm1;
35287 target = operands[0];
35288 op0 = operands[1];
35289 op1 = operands[2];
35290 sel = operands[3];
35292 /* Unpack the constant selector. */
35293 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
35294 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
35296 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
35299 /* Test whether a constant permutation is supported. */
35301 static bool
35302 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
35303 const unsigned char *sel)
35305 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35306 if (TARGET_ALTIVEC)
35307 return true;
35309 /* Check for ps_merge* or evmerge* insns. */
35310 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
35311 || (TARGET_SPE && vmode == V2SImode))
35313 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35314 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35315 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
35318 return false;
35321 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35323 static void
35324 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35325 machine_mode vmode, unsigned nelt, rtx perm[])
35327 machine_mode imode;
35328 rtx x;
35330 imode = vmode;
35331 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
35333 imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
35334 imode = mode_for_vector (imode, nelt);
35337 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
35338 x = expand_vec_perm (vmode, op0, op1, x, target);
35339 if (x != target)
35340 emit_move_insn (target, x);
35343 /* Expand an extract even operation. */
35345 void
35346 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35348 machine_mode vmode = GET_MODE (target);
35349 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35350 rtx perm[16];
35352 for (i = 0; i < nelt; i++)
35353 perm[i] = GEN_INT (i * 2);
35355 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
35358 /* Expand a vector interleave operation. */
35360 void
35361 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35363 machine_mode vmode = GET_MODE (target);
35364 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35365 rtx perm[16];
35367 high = (highp ? 0 : nelt / 2);
35368 for (i = 0; i < nelt / 2; i++)
35370 perm[i * 2] = GEN_INT (i + high);
35371 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
35374 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
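/* For illustration (hypothetical V4SI operands): highp yields the
   selector { 0, 4, 1, 5 } and !highp yields { 2, 6, 3, 7 }, the
   element pairings performed by vmrghw/vmrglw on a big-endian
   target.  */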
35377 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
35378 void
35379 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35381 HOST_WIDE_INT hwi_scale (scale);
35382 REAL_VALUE_TYPE r_pow;
35383 rtvec v = rtvec_alloc (2);
35384 rtx elt;
35385 rtx scale_vec = gen_reg_rtx (V2DFmode);
35386 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35387 elt = const_double_from_real_value (r_pow, DFmode);
35388 RTVEC_ELT (v, 0) = elt;
35389 RTVEC_ELT (v, 1) = elt;
35390 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35391 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
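/* For illustration (hypothetical call): rs6000_scale_v2df (tgt, src, 3)
   computes r_pow = 2.0**3 == 8.0, broadcasts { 8.0, 8.0 } into
   scale_vec, and multiplies, so each element of TGT is the matching
   element of SRC times 8.0.  */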
35394 /* Return an RTX representing where to find the function value of a
35395 function returning MODE. */
35396 static rtx
35397 rs6000_complex_function_value (machine_mode mode)
35399 unsigned int regno;
35400 rtx r1, r2;
35401 machine_mode inner = GET_MODE_INNER (mode);
35402 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35404 if (TARGET_FLOAT128
35405 && (mode == KCmode
35406 || (mode == TCmode && TARGET_IEEEQUAD)))
35407 regno = ALTIVEC_ARG_RETURN;
35409 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35410 regno = FP_ARG_RETURN;
35412 else
35414 regno = GP_ARG_RETURN;
35416 /* 32-bit is OK since it'll go in r3/r4. */
35417 if (TARGET_32BIT && inner_bytes >= 4)
35418 return gen_rtx_REG (mode, regno);
35421 if (inner_bytes >= 8)
35422 return gen_rtx_REG (mode, regno);
35424 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35425 const0_rtx);
35426 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35427 GEN_INT (inner_bytes));
35428 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
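/* For illustration (hypothetical cases, hard float): an SCmode value
   has inner_bytes == 4 and is returned as a PARALLEL with the real
   part in fp1 at offset 0 and the imaginary part in fp2 at offset 4,
   while a DCmode value has inner_bytes == 8 and is returned whole
   starting at fp1.  */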
35431 /* Return an rtx describing a return value of MODE as a PARALLEL
35432 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35433 stride REG_STRIDE. */
35435 static rtx
35436 rs6000_parallel_return (machine_mode mode,
35437 int n_elts, machine_mode elt_mode,
35438 unsigned int regno, unsigned int reg_stride)
35440 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35442 int i;
35443 for (i = 0; i < n_elts; i++)
35445 rtx r = gen_rtx_REG (elt_mode, regno);
35446 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35447 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35448 regno += reg_stride;
35451 return par;
35454 /* Target hook for TARGET_FUNCTION_VALUE.
35456 On the SPE, both FPs and vectors are returned in r3.
35458 On RS/6000 an integer value is in r3 and a floating-point value is in
35459 fp1, unless -msoft-float. */
35461 static rtx
35462 rs6000_function_value (const_tree valtype,
35463 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35464 bool outgoing ATTRIBUTE_UNUSED)
35466 machine_mode mode;
35467 unsigned int regno;
35468 machine_mode elt_mode;
35469 int n_elts;
35471 /* Special handling for structs in darwin64. */
35472 if (TARGET_MACHO
35473 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35475 CUMULATIVE_ARGS valcum;
35476 rtx valret;
35478 valcum.words = 0;
35479 valcum.fregno = FP_ARG_MIN_REG;
35480 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35481 /* Do a trial code generation as if this were going to be passed as
35482 an argument; if any part goes in memory, we return NULL. */
35483 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35484 if (valret)
35485 return valret;
35486 /* Otherwise fall through to standard ABI rules. */
35489 mode = TYPE_MODE (valtype);
35491 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35492 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35494 int first_reg, n_regs;
35496 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35498 /* _Decimal128 must use even/odd register pairs. */
35499 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35500 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35502 else
35504 first_reg = ALTIVEC_ARG_RETURN;
35505 n_regs = 1;
35508 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35511   /* Some return value types need to be split in -mpowerpc64, 32-bit ABI.  */
35512 if (TARGET_32BIT && TARGET_POWERPC64)
35513 switch (mode)
35515 default:
35516 break;
35517 case DImode:
35518 case SCmode:
35519 case DCmode:
35520 case TCmode:
35521 int count = GET_MODE_SIZE (mode) / 4;
35522 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35525 if ((INTEGRAL_TYPE_P (valtype)
35526 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35527 || POINTER_TYPE_P (valtype))
35528 mode = TARGET_32BIT ? SImode : DImode;
35530 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35531 /* _Decimal128 must use an even/odd register pair. */
35532 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35533 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
35534 && !FLOAT128_VECTOR_P (mode)
35535 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
35536 regno = FP_ARG_RETURN;
35537 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35538 && targetm.calls.split_complex_arg)
35539 return rs6000_complex_function_value (mode);
35540 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35541 return register is used in both cases, and we won't see V2DImode/V2DFmode
35542 for pure altivec, combine the two cases. */
35543 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35544 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35545 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35546 regno = ALTIVEC_ARG_RETURN;
35547 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
35548 && (mode == DFmode || mode == DCmode
35549 || FLOAT128_IBM_P (mode) || mode == TCmode))
35550 return spe_build_register_parallel (mode, GP_ARG_RETURN);
35551 else
35552 regno = GP_ARG_RETURN;
35554 return gen_rtx_REG (mode, regno);
35557 /* Define how to find the value returned by a library function
35558 assuming the value has mode MODE. */
35559 rtx
35560 rs6000_libcall_value (machine_mode mode)
35562 unsigned int regno;
35564   /* Long long return values need to be split in -mpowerpc64, 32-bit ABI.  */
35565 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35566 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35568 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35569 /* _Decimal128 must use an even/odd register pair. */
35570 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35571 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
35572 && TARGET_HARD_FLOAT && TARGET_FPRS
35573 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
35574 regno = FP_ARG_RETURN;
35575 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35576 return register is used in both cases, and we won't see V2DImode/V2DFmode
35577 for pure altivec, combine the two cases. */
35578 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35579 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35580 regno = ALTIVEC_ARG_RETURN;
35581 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35582 return rs6000_complex_function_value (mode);
35583 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
35584 && (mode == DFmode || mode == DCmode
35585 || FLOAT128_IBM_P (mode) || mode == TCmode))
35586 return spe_build_register_parallel (mode, GP_ARG_RETURN);
35587 else
35588 regno = GP_ARG_RETURN;
35590 return gen_rtx_REG (mode, regno);
35594 /* Return true if we use LRA instead of reload pass. */
35595 static bool
35596 rs6000_lra_p (void)
35598 return TARGET_LRA;
35601 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35602 Frame pointer elimination is automatically handled.
35604 For the RS/6000, if frame pointer elimination is being done, we would like
35605 to convert ap into fp, not sp.
35607 We need r30 if -mminimal-toc was specified, and there are constant pool
35608 references. */
35610 static bool
35611 rs6000_can_eliminate (const int from, const int to)
35613 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35614 ? ! frame_pointer_needed
35615 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35616 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
35617 : true);
35620 /* Define the offset between two registers, FROM to be eliminated and its
35621 replacement TO, at the start of a routine. */
35622 HOST_WIDE_INT
35623 rs6000_initial_elimination_offset (int from, int to)
35625 rs6000_stack_t *info = rs6000_stack_info ();
35626 HOST_WIDE_INT offset;
35628 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35629 offset = info->push_p ? 0 : -info->total_size;
35630 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35632 offset = info->push_p ? 0 : -info->total_size;
35633 if (FRAME_GROWS_DOWNWARD)
35634 offset += info->fixed_size + info->vars_size + info->parm_size;
35636 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35637 offset = FRAME_GROWS_DOWNWARD
35638 ? info->fixed_size + info->vars_size + info->parm_size
35639 : 0;
35640 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35641 offset = info->total_size;
35642 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35643 offset = info->push_p ? info->total_size : 0;
35644 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35645 offset = 0;
35646 else
35647 gcc_unreachable ();
35649 return offset;
35652 static rtx
35653 rs6000_dwarf_register_span (rtx reg)
35655 rtx parts[8];
35656 int i, words;
35657 unsigned regno = REGNO (reg);
35658 machine_mode mode = GET_MODE (reg);
35660 if (TARGET_SPE
35661 && regno < 32
35662 && (SPE_VECTOR_MODE (GET_MODE (reg))
35663 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
35664 && mode != SFmode && mode != SDmode && mode != SCmode)))
35665     ;
35666   else
35667 return NULL_RTX;
35669 regno = REGNO (reg);
35671 /* The duality of the SPE register size wreaks all kinds of havoc.
35672 This is a way of distinguishing r0 in 32-bits from r0 in
35673 64-bits. */
35674 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
35675 gcc_assert (words <= 4);
35676 for (i = 0; i < words; i++, regno++)
35678 if (BYTES_BIG_ENDIAN)
35680 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
35681 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
35683 else
35685 parts[2 * i] = gen_rtx_REG (SImode, regno);
35686 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
35690 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
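/* For illustration (hypothetical DImode value in r5 on a big-endian
   SPE target): words == 1, so the span is a two-element PARALLEL of
   the SImode high-part register FIRST_SPE_HIGH_REGNO + 5 followed by
   r5, telling the unwinder that the two 32-bit halves live in
   separate columns.  */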
35693 /* Fill in sizes for SPE register high parts in table used by unwinder. */
35695 static void
35696 rs6000_init_dwarf_reg_sizes_extra (tree address)
35698 if (TARGET_SPE)
35700 int i;
35701 machine_mode mode = TYPE_MODE (char_type_node);
35702 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35703 rtx mem = gen_rtx_MEM (BLKmode, addr);
35704 rtx value = gen_int_mode (4, mode);
35706 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
35708 int column = DWARF_REG_TO_UNWIND_COLUMN
35709 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35710 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35712 emit_move_insn (adjust_address (mem, mode, offset), value);
35716 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35718 int i;
35719 machine_mode mode = TYPE_MODE (char_type_node);
35720 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35721 rtx mem = gen_rtx_MEM (BLKmode, addr);
35722 rtx value = gen_int_mode (16, mode);
35724 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35725 The unwinder still needs to know the size of Altivec registers. */
35727 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35729 int column = DWARF_REG_TO_UNWIND_COLUMN
35730 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35731 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35733 emit_move_insn (adjust_address (mem, mode, offset), value);
35738 /* Map internal gcc register numbers to debug format register numbers.
35739 FORMAT specifies the type of debug register number to use:
35740 0 -- debug information, except for frame-related sections
35741 1 -- DWARF .debug_frame section
35742 2 -- DWARF .eh_frame section */
35744 unsigned int
35745 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35747 /* We never use the GCC internal number for SPE high registers.
35748 Those are mapped to the 1200..1231 range for all debug formats. */
35749 if (SPE_HIGH_REGNO_P (regno))
35750 return regno - FIRST_SPE_HIGH_REGNO + 1200;
35752 /* Except for the above, we use the internal number for non-DWARF
35753 debug information, and also for .eh_frame. */
35754 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35755 return regno;
35757 /* On some platforms, we use the standard DWARF register
35758 numbering for .debug_info and .debug_frame. */
35759 #ifdef RS6000_USE_DWARF_NUMBERING
35760 if (regno <= 63)
35761 return regno;
35762 if (regno == LR_REGNO)
35763 return 108;
35764 if (regno == CTR_REGNO)
35765 return 109;
35766 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35767 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35768 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35769 to the DWARF reg for CR. */
35770 if (format == 1 && regno == CR2_REGNO)
35771 return 64;
35772 if (CR_REGNO_P (regno))
35773 return regno - CR0_REGNO + 86;
35774 if (regno == CA_REGNO)
35775 return 101; /* XER */
35776 if (ALTIVEC_REGNO_P (regno))
35777 return regno - FIRST_ALTIVEC_REGNO + 1124;
35778 if (regno == VRSAVE_REGNO)
35779 return 356;
35780 if (regno == VSCR_REGNO)
35781 return 67;
35782 if (regno == SPE_ACC_REGNO)
35783 return 99;
35784 if (regno == SPEFSCR_REGNO)
35785 return 612;
35786 #endif
35787 return regno;
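/* For illustration (hypothetical queries, assuming
   RS6000_USE_DWARF_NUMBERING is defined): GPRs and FPRs map to
   themselves, (LR_REGNO, format 1) maps to DWARF column 108, and a
   CR2 save in .debug_frame maps to column 64 because the prologue
   actually saved all of CR.  */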
35790 /* target hook eh_return_filter_mode */
35791 static machine_mode
35792 rs6000_eh_return_filter_mode (void)
35794 return TARGET_32BIT ? SImode : word_mode;
35797 /* Target hook for scalar_mode_supported_p. */
35798 static bool
35799 rs6000_scalar_mode_supported_p (machine_mode mode)
35801 /* -m32 does not support TImode. This is the default, from
35802 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35803 same ABI as for -m32. But default_scalar_mode_supported_p allows
35804 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35805 for -mpowerpc64. */
35806 if (TARGET_32BIT && mode == TImode)
35807 return false;
35809 if (DECIMAL_FLOAT_MODE_P (mode))
35810 return default_decimal_float_supported_p ();
35811 else if (TARGET_FLOAT128 && (mode == KFmode || mode == IFmode))
35812 return true;
35813 else
35814 return default_scalar_mode_supported_p (mode);
35817 /* Target hook for vector_mode_supported_p. */
35818 static bool
35819 rs6000_vector_mode_supported_p (machine_mode mode)
35822 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
35823 return true;
35825 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
35826 return true;
35828 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35829 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35830 double-double. */
35831 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35832 return true;
35834 else
35835 return false;
35838 /* Target hook for c_mode_for_suffix. */
35839 static machine_mode
35840 rs6000_c_mode_for_suffix (char suffix)
35842 if (TARGET_FLOAT128)
35844 if (suffix == 'q' || suffix == 'Q')
35845 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35847 /* At the moment, we are not defining a suffix for IBM extended double.
35848 If/when the default for -mabi=ieeelongdouble is changed, and we want
35849 to support __ibm128 constants in legacy library code, we may need to
35850      re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
35851      'q' as machine-dependent suffixes.  The x86_64 port uses 'w' for
35852 __float80 constants. */
35855 return VOIDmode;
35858 /* Target hook for invalid_arg_for_unprototyped_fn. */
35859 static const char *
35860 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
35862 return (!rs6000_darwin64_abi
35863 && typelist == 0
35864 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
35865 && (funcdecl == NULL_TREE
35866 || (TREE_CODE (funcdecl) == FUNCTION_DECL
35867 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
35868 ? N_("AltiVec argument passed to unprototyped function")
35869 : NULL;
35872 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
35873 setup by using __stack_chk_fail_local hidden function instead of
35874 calling __stack_chk_fail directly. Otherwise it is better to call
35875 __stack_chk_fail directly. */
35877 static tree ATTRIBUTE_UNUSED
35878 rs6000_stack_protect_fail (void)
35880 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
35881 ? default_hidden_stack_protect_fail ()
35882 : default_external_stack_protect_fail ();
35885 void
35886 rs6000_final_prescan_insn (rtx_insn *insn, rtx *operand ATTRIBUTE_UNUSED,
35887 int num_operands ATTRIBUTE_UNUSED)
35889 if (rs6000_warn_cell_microcode)
35891 const char *temp;
35892 int insn_code_number = recog_memoized (insn);
35893 location_t location = INSN_LOCATION (insn);
35895 /* Punt on insns we cannot recognize. */
35896 if (insn_code_number < 0)
35897 return;
35899 temp = get_insn_template (insn_code_number, insn);
35901 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
35902 warning_at (location, OPT_mwarn_cell_microcode,
35903 "emitting microcode insn %s\t[%s] #%d",
35904 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
35905 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
35906 warning_at (location, OPT_mwarn_cell_microcode,
35907 "emitting conditional microcode insn %s\t[%s] #%d",
35908 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
35912 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
35914 #if TARGET_ELF
35915 static unsigned HOST_WIDE_INT
35916 rs6000_asan_shadow_offset (void)
35918 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
35920 #endif
35922 /* Mask options that we want to support inside of attribute((target)) and
35923 #pragma GCC target operations. Note, we do not include things like
35924    64/32-bit, endianness, hard/soft floating point, etc. that would have
35925 different calling sequences. */
35927 struct rs6000_opt_mask {
35928 const char *name; /* option name */
35929 HOST_WIDE_INT mask; /* mask to set */
35930 bool invert; /* invert sense of mask */
35931 bool valid_target; /* option is a target option */
35934 static struct rs6000_opt_mask const rs6000_opt_masks[] =
35936 { "altivec", OPTION_MASK_ALTIVEC, false, true },
35937 { "cmpb", OPTION_MASK_CMPB, false, true },
35938 { "crypto", OPTION_MASK_CRYPTO, false, true },
35939 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
35940 { "dlmzb", OPTION_MASK_DLMZB, false, true },
35941 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
35942 false, true },
35943 { "float128", OPTION_MASK_FLOAT128, false, false },
35944 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, false },
35945 { "fprnd", OPTION_MASK_FPRND, false, true },
35946 { "hard-dfp", OPTION_MASK_DFP, false, true },
35947 { "htm", OPTION_MASK_HTM, false, true },
35948 { "isel", OPTION_MASK_ISEL, false, true },
35949 { "mfcrf", OPTION_MASK_MFCRF, false, true },
35950 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
35951 { "modulo", OPTION_MASK_MODULO, false, true },
35952 { "mulhw", OPTION_MASK_MULHW, false, true },
35953 { "multiple", OPTION_MASK_MULTIPLE, false, true },
35954 { "popcntb", OPTION_MASK_POPCNTB, false, true },
35955 { "popcntd", OPTION_MASK_POPCNTD, false, true },
35956 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
35957 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
35958 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
35959 { "power9-dform-scalar", OPTION_MASK_P9_DFORM_SCALAR, false, true },
35960 { "power9-dform-vector", OPTION_MASK_P9_DFORM_VECTOR, false, true },
35961 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
35962 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
35963 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
35964 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
35965 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
35966 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
35967 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
35968 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
35969 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
35970 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
35971 { "string", OPTION_MASK_STRING, false, true },
35972 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
35973 { "update", OPTION_MASK_NO_UPDATE, true , true },
35974 { "upper-regs-di", OPTION_MASK_UPPER_REGS_DI, false, true },
35975 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, true },
35976 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, true },
35977 { "vsx", OPTION_MASK_VSX, false, true },
35978 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
35979 #ifdef OPTION_MASK_64BIT
35980 #if TARGET_AIX_OS
35981 { "aix64", OPTION_MASK_64BIT, false, false },
35982 { "aix32", OPTION_MASK_64BIT, true, false },
35983 #else
35984 { "64", OPTION_MASK_64BIT, false, false },
35985 { "32", OPTION_MASK_64BIT, true, false },
35986 #endif
35987 #endif
35988 #ifdef OPTION_MASK_EABI
35989 { "eabi", OPTION_MASK_EABI, false, false },
35990 #endif
35991 #ifdef OPTION_MASK_LITTLE_ENDIAN
35992 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
35993 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
35994 #endif
35995 #ifdef OPTION_MASK_RELOCATABLE
35996 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
35997 #endif
35998 #ifdef OPTION_MASK_STRICT_ALIGN
35999 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36000 #endif
36001 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36002 { "string", OPTION_MASK_STRING, false, false },
36005 /* Builtin mask mapping for printing the flags. */
36006 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36008 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36009 { "vsx", RS6000_BTM_VSX, false, false },
36010 { "spe", RS6000_BTM_SPE, false, false },
36011 { "paired", RS6000_BTM_PAIRED, false, false },
36012 { "fre", RS6000_BTM_FRE, false, false },
36013 { "fres", RS6000_BTM_FRES, false, false },
36014 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36015 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36016 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36017 { "cell", RS6000_BTM_CELL, false, false },
36018 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36019 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36020 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36021 { "crypto", RS6000_BTM_CRYPTO, false, false },
36022 { "htm", RS6000_BTM_HTM, false, false },
36023 { "hard-dfp", RS6000_BTM_DFP, false, false },
36024 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36025 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36026 { "float128", RS6000_BTM_FLOAT128, false, false },
36029 /* Option variables that we want to support inside attribute((target)) and
36030 #pragma GCC target operations. */
36032 struct rs6000_opt_var {
36033 const char *name; /* option name */
36034 size_t global_offset; /* offset of the option in global_options. */
36035 size_t target_offset; /* offset of the option in target options. */
36038 static struct rs6000_opt_var const rs6000_opt_vars[] =
36040 { "friz",
36041 offsetof (struct gcc_options, x_TARGET_FRIZ),
36042 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36043 { "avoid-indexed-addresses",
36044 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36045 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36046 { "paired",
36047 offsetof (struct gcc_options, x_rs6000_paired_float),
36048 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36049 { "longcall",
36050 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36051 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36052 { "optimize-swaps",
36053 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36054 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36055 { "allow-movmisalign",
36056 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36057 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36058 { "allow-df-permute",
36059 offsetof (struct gcc_options, x_TARGET_ALLOW_DF_PERMUTE),
36060 offsetof (struct cl_target_option, x_TARGET_ALLOW_DF_PERMUTE), },
36061 { "sched-groups",
36062 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36063 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36064 { "always-hint",
36065 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36066 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36067 { "align-branch-targets",
36068 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36069 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36070 { "vectorize-builtins",
36071 offsetof (struct gcc_options, x_TARGET_VECTORIZE_BUILTINS),
36072 offsetof (struct cl_target_option, x_TARGET_VECTORIZE_BUILTINS), },
36073 { "tls-markers",
36074 offsetof (struct gcc_options, x_tls_markers),
36075 offsetof (struct cl_target_option, x_tls_markers), },
36076 { "sched-prolog",
36077 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36078 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36079 { "sched-epilog",
36080 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36081 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36082 { "gen-cell-microcode",
36083 offsetof (struct gcc_options, x_rs6000_gen_cell_microcode),
36084 offsetof (struct cl_target_option, x_rs6000_gen_cell_microcode), },
36085 { "warn-cell-microcode",
36086 offsetof (struct gcc_options, x_rs6000_warn_cell_microcode),
36087 offsetof (struct cl_target_option, x_rs6000_warn_cell_microcode), },
36090 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36091 parsing. Return true if there were no errors. */
36093 static bool
36094 rs6000_inner_target_options (tree args, bool attr_p)
36096 bool ret = true;
36098 if (args == NULL_TREE)
36101 else if (TREE_CODE (args) == STRING_CST)
36103 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36104 char *q;
36106 while ((q = strtok (p, ",")) != NULL)
36108 bool error_p = false;
36109 bool not_valid_p = false;
36110 const char *cpu_opt = NULL;
36112 p = NULL;
36113 if (strncmp (q, "cpu=", 4) == 0)
36115 int cpu_index = rs6000_cpu_name_lookup (q+4);
36116 if (cpu_index >= 0)
36117 rs6000_cpu_index = cpu_index;
36118 else
36120 error_p = true;
36121 cpu_opt = q+4;
36124 else if (strncmp (q, "tune=", 5) == 0)
36126 int tune_index = rs6000_cpu_name_lookup (q+5);
36127 if (tune_index >= 0)
36128 rs6000_tune_index = tune_index;
36129 else
36131 error_p = true;
36132 cpu_opt = q+5;
36135 else
36137 size_t i;
36138 bool invert = false;
36139 char *r = q;
36141 error_p = true;
36142 if (strncmp (r, "no-", 3) == 0)
36144 invert = true;
36145 r += 3;
36148 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36149 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36151 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36153 if (!rs6000_opt_masks[i].valid_target)
36154 not_valid_p = true;
36155 else
36157 error_p = false;
36158 rs6000_isa_flags_explicit |= mask;
36160 /* VSX needs altivec, so -mvsx automagically sets
36161 altivec and disables -mavoid-indexed-addresses. */
36162 if (!invert)
36164 if (mask == OPTION_MASK_VSX)
36166 mask |= OPTION_MASK_ALTIVEC;
36167 TARGET_AVOID_XFORM = 0;
36171 if (rs6000_opt_masks[i].invert)
36172 invert = !invert;
36174 if (invert)
36175 rs6000_isa_flags &= ~mask;
36176 else
36177 rs6000_isa_flags |= mask;
36179 break;
36182 if (error_p && !not_valid_p)
36184 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36185 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36187 size_t j = rs6000_opt_vars[i].global_offset;
36188 *((int *) ((char *)&global_options + j)) = !invert;
36189 error_p = false;
36190 not_valid_p = false;
36191 break;
36196 if (error_p)
36198 const char *eprefix, *esuffix;
36200 ret = false;
36201 if (attr_p)
36203 eprefix = "__attribute__((__target__(";
36204 esuffix = ")))";
36206 else
36208 eprefix = "#pragma GCC target ";
36209 esuffix = "";
36212 if (cpu_opt)
36213 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
36214 q, esuffix);
36215 else if (not_valid_p)
36216 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
36217 else
36218 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
36223 else if (TREE_CODE (args) == TREE_LIST)
36227 tree value = TREE_VALUE (args);
36228 if (value)
36230 bool ret2 = rs6000_inner_target_options (value, attr_p);
36231 if (!ret2)
36232 ret = false;
36234 args = TREE_CHAIN (args);
36236 while (args != NULL_TREE);
36239 else
36240 gcc_unreachable ();
36242 return ret;
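/* For illustration (hypothetical pragma): parsing
   #pragma GCC target ("cpu=power8,no-crypto") walks the
   comma-separated list above, setting rs6000_cpu_index from the
   "cpu=" prefix and then clearing OPTION_MASK_CRYPTO through the
   "no-" handling in the mask-table loop.  */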
36245 /* Print out the target options as a list for -mdebug=target. */
36247 static void
36248 rs6000_debug_target_options (tree args, const char *prefix)
36250 if (args == NULL_TREE)
36251 fprintf (stderr, "%s<NULL>", prefix);
36253 else if (TREE_CODE (args) == STRING_CST)
36255 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36256 char *q;
36258 while ((q = strtok (p, ",")) != NULL)
36260 p = NULL;
36261 fprintf (stderr, "%s\"%s\"", prefix, q);
36262 prefix = ", ";
36266 else if (TREE_CODE (args) == TREE_LIST)
36270 tree value = TREE_VALUE (args);
36271 if (value)
36273 rs6000_debug_target_options (value, prefix);
36274 prefix = ", ";
36276 args = TREE_CHAIN (args);
36278 while (args != NULL_TREE);
36281 else
36282 gcc_unreachable ();
36284 return;
36288 /* Hook to validate attribute((target("..."))). */
36290 static bool
36291 rs6000_valid_attribute_p (tree fndecl,
36292 tree ARG_UNUSED (name),
36293 tree args,
36294 int flags)
36296 struct cl_target_option cur_target;
36297 bool ret;
36298 tree old_optimize = build_optimization_node (&global_options);
36299 tree new_target, new_optimize;
36300 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36302 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36304 if (TARGET_DEBUG_TARGET)
36306 tree tname = DECL_NAME (fndecl);
36307 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36308 if (tname)
36309 fprintf (stderr, "function: %.*s\n",
36310 (int) IDENTIFIER_LENGTH (tname),
36311 IDENTIFIER_POINTER (tname));
36312 else
36313 fprintf (stderr, "function: unknown\n");
36315 fprintf (stderr, "args:");
36316 rs6000_debug_target_options (args, " ");
36317 fprintf (stderr, "\n");
36319 if (flags)
36320 fprintf (stderr, "flags: 0x%x\n", flags);
36322 fprintf (stderr, "--------------------\n");
36325 old_optimize = build_optimization_node (&global_options);
36326 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36328 /* If the function changed the optimization levels as well as setting target
36329 options, start with the optimizations specified. */
36330 if (func_optimize && func_optimize != old_optimize)
36331 cl_optimization_restore (&global_options,
36332 TREE_OPTIMIZATION (func_optimize));
36334 /* The target attributes may also change some optimization flags, so update
36335 the optimization options if necessary. */
36336 cl_target_option_save (&cur_target, &global_options);
36337 rs6000_cpu_index = rs6000_tune_index = -1;
36338 ret = rs6000_inner_target_options (args, true);
36340 /* Set up any additional state. */
36341 if (ret)
36343 ret = rs6000_option_override_internal (false);
36344 new_target = build_target_option_node (&global_options);
36346 else
36347 new_target = NULL;
36349 new_optimize = build_optimization_node (&global_options);
36351 if (!new_target)
36352 ret = false;
36354 else if (fndecl)
36356 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36358 if (old_optimize != new_optimize)
36359 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36362 cl_target_option_restore (&global_options, &cur_target);
36364 if (old_optimize != new_optimize)
36365 cl_optimization_restore (&global_options,
36366 TREE_OPTIMIZATION (old_optimize));
36368 return ret;
36372 /* Hook to validate the current #pragma GCC target and set the state, and
36373 update the macros based on what was changed. If ARGS is NULL, then
36374 POP_TARGET is used to reset the options. */
36376 bool
36377 rs6000_pragma_target_parse (tree args, tree pop_target)
36379 tree prev_tree = build_target_option_node (&global_options);
36380 tree cur_tree;
36381 struct cl_target_option *prev_opt, *cur_opt;
36382 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36383 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36385 if (TARGET_DEBUG_TARGET)
36387 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36388 fprintf (stderr, "args:");
36389 rs6000_debug_target_options (args, " ");
36390 fprintf (stderr, "\n");
36392 if (pop_target)
36394 fprintf (stderr, "pop_target:\n");
36395 debug_tree (pop_target);
36397 else
36398 fprintf (stderr, "pop_target: <NULL>\n");
36400 fprintf (stderr, "--------------------\n");
36403 if (! args)
36405 cur_tree = ((pop_target)
36406 ? pop_target
36407 : target_option_default_node);
36408 cl_target_option_restore (&global_options,
36409 TREE_TARGET_OPTION (cur_tree));
36411 else
36413 rs6000_cpu_index = rs6000_tune_index = -1;
36414 if (!rs6000_inner_target_options (args, false)
36415 || !rs6000_option_override_internal (false)
36416 || (cur_tree = build_target_option_node (&global_options))
36417 == NULL_TREE)
36419 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36420 fprintf (stderr, "invalid pragma\n");
36422 return false;
36426 target_option_current_node = cur_tree;
36428 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36429 change the macros that are defined. */
36430 if (rs6000_target_modify_macros_ptr)
36432 prev_opt = TREE_TARGET_OPTION (prev_tree);
36433 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36434 prev_flags = prev_opt->x_rs6000_isa_flags;
36436 cur_opt = TREE_TARGET_OPTION (cur_tree);
36437 cur_flags = cur_opt->x_rs6000_isa_flags;
36438 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36440 diff_bumask = (prev_bumask ^ cur_bumask);
36441 diff_flags = (prev_flags ^ cur_flags);
36443 if ((diff_flags != 0) || (diff_bumask != 0))
36445 /* Delete old macros. */
36446 rs6000_target_modify_macros_ptr (false,
36447 prev_flags & diff_flags,
36448 prev_bumask & diff_bumask);
36450 /* Define new macros. */
36451 rs6000_target_modify_macros_ptr (true,
36452 cur_flags & diff_flags,
36453 cur_bumask & diff_bumask);
36457 return true;
36461 /* Remember the last target of rs6000_set_current_function. */
36462 static GTY(()) tree rs6000_previous_fndecl;
36464 /* Establish appropriate back-end context for processing the function
36465 FNDECL. The argument might be NULL to indicate processing at top
36466 level, outside of any function scope. */
36467 static void
36468 rs6000_set_current_function (tree fndecl)
36470 tree old_tree = (rs6000_previous_fndecl
36471 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
36472 : NULL_TREE);
36474 tree new_tree = (fndecl
36475 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
36476 : NULL_TREE);
36478 if (TARGET_DEBUG_TARGET)
36480 bool print_final = false;
36481 fprintf (stderr, "\n==================== rs6000_set_current_function");
36483 if (fndecl)
36484 fprintf (stderr, ", fndecl %s (%p)",
36485 (DECL_NAME (fndecl)
36486 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36487 : "<unknown>"), (void *)fndecl);
36489 if (rs6000_previous_fndecl)
36490 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36492 fprintf (stderr, "\n");
36493 if (new_tree)
36495 fprintf (stderr, "\nnew fndecl target specific options:\n");
36496 debug_tree (new_tree);
36497 print_final = true;
36500 if (old_tree)
36502 fprintf (stderr, "\nold fndecl target specific options:\n");
36503 debug_tree (old_tree);
36504 print_final = true;
36507 if (print_final)
36508 fprintf (stderr, "--------------------\n");
36511 /* Only change the context if the function changes. This hook is called
36512 several times in the course of compiling a function, and we don't want to
36513 slow things down too much or call target_reinit when it isn't safe. */
36514 if (fndecl && fndecl != rs6000_previous_fndecl)
36516 rs6000_previous_fndecl = fndecl;
36517 if (old_tree == new_tree)
36520 else if (new_tree && new_tree != target_option_default_node)
36522 cl_target_option_restore (&global_options,
36523 TREE_TARGET_OPTION (new_tree));
36524 if (TREE_TARGET_GLOBALS (new_tree))
36525 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36526 else
36527 TREE_TARGET_GLOBALS (new_tree)
36528 = save_target_globals_default_opts ();
36531 else if (old_tree && old_tree != target_option_default_node)
36533 new_tree = target_option_current_node;
36534 cl_target_option_restore (&global_options,
36535 TREE_TARGET_OPTION (new_tree));
36536 if (TREE_TARGET_GLOBALS (new_tree))
36537 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36538 else if (new_tree == target_option_default_node)
36539 restore_target_globals (&default_target_globals);
36540 else
36541 TREE_TARGET_GLOBALS (new_tree)
36542 = save_target_globals_default_opts ();
36548 /* Save the current options */
36550 static void
36551 rs6000_function_specific_save (struct cl_target_option *ptr,
36552 struct gcc_options *opts)
36554 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36555 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36558 /* Restore the current options */
36560 static void
36561 rs6000_function_specific_restore (struct gcc_options *opts,
36562 struct cl_target_option *ptr)
36565 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36566 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36567 (void) rs6000_option_override_internal (false);
36570 /* Print the current options */
36572 static void
36573 rs6000_function_specific_print (FILE *file, int indent,
36574 struct cl_target_option *ptr)
36576 rs6000_print_isa_options (file, indent, "Isa options set",
36577 ptr->x_rs6000_isa_flags);
36579 rs6000_print_isa_options (file, indent, "Isa options explicit",
36580 ptr->x_rs6000_isa_flags_explicit);
36583 /* Helper function to print the current isa or misc options on a line. */
36585 static void
36586 rs6000_print_options_internal (FILE *file,
36587 int indent,
36588 const char *string,
36589 HOST_WIDE_INT flags,
36590 const char *prefix,
36591 const struct rs6000_opt_mask *opts,
36592 size_t num_elements)
36594 size_t i;
36595 size_t start_column = 0;
36596 size_t cur_column;
36597 size_t max_column = 120;
36598 size_t prefix_len = strlen (prefix);
36599 size_t comma_len = 0;
36600 const char *comma = "";
36602 if (indent)
36603 start_column += fprintf (file, "%*s", indent, "");
36605 if (!flags)
36607       fprintf (file, DEBUG_FMT_S, string, "<none>");
36608       return;
36611   start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36613 /* Print the various mask options. */
36614 cur_column = start_column;
36615 for (i = 0; i < num_elements; i++)
36617 bool invert = opts[i].invert;
36618 const char *name = opts[i].name;
36619 const char *no_str = "";
36620 HOST_WIDE_INT mask = opts[i].mask;
36621 size_t len = comma_len + prefix_len + strlen (name);
36623 if (!invert)
36625 if ((flags & mask) == 0)
36627 no_str = "no-";
36628 len += sizeof ("no-") - 1;
36631 flags &= ~mask;
36634 else
36636 if ((flags & mask) != 0)
36638 no_str = "no-";
36639 len += sizeof ("no-") - 1;
36642 flags |= mask;
36645 cur_column += len;
36646 if (cur_column > max_column)
36648           fprintf (file, ", \\\n%*s", (int)start_column, "");
36649 cur_column = start_column + len;
36650 comma = "";
36653 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36654 comma = ", ";
36655 comma_len = sizeof (", ") - 1;
36658 fputs ("\n", file);
36661 /* Helper function to print the current isa options on a line. */
36663 static void
36664 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36665 HOST_WIDE_INT flags)
36667 rs6000_print_options_internal (file, indent, string, flags, "-m",
36668 &rs6000_opt_masks[0],
36669 ARRAY_SIZE (rs6000_opt_masks));
36672 static void
36673 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36674 HOST_WIDE_INT flags)
36676 rs6000_print_options_internal (file, indent, string, flags, "",
36677 &rs6000_builtin_mask_names[0],
36678 ARRAY_SIZE (rs6000_builtin_mask_names));
36682 /* Hook to determine if one function can safely inline another. */
36684 static bool
36685 rs6000_can_inline_p (tree caller, tree callee)
36687 bool ret = false;
36688 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
36689 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
36691 /* If callee has no option attributes, then it is ok to inline. */
36692 if (!callee_tree)
36693 ret = true;
36695 /* If caller has no option attributes, but callee does then it is not ok to
36696 inline. */
36697 else if (!caller_tree)
36698 ret = false;
36700 else
36702 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
36703 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
36705       /* Callee's options should be a subset of the caller's, i.e. a vsx function
36706 can inline an altivec function but a non-vsx function can't inline a
36707 vsx function. */
36708 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
36709 == callee_opts->x_rs6000_isa_flags)
36710 ret = true;
36713 if (TARGET_DEBUG_TARGET)
36714     fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
36715 (DECL_NAME (caller)
36716 ? IDENTIFIER_POINTER (DECL_NAME (caller))
36717 : "<unknown>"),
36718 (DECL_NAME (callee)
36719 ? IDENTIFIER_POINTER (DECL_NAME (callee))
36720 : "<unknown>"),
36721 (ret ? "can" : "cannot"));
36723 return ret;
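/* For illustration (hypothetical functions): a caller built with
   target("vsx") may inline a callee built with target("altivec"),
   since the altivec flags are a subset of the vsx ones, but the
   reverse is rejected by the subset test above.  */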
36726 /* Allocate a stack temp and fixup the address so it meets the particular
36727    memory requirements (either offsettable or REG+REG addressing).  */
36729 rtx
36730 rs6000_allocate_stack_temp (machine_mode mode,
36731 bool offsettable_p,
36732 bool reg_reg_p)
36734 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
36735 rtx addr = XEXP (stack, 0);
36736 int strict_p = (reload_in_progress || reload_completed);
36738 if (!legitimate_indirect_address_p (addr, strict_p))
36740 if (offsettable_p
36741 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
36742 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
36744 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
36745 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
36748 return stack;
36751 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
36752 to such a form to deal with memory reference instructions like STFIWX that
36753 only take reg+reg addressing. */
36755 rtx
36756 rs6000_address_for_fpconvert (rtx x)
36758 int strict_p = (reload_in_progress || reload_completed);
36759 rtx addr;
36761 gcc_assert (MEM_P (x));
36762 addr = XEXP (x, 0);
36763 if (! legitimate_indirect_address_p (addr, strict_p)
36764 && ! legitimate_indexed_address_p (addr, strict_p))
36766 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
36768 rtx reg = XEXP (addr, 0);
36769 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
36770 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
36771 gcc_assert (REG_P (reg));
36772 emit_insn (gen_add3_insn (reg, reg, size_rtx));
36773 addr = reg;
36775 else if (GET_CODE (addr) == PRE_MODIFY)
36777 rtx reg = XEXP (addr, 0);
36778 rtx expr = XEXP (addr, 1);
36779 gcc_assert (REG_P (reg));
36780 gcc_assert (GET_CODE (expr) == PLUS);
36781 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
36782 addr = reg;
36785 x = replace_equiv_address (x, copy_addr_to_reg (addr));
36788 return x;
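/* For illustration (hypothetical operand): given a memory reference
   (mem (plus r3 (const_int 16))), which stfiwx cannot use, the code
   above copies the address into a fresh pseudo and rewrites the MEM
   into plain register-indirect form.  */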
36791 /* Given a memory reference, if it is not in the form for altivec memory
36792 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
36793 convert to the altivec format. */
36795 rtx
36796 rs6000_address_for_altivec (rtx x)
36798 gcc_assert (MEM_P (x));
36799 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
36801 rtx addr = XEXP (x, 0);
36802 int strict_p = (reload_in_progress || reload_completed);
36804 if (!legitimate_indexed_address_p (addr, strict_p)
36805 && !legitimate_indirect_address_p (addr, strict_p))
36806 addr = copy_to_mode_reg (Pmode, addr);
36808 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
36809 x = change_address (x, GET_MODE (x), addr);
36812 return x;
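/* For illustration: the AND with -16 clears the low four address
   bits, so a hypothetical address 0x1003c is accessed as 0x10030,
   matching the 16-byte alignment that lvx/stvx assume.  */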
36815 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
36817 On the RS/6000, all integer constants are acceptable, most won't be valid
36818 for particular insns, though. Only easy FP constants are acceptable. */
36820 static bool
36821 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
36823 if (TARGET_ELF && tls_referenced_p (x))
36824 return false;
36826 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
36827 || GET_MODE (x) == VOIDmode
36828 || (TARGET_POWERPC64 && mode == DImode)
36829 || easy_fp_constant (x, mode)
36830 || easy_vector_constant (x, mode));
36834 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
36836 static bool
36837 chain_already_loaded (rtx_insn *last)
36839 for (; last != NULL; last = PREV_INSN (last))
36841 if (NONJUMP_INSN_P (last))
36843 rtx patt = PATTERN (last);
36845 if (GET_CODE (patt) == SET)
36847 rtx lhs = XEXP (patt, 0);
36849 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
36850 return true;
36854 return false;
36857 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
36859 void
36860 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
36862 const bool direct_call_p
36863 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
36864 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
36865 rtx toc_load = NULL_RTX;
36866 rtx toc_restore = NULL_RTX;
36867 rtx func_addr;
36868 rtx abi_reg = NULL_RTX;
36869 rtx call[4];
36870 int n_call;
36871 rtx insn;
36873 /* Handle longcall attributes. */
36874 if (INTVAL (cookie) & CALL_LONG)
36875 func_desc = rs6000_longcall_ref (func_desc);
36877 /* Handle indirect calls. */
36878 if (GET_CODE (func_desc) != SYMBOL_REF
36879 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
36881 /* Save the TOC into its reserved slot before the call,
36882 and prepare to restore it after the call. */
36883 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
36884 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
36885 rtx stack_toc_mem = gen_frame_mem (Pmode,
36886 gen_rtx_PLUS (Pmode, stack_ptr,
36887 stack_toc_offset));
36888 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
36889 gen_rtvec (1, stack_toc_offset),
36890 UNSPEC_TOCSLOT);
36891 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
36893 /* Can we optimize saving the TOC in the prologue or
36894 do we need to do it at every call? */
36895 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
36896 cfun->machine->save_toc_in_prologue = true;
36897 else
36899 MEM_VOLATILE_P (stack_toc_mem) = 1;
36900 emit_move_insn (stack_toc_mem, toc_reg);
36903 if (DEFAULT_ABI == ABI_ELFv2)
36905 /* A function pointer in the ELFv2 ABI is just a plain address, but
36906 the ABI requires it to be loaded into r12 before the call. */
36907 func_addr = gen_rtx_REG (Pmode, 12);
36908 emit_move_insn (func_addr, func_desc);
36909 abi_reg = func_addr;
36911 else
36913 /* A function pointer under AIX is a pointer to a data area whose
36914 first word contains the actual address of the function, whose
36915 second word contains a pointer to its TOC, and whose third word
36916 contains a value to place in the static chain register (r11).
36917 Note that if we load the static chain, our "trampoline" need
36918 not have any executable code. */
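/* Sketch of the descriptor layout assumed by the code below (added for
   clarity; offsets shown for 64-bit Pmode):
     +0   entry-point address of the function
     +8   TOC base for the callee
     +16  static chain value for nested functions  */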
36920 /* Load up address of the actual function. */
36921 func_desc = force_reg (Pmode, func_desc);
36922 func_addr = gen_reg_rtx (Pmode);
36923 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
36925 /* Prepare to load the TOC of the called function. Note that the
36926 TOC load must happen immediately before the actual call so
36927 that unwinding the TOC registers works correctly. See the
36928 comment in frob_update_context. */
36929 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
36930 rtx func_toc_mem = gen_rtx_MEM (Pmode,
36931 gen_rtx_PLUS (Pmode, func_desc,
36932 func_toc_offset));
36933 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
36935 /* If we have a static chain, load it up. But, if the call was
36936 originally direct, the 3rd word has not been written since no
36937 trampoline has been built, so we ought not to load it, lest we
36938 overwrite a live static chain value. */
36939 if (!direct_call_p
36940 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
36941 && !chain_already_loaded (get_current_sequence ()->next->last))
36943 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
36944 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
36945 rtx func_sc_mem = gen_rtx_MEM (Pmode,
36946 gen_rtx_PLUS (Pmode, func_desc,
36947 func_sc_offset));
36948 emit_move_insn (sc_reg, func_sc_mem);
36949 abi_reg = sc_reg;
36953 else
36955 /* Direct calls use the TOC: for local calls, the callee will
36956 assume the TOC register is set; for non-local calls, the
36957 PLT stub needs the TOC register. */
36958 abi_reg = toc_reg;
36959 func_addr = func_desc;
36962 /* Create the call. */
36963 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
36964 if (value != NULL_RTX)
36965 call[0] = gen_rtx_SET (value, call[0]);
36966 n_call = 1;
36968 if (toc_load)
36969 call[n_call++] = toc_load;
36970 if (toc_restore)
36971 call[n_call++] = toc_restore;
36973 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
36975 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
36976 insn = emit_call_insn (insn);
36978 /* Mention all registers defined by the ABI to hold information
36979 as uses in CALL_INSN_FUNCTION_USAGE. */
36980 if (abi_reg)
36981 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
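/* For illustration (added commentary): an indirect ELFv2 call built by
   the code above ends up as a PARALLEL of roughly this shape:

     (parallel [(set (reg 3) (call (mem:SI (reg 12)) (const_int 0)))
                (set (reg 2) (unspec [(const_int 24)] UNSPEC_TOCSLOT))
                (clobber (reg LR_REGNO))])

   where r12 holds the function address and the UNSPEC restores the TOC
   from its reserved stack slot after the call.  Register numbers and
   the TOC slot offset are illustrative.  */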
36984 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
36986 void
36987 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
36989 rtx call[2];
36990 rtx insn;
36992 gcc_assert (INTVAL (cookie) == 0);
36994 /* Create the call. */
36995 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
36996 if (value != NULL_RTX)
36997 call[0] = gen_rtx_SET (value, call[0]);
36999 call[1] = simple_return_rtx;
37001 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37002 insn = emit_call_insn (insn);
37004 /* Note use of the TOC register. */
37005 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37006 /* We need to also mark a use of the link register since the function we
37007 sibling-call to will use it to return to our caller. */
37008 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
37011 /* Return whether we need to always update the saved TOC pointer when we update
37012 the stack pointer. */
37014 static bool
37015 rs6000_save_toc_in_prologue_p (void)
37017 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37020 #ifdef HAVE_GAS_HIDDEN
37021 # define USE_HIDDEN_LINKONCE 1
37022 #else
37023 # define USE_HIDDEN_LINKONCE 0
37024 #endif
37026 /* Fills in the label name that should be used for a 476 link stack thunk. */
37028 void
37029 get_ppc476_thunk_name (char name[32])
37031 gcc_assert (TARGET_LINK_STACK);
37033 if (USE_HIDDEN_LINKONCE)
37034 sprintf (name, "__ppc476.get_thunk");
37035 else
37036 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37039 /* This function emits the simple thunk routine that is used to preserve
37040 the link stack on the 476 cpu. */
37042 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37043 static void
37044 rs6000_code_end (void)
37046 char name[32];
37047 tree decl;
37049 if (!TARGET_LINK_STACK)
37050 return;
37052 get_ppc476_thunk_name (name);
37054 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37055 build_function_type_list (void_type_node, NULL_TREE));
37056 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37057 NULL_TREE, void_type_node);
37058 TREE_PUBLIC (decl) = 1;
37059 TREE_STATIC (decl) = 1;
37061 #if RS6000_WEAK
37062 if (USE_HIDDEN_LINKONCE)
37064 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37065 targetm.asm_out.unique_section (decl, 0);
37066 switch_to_section (get_named_section (decl, NULL, 0));
37067 DECL_WEAK (decl) = 1;
37068 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37069 targetm.asm_out.globalize_label (asm_out_file, name);
37070 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37071 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37073 else
37074 #endif
37076 switch_to_section (text_section);
37077 ASM_OUTPUT_LABEL (asm_out_file, name);
37080 DECL_INITIAL (decl) = make_node (BLOCK);
37081 current_function_decl = decl;
37082 allocate_struct_function (decl, false);
37083 init_function_start (decl);
37084 first_function_block_is_cold = false;
37085 /* Make sure unwind info is emitted for the thunk if needed. */
37086 final_start_function (emit_barrier (), asm_out_file, 1);
37088 fputs ("\tblr\n", asm_out_file);
37090 final_end_function ();
37091 init_insn_lengths ();
37092 free_after_compilation (cfun);
37093 set_cfun (NULL);
37094 current_function_decl = NULL;
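/* For illustration (added commentary): the thunk emitted above is just

     __ppc476.get_thunk:
             blr

   A caller reaches it with "bl __ppc476.get_thunk", so the bl/blr pair
   keeps the 476 link stack balanced while still leaving the caller's
   return address available in the link register.  */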
37097 /* Add r30 to hard reg set if the prologue sets it up and it is not
37098 pic_offset_table_rtx. */
37100 static void
37101 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37103 if (!TARGET_SINGLE_PIC_BASE
37104 && TARGET_TOC
37105 && TARGET_MINIMAL_TOC
37106 && get_pool_size () != 0)
37107 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37108 if (cfun->machine->split_stack_argp_used)
37109 add_to_hard_reg_set (&set->set, Pmode, 12);
37113 /* Helper function for rs6000_split_logical to emit a logical instruction after
37114 splitting the operation to single GPR registers.
37116 DEST is the destination register.
37117 OP1 and OP2 are the input source registers.
37118 CODE is the base operation (AND, IOR, XOR, NOT).
37119 MODE is the machine mode.
37120 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37121 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37122 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37124 static void
37125 rs6000_split_logical_inner (rtx dest,
37126 rtx op1,
37127 rtx op2,
37128 enum rtx_code code,
37129 machine_mode mode,
37130 bool complement_final_p,
37131 bool complement_op1_p,
37132 bool complement_op2_p)
37134 rtx bool_rtx;
37136 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37137 if (op2 && GET_CODE (op2) == CONST_INT
37138 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37139 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37141 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37142 HOST_WIDE_INT value = INTVAL (op2) & mask;
37144 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37145 if (code == AND)
37147 if (value == 0)
37149 emit_insn (gen_rtx_SET (dest, const0_rtx));
37150 return;
37153 else if (value == mask)
37155 if (!rtx_equal_p (dest, op1))
37156 emit_insn (gen_rtx_SET (dest, op1));
37157 return;
37161 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37162 into separate ORI/ORIS or XORI/XORIS instructions. */
37163 else if (code == IOR || code == XOR)
37165 if (value == 0)
37167 if (!rtx_equal_p (dest, op1))
37168 emit_insn (gen_rtx_SET (dest, op1));
37169 return;
37174 if (code == AND && mode == SImode
37175 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37177 emit_insn (gen_andsi3 (dest, op1, op2));
37178 return;
37181 if (complement_op1_p)
37182 op1 = gen_rtx_NOT (mode, op1);
37184 if (complement_op2_p)
37185 op2 = gen_rtx_NOT (mode, op2);
37187 /* For canonical RTL, if only one arm is inverted it is the first. */
37188 if (!complement_op1_p && complement_op2_p)
37189 std::swap (op1, op2);
37191 bool_rtx = ((code == NOT)
37192 ? gen_rtx_NOT (mode, op1)
37193 : gen_rtx_fmt_ee (code, mode, op1, op2));
37195 if (complement_final_p)
37196 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37198 emit_insn (gen_rtx_SET (dest, bool_rtx));
37201 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37202 operations are split immediately during RTL generation to allow for more
37203 optimizations of the AND/IOR/XOR.
37205 OPERANDS is an array containing the destination and two input operands.
37206 CODE is the base operation (AND, IOR, XOR, NOT).
37208 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37209 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37210 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37214 static void
37215 rs6000_split_logical_di (rtx operands[3],
37216 enum rtx_code code,
37217 bool complement_final_p,
37218 bool complement_op1_p,
37219 bool complement_op2_p)
37221 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37222 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37223 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37224 enum hi_lo { hi = 0, lo = 1 };
37225 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37226 size_t i;
37228 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37229 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37230 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37231 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37233 if (code == NOT)
37234 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37235 else
37237 if (GET_CODE (operands[2]) != CONST_INT)
37239 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37240 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37242 else
37244 HOST_WIDE_INT value = INTVAL (operands[2]);
37245 HOST_WIDE_INT value_hi_lo[2];
37247 gcc_assert (!complement_final_p);
37248 gcc_assert (!complement_op1_p);
37249 gcc_assert (!complement_op2_p);
37251 value_hi_lo[hi] = value >> 32;
37252 value_hi_lo[lo] = value & lower_32bits;
37254 for (i = 0; i < 2; i++)
37256 HOST_WIDE_INT sub_value = value_hi_lo[i];
37258 if (sub_value & sign_bit)
37259 sub_value |= upper_32bits;
37261 op2_hi_lo[i] = GEN_INT (sub_value);
37263 /* If this is an AND instruction, check to see if we need to load
37264 the value in a register. */
37265 if (code == AND && sub_value != -1 && sub_value != 0
37266 && !and_operand (op2_hi_lo[i], SImode))
37267 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37272 for (i = 0; i < 2; i++)
37274 /* Split large IOR/XOR operations. */
37275 if ((code == IOR || code == XOR)
37276 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37277 && !complement_final_p
37278 && !complement_op1_p
37279 && !complement_op2_p
37280 && !logical_const_operand (op2_hi_lo[i], SImode))
37282 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37283 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37284 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37285 rtx tmp = gen_reg_rtx (SImode);
37287 /* Make sure the constant is sign extended. */
37288 if ((hi_16bits & sign_bit) != 0)
37289 hi_16bits |= upper_32bits;
37291 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37292 code, SImode, false, false, false);
37294 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37295 code, SImode, false, false, false);
37297 else
37298 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37299 code, SImode, complement_final_p,
37300 complement_op1_p, complement_op2_p);
37303 return;
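/* For illustration (added commentary): on a 32-bit target,

     (set (reg:DI d) (ior:DI (reg:DI a) (const_int 0x12345678)))

   is split above into SImode halves.  The high half is an IOR with 0
   and becomes a simple move; the low constant is not a single 16-bit
   logical constant, so it is split into an ORIS/ORI pair through a
   temporary, roughly

     oris tmp,al,0x1234
     ori  dl,tmp,0x5678

   Operand names are illustrative.  */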
37306 /* Split the insns that make up boolean operations operating on multiple GPR
37307 registers. The boolean MD patterns ensure that the inputs either are
37308 exactly the same as the output registers, or there is no overlap.
37310 OPERANDS is an array containing the destination and two input operands.
37311 CODE is the base operation (AND, IOR, XOR, NOT).
37312 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37313 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37314 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37316 void
37317 rs6000_split_logical (rtx operands[3],
37318 enum rtx_code code,
37319 bool complement_final_p,
37320 bool complement_op1_p,
37321 bool complement_op2_p)
37323 machine_mode mode = GET_MODE (operands[0]);
37324 machine_mode sub_mode;
37325 rtx op0, op1, op2;
37326 int sub_size, regno0, regno1, nregs, i;
37328 /* If this is DImode, use the specialized version that can run before
37329 register allocation. */
37330 if (mode == DImode && !TARGET_POWERPC64)
37332 rs6000_split_logical_di (operands, code, complement_final_p,
37333 complement_op1_p, complement_op2_p);
37334 return;
37337 op0 = operands[0];
37338 op1 = operands[1];
37339 op2 = (code == NOT) ? NULL_RTX : operands[2];
37340 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
37341 sub_size = GET_MODE_SIZE (sub_mode);
37342 regno0 = REGNO (op0);
37343 regno1 = REGNO (op1);
37345 gcc_assert (reload_completed);
37346 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37347 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37349 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
37350 gcc_assert (nregs > 1);
37352 if (op2 && REG_P (op2))
37353 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
37355 for (i = 0; i < nregs; i++)
37357 int offset = i * sub_size;
37358 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
37359 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
37360 rtx sub_op2 = ((code == NOT)
37361 ? NULL_RTX
37362 : simplify_subreg (sub_mode, op2, mode, offset));
37364 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
37365 complement_final_p, complement_op1_p,
37366 complement_op2_p);
37369 return;
37373 /* Return true if the peephole2 can combine an addis instruction and a
37374 load with an offset into a sequence that can be fused together on
37375 a power8. */
37377 bool
37378 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
37379 rtx addis_value, /* addis value. */
37380 rtx target, /* target register that is loaded. */
37381 rtx mem) /* bottom part of the memory addr. */
37383 rtx addr;
37384 rtx base_reg;
37386 /* Validate arguments. */
37387 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37388 return false;
37390 if (!base_reg_operand (target, GET_MODE (target)))
37391 return false;
37393 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37394 return false;
37396 /* Allow sign/zero extension. */
37397 if (GET_CODE (mem) == ZERO_EXTEND
37398 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
37399 mem = XEXP (mem, 0);
37401 if (!MEM_P (mem))
37402 return false;
37404 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
37405 return false;
37407 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37408 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
37409 return false;
37411 /* Validate that the register used to load the high value is either the
37412 register being loaded, or we can safely replace its use.
37414 This function is only called from the peephole2 pass and we assume that
37415 there are 2 instructions in the peephole (addis and load), so we
37416 check that the target register is not used in the memory address and
37417 that the register holding the addis result is dead after the peephole. */
37418 if (REGNO (addis_reg) != REGNO (target))
37420 if (reg_mentioned_p (target, mem))
37421 return false;
37423 if (!peep2_reg_dead_p (2, addis_reg))
37424 return false;
37426 /* If the target register being loaded is the stack pointer, we must
37427 avoid loading any other value into it, even temporarily. */
37428 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
37429 return false;
37432 base_reg = XEXP (addr, 0);
37433 return REGNO (addis_reg) == REGNO (base_reg);
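/* For illustration (added commentary): the peephole2 built on the test
   above matches pairs of the form

     (set (reg:DI 10) (high-part addis value))
     (set (reg:SI 10) (mem:SI (plus (reg:DI 10) (const_int 8))))

   where the addis target is also the base of the load; if the loaded
   register differs from the addis register, the addis register must be
   dead after the pair.  Register numbers and the offset are
   illustrative.  */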
37436 /* During the peephole2 pass, adjust and expand the insns for a load fusion
37437 sequence. We adjust the addis register to use the target register. If the
37438 load sign extends, we adjust the code to do the zero extending load, and an
37439 explicit sign extension later since the fusion only covers zero extending
37440 loads.
37442 The operands are:
37443 operands[0] register set with addis (to be replaced with target)
37444 operands[1] value set via addis
37445 operands[2] target register being loaded
37446 operands[3] D-form memory reference using operands[0]. */
37448 void
37449 expand_fusion_gpr_load (rtx *operands)
37451 rtx addis_value = operands[1];
37452 rtx target = operands[2];
37453 rtx orig_mem = operands[3];
37454 rtx new_addr, new_mem, orig_addr, offset;
37455 enum rtx_code plus_or_lo_sum;
37456 machine_mode target_mode = GET_MODE (target);
37457 machine_mode extend_mode = target_mode;
37458 machine_mode ptr_mode = Pmode;
37459 enum rtx_code extend = UNKNOWN;
37461 if (GET_CODE (orig_mem) == ZERO_EXTEND
37462 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
37464 extend = GET_CODE (orig_mem);
37465 orig_mem = XEXP (orig_mem, 0);
37466 target_mode = GET_MODE (orig_mem);
37469 gcc_assert (MEM_P (orig_mem));
37471 orig_addr = XEXP (orig_mem, 0);
37472 plus_or_lo_sum = GET_CODE (orig_addr);
37473 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37475 offset = XEXP (orig_addr, 1);
37476 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37477 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37479 if (extend != UNKNOWN)
37480 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
37482 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37483 UNSPEC_FUSION_GPR);
37484 emit_insn (gen_rtx_SET (target, new_mem));
37486 if (extend == SIGN_EXTEND)
37488 int sub_off = ((BYTES_BIG_ENDIAN)
37489 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
37490 : 0);
37491 rtx sign_reg
37492 = simplify_subreg (target_mode, target, extend_mode, sub_off);
37494 emit_insn (gen_rtx_SET (target,
37495 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
37498 return;
37501 /* Emit the addis instruction that will be part of a fused instruction
37502 sequence. */
37504 void
37505 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
37506 const char *mode_name)
37508 rtx fuse_ops[10];
37509 char insn_template[80];
37510 const char *addis_str = NULL;
37511 const char *comment_str = ASM_COMMENT_START;
37513 if (*comment_str == ' ')
37514 comment_str++;
37516 /* Emit the addis instruction. */
37517 fuse_ops[0] = target;
37518 if (satisfies_constraint_L (addis_value))
37520 fuse_ops[1] = addis_value;
37521 addis_str = "lis %0,%v1";
37524 else if (GET_CODE (addis_value) == PLUS)
37526 rtx op0 = XEXP (addis_value, 0);
37527 rtx op1 = XEXP (addis_value, 1);
37529 if (REG_P (op0) && CONST_INT_P (op1)
37530 && satisfies_constraint_L (op1))
37532 fuse_ops[1] = op0;
37533 fuse_ops[2] = op1;
37534 addis_str = "addis %0,%1,%v2";
37538 else if (GET_CODE (addis_value) == HIGH)
37540 rtx value = XEXP (addis_value, 0);
37541 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
37543 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
37544 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
37545 if (TARGET_ELF)
37546 addis_str = "addis %0,%2,%1@toc@ha";
37548 else if (TARGET_XCOFF)
37549 addis_str = "addis %0,%1@u(%2)";
37551 else
37552 gcc_unreachable ();
37555 else if (GET_CODE (value) == PLUS)
37557 rtx op0 = XEXP (value, 0);
37558 rtx op1 = XEXP (value, 1);
37560 if (GET_CODE (op0) == UNSPEC
37561 && XINT (op0, 1) == UNSPEC_TOCREL
37562 && CONST_INT_P (op1))
37564 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
37565 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
37566 fuse_ops[3] = op1;
37567 if (TARGET_ELF)
37568 addis_str = "addis %0,%2,%1+%3@toc@ha";
37570 else if (TARGET_XCOFF)
37571 addis_str = "addis %0,%1+%3@u(%2)";
37573 else
37574 gcc_unreachable ();
37578 else if (satisfies_constraint_L (value))
37580 fuse_ops[1] = value;
37581 addis_str = "lis %0,%v1";
37584 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
37586 fuse_ops[1] = value;
37587 addis_str = "lis %0,%1@ha";
37591 if (!addis_str)
37592 fatal_insn ("Could not generate addis value for fusion", addis_value);
37594 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
37595 comment, mode_name);
37596 output_asm_insn (insn_template, fuse_ops);
37599 /* Emit a D-form load or store instruction that is the second instruction
37600 of a fusion sequence. */
37602 void
37603 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
37604 const char *insn_str)
37606 rtx fuse_ops[10];
37607 char insn_template[80];
37609 fuse_ops[0] = load_store_reg;
37610 fuse_ops[1] = addis_reg;
37612 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
37614 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
37615 fuse_ops[2] = offset;
37616 output_asm_insn (insn_template, fuse_ops);
37619 else if (GET_CODE (offset) == UNSPEC
37620 && XINT (offset, 1) == UNSPEC_TOCREL)
37622 if (TARGET_ELF)
37623 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
37625 else if (TARGET_XCOFF)
37626 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37628 else
37629 gcc_unreachable ();
37631 fuse_ops[2] = XVECEXP (offset, 0, 0);
37632 output_asm_insn (insn_template, fuse_ops);
37635 else if (GET_CODE (offset) == PLUS
37636 && GET_CODE (XEXP (offset, 0)) == UNSPEC
37637 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
37638 && CONST_INT_P (XEXP (offset, 1)))
37640 rtx tocrel_unspec = XEXP (offset, 0);
37641 if (TARGET_ELF)
37642 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
37644 else if (TARGET_XCOFF)
37645 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
37647 else
37648 gcc_unreachable ();
37650 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
37651 fuse_ops[3] = XEXP (offset, 1);
37652 output_asm_insn (insn_template, fuse_ops);
37655 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
37657 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37659 fuse_ops[2] = offset;
37660 output_asm_insn (insn_template, fuse_ops);
37663 else
37664 fatal_insn ("Unable to generate load/store offset for fusion", offset);
37666 return;
37669 /* Wrap a TOC address that can be fused to indicate that special fusion
37670 processing is needed. */
37673 fusion_wrap_memory_address (rtx old_mem)
37675 rtx old_addr = XEXP (old_mem, 0);
37676 rtvec v = gen_rtvec (1, old_addr);
37677 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
37678 return replace_equiv_address_nv (old_mem, new_addr, false);
37681 /* Given an address, convert it into the addis and load offset parts. Addresses
37682 created during the peephole2 process look like:
37683 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
37684 (unspec [(...)] UNSPEC_TOCREL))
37686 Addresses created via toc fusion look like:
37687 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
37689 static void
37690 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
37692 rtx hi, lo;
37694 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
37696 lo = XVECEXP (addr, 0, 0);
37697 hi = gen_rtx_HIGH (Pmode, lo);
37699 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
37701 hi = XEXP (addr, 0);
37702 lo = XEXP (addr, 1);
37704 else
37705 gcc_unreachable ();
37707 *p_hi = hi;
37708 *p_lo = lo;
37711 /* Return a string to fuse an addis instruction with a gpr load to the same
37712 register that the addis instruction set. The address that is used
37713 is the logical address that was formed during peephole2:
37714 (lo_sum (high) (low-part))
37716 Or the address is the TOC address that is wrapped before register allocation:
37717 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
37719 The code is complicated, so we call output_asm_insn directly, and just
37720 return "". */
37722 const char *
37723 emit_fusion_gpr_load (rtx target, rtx mem)
37725 rtx addis_value;
37726 rtx addr;
37727 rtx load_offset;
37728 const char *load_str = NULL;
37729 const char *mode_name = NULL;
37730 machine_mode mode;
37732 if (GET_CODE (mem) == ZERO_EXTEND)
37733 mem = XEXP (mem, 0);
37735 gcc_assert (REG_P (target) && MEM_P (mem));
37737 addr = XEXP (mem, 0);
37738 fusion_split_address (addr, &addis_value, &load_offset);
37740 /* Now emit the load instruction to the same register. */
37741 mode = GET_MODE (mem);
37742 switch (mode)
37744 case QImode:
37745 mode_name = "char";
37746 load_str = "lbz";
37747 break;
37749 case HImode:
37750 mode_name = "short";
37751 load_str = "lhz";
37752 break;
37754 case SImode:
37755 case SFmode:
37756 mode_name = (mode == SFmode) ? "float" : "int";
37757 load_str = "lwz";
37758 break;
37760 case DImode:
37761 case DFmode:
37762 gcc_assert (TARGET_POWERPC64);
37763 mode_name = (mode == DFmode) ? "double" : "long";
37764 load_str = "ld";
37765 break;
37767 default:
37768 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
37771 /* Emit the addis instruction. */
37772 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
37774 /* Emit the D-form load instruction. */
37775 emit_fusion_load_store (target, target, load_offset, load_str);
37777 return "";
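/* For illustration (added commentary): for a TOC-relative SImode load,
   the templates selected above combine into

     addis r9,r2,sym@toc@ha
     lwz   r9,sym@toc@l(r9)

   a back-to-back pair that power8 can fuse.  The register and symbol
   names are illustrative.  */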
37781 /* Return true if the peephole2 can combine an addis instruction with a
37782 load or store memory operation. This form of fusion was added with the
37783 ISA 3.0 (power9) hardware. */
37785 bool
37786 fusion_p9_p (rtx addis_reg, /* register set via addis. */
37787 rtx addis_value, /* addis value. */
37788 rtx dest, /* destination (memory or register). */
37789 rtx src) /* source (register or memory). */
37791 rtx addr, mem, offset;
37792 enum machine_mode mode = GET_MODE (src);
37794 /* Validate arguments. */
37795 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37796 return false;
37798 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37799 return false;
37801 /* Ignore extend operations that are part of the load. */
37802 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
37803 src = XEXP (src, 0);
37805 /* Test for memory<-register or register<-memory. */
37806 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
37808 if (!MEM_P (dest))
37809 return false;
37811 mem = dest;
37814 else if (MEM_P (src))
37816 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
37817 return false;
37819 mem = src;
37822 else
37823 return false;
37825 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37826 if (GET_CODE (addr) == PLUS)
37828 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
37829 return false;
37831 return satisfies_constraint_I (XEXP (addr, 1));
37834 else if (GET_CODE (addr) == LO_SUM)
37836 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
37837 return false;
37839 offset = XEXP (addr, 1);
37840 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
37841 return small_toc_ref (offset, GET_MODE (offset));
37843 else if (TARGET_ELF && !TARGET_POWERPC64)
37844 return CONSTANT_P (offset);
37847 return false;
37850 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
37851 load sequence.
37853 The operands are:
37854 operands[0] register set with addis
37855 operands[1] value set via addis
37856 operands[2] target register being loaded
37857 operands[3] D-form memory reference using operands[0].
37859 This is similar to the fusion introduced with power8, except it extends
37860 to both loads and stores and does not require the result register to be
37861 the same as the base register. At the moment, we only do this if the
37862 register set by the addis is dead. */
37864 void
37865 expand_fusion_p9_load (rtx *operands)
37867 rtx tmp_reg = operands[0];
37868 rtx addis_value = operands[1];
37869 rtx target = operands[2];
37870 rtx orig_mem = operands[3];
37871 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
37872 enum rtx_code plus_or_lo_sum;
37873 machine_mode target_mode = GET_MODE (target);
37874 machine_mode extend_mode = target_mode;
37875 machine_mode ptr_mode = Pmode;
37876 enum rtx_code extend = UNKNOWN;
37878 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
37880 extend = GET_CODE (orig_mem);
37881 orig_mem = XEXP (orig_mem, 0);
37882 target_mode = GET_MODE (orig_mem);
37885 gcc_assert (MEM_P (orig_mem));
37887 orig_addr = XEXP (orig_mem, 0);
37888 plus_or_lo_sum = GET_CODE (orig_addr);
37889 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37891 offset = XEXP (orig_addr, 1);
37892 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37893 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37895 if (extend != UNKNOWN)
37896 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
37898 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37899 UNSPEC_FUSION_P9);
37901 set = gen_rtx_SET (target, new_mem);
37902 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
37903 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
37904 emit_insn (insn);
37906 return;
37909 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
37910 store sequence.
37912 The operands are:
37913 operands[0] register set with addis
37914 operands[1] value set via addis
37915 operands[2] target D-form memory being stored to
37916 operands[3] register being stored
37918 This is similar to the fusion introduced with power8, except it extends
37919 to both loads and stores and does not require the result register to be
37920 the same as the base register. At the moment, we only do this if the
37921 register set by the addis is dead. */
37923 void
37924 expand_fusion_p9_store (rtx *operands)
37926 rtx tmp_reg = operands[0];
37927 rtx addis_value = operands[1];
37928 rtx orig_mem = operands[2];
37929 rtx src = operands[3];
37930 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
37931 enum rtx_code plus_or_lo_sum;
37932 machine_mode target_mode = GET_MODE (orig_mem);
37933 machine_mode ptr_mode = Pmode;
37935 gcc_assert (MEM_P (orig_mem));
37937 orig_addr = XEXP (orig_mem, 0);
37938 plus_or_lo_sum = GET_CODE (orig_addr);
37939 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37941 offset = XEXP (orig_addr, 1);
37942 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37943 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37945 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
37946 UNSPEC_FUSION_P9);
37948 set = gen_rtx_SET (new_mem, new_src);
37949 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
37950 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
37951 emit_insn (insn);
37953 return;
37956 /* Return a string to fuse an addis instruction with a load using extended
37957 fusion. The address that is used is the logical address that was formed
37958 during peephole2: (lo_sum (high) (low-part))
37960 The code is complicated, so we call output_asm_insn directly, and just
37961 return "". */
37963 const char *
37964 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
37966 enum machine_mode mode = GET_MODE (reg);
37967 rtx hi;
37968 rtx lo;
37969 rtx addr;
37970 const char *load_string;
37971 int r;
37973 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
37975 mem = XEXP (mem, 0);
37976 mode = GET_MODE (mem);
37979 if (GET_CODE (reg) == SUBREG)
37981 gcc_assert (SUBREG_BYTE (reg) == 0);
37982 reg = SUBREG_REG (reg);
37985 if (!REG_P (reg))
37986 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
37988 r = REGNO (reg);
37989 if (FP_REGNO_P (r))
37991 if (mode == SFmode)
37992 load_string = "lfs";
37993 else if (mode == DFmode || mode == DImode)
37994 load_string = "lfd";
37995 else
37996 gcc_unreachable ();
37998 else if (INT_REGNO_P (r))
38000 switch (mode)
38002 case QImode:
38003 load_string = "lbz";
38004 break;
38005 case HImode:
38006 load_string = "lhz";
38007 break;
38008 case SImode:
38009 case SFmode:
38010 load_string = "lwz";
38011 break;
38012 case DImode:
38013 case DFmode:
38014 if (!TARGET_POWERPC64)
38015 gcc_unreachable ();
38016 load_string = "ld";
38017 break;
38018 default:
38019 gcc_unreachable ();
38022 else
38023 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38025 if (!MEM_P (mem))
38026 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38028 addr = XEXP (mem, 0);
38029 fusion_split_address (addr, &hi, &lo);
38031 /* Emit the addis instruction. */
38032 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
38034 /* Emit the D-form load instruction. */
38035 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38037 return "";
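/* For illustration (added commentary): unlike the power8 GPR form, the
   power9 sequence may use a scratch base register distinct from the
   destination, e.g.

     addis r10,r2,sym@toc@ha
     lfd   f1,sym@toc@l(r10)

   loading directly into a floating-point register.  Names are
   illustrative.  */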
38040 /* Return a string to fuse an addis instruction with a store using extended
38041 fusion. The address that is used is the logical address that was formed
38042 during peephole2: (lo_sum (high) (low-part))
38044 The code is complicated, so we call output_asm_insn directly, and just
38045 return "". */
38047 const char *
38048 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38050 enum machine_mode mode = GET_MODE (reg);
38051 rtx hi;
38052 rtx lo;
38053 rtx addr;
38054 const char *store_string;
38055 int r;
38057 if (GET_CODE (reg) == SUBREG)
38059 gcc_assert (SUBREG_BYTE (reg) == 0);
38060 reg = SUBREG_REG (reg);
38063 if (!REG_P (reg))
38064 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38066 r = REGNO (reg);
38067 if (FP_REGNO_P (r))
38069 if (mode == SFmode)
38070 store_string = "stfs";
38071 else if (mode == DFmode)
38072 store_string = "stfd";
38073 else
38074 gcc_unreachable ();
38076 else if (INT_REGNO_P (r))
38078 switch (mode)
38080 case QImode:
38081 store_string = "stb";
38082 break;
38083 case HImode:
38084 store_string = "sth";
38085 break;
38086 case SImode:
38087 case SFmode:
38088 store_string = "stw";
38089 break;
38090 case DImode:
38091 case DFmode:
38092 if (!TARGET_POWERPC64)
38093 gcc_unreachable ();
38094 store_string = "std";
38095 break;
38096 default:
38097 gcc_unreachable ();
38100 else
38101 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38103 if (!MEM_P (mem))
38104 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38106 addr = XEXP (mem, 0);
38107 fusion_split_address (addr, &hi, &lo);
38109 /* Emit the addis instruction. */
38110 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
38112 /* Emit the D-form store instruction. */
38113 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38115 return "";
38119 /* Analyze vector computations and remove unnecessary doubleword
38120 swaps (xxswapdi instructions). This pass is performed only
38121 for little-endian VSX code generation.
38123 For this specific case, loads and stores of 4x32 and 2x64 vectors
38124 are inefficient. These are implemented using the lvxd2x and
38125 stvxd2x instructions, which invert the order of doublewords in
38126 a vector register. Thus the code generation inserts an xxswapdi
38127 after each such load, and prior to each such store. (For spill
38128 code after register assignment, an additional xxswapdi is inserted
38129 following each store in order to return a hard register to its
38130 unpermuted value.)
38132 The extra xxswapdi instructions reduce performance. This can be
38133 particularly bad for vectorized code. The purpose of this pass
38134 is to reduce the number of xxswapdi instructions required for
38135 correctness.
38137 The primary insight is that much code that operates on vectors
38138 does not care about the relative order of elements in a register,
38139 so long as the correct memory order is preserved. If we have
38140 a computation where all input values are provided by lvxd2x/xxswapdi
38141 sequences, all outputs are stored using xxswapdi/stvxd2x sequences,
38142 and all intermediate computations are pure SIMD (independent of
38143 element order), then all the xxswapdi's associated with the loads
38144 and stores may be removed.
38146 This pass uses some of the infrastructure and logical ideas from
38147 the "web" pass in web.c. We create maximal webs of computations
38148 fitting the description above using union-find. Each such web is
38149 then optimized by removing its unnecessary xxswapdi instructions.
38151 The pass is placed prior to global optimization so that we can
38152 perform the optimization in the safest and simplest way possible;
38153 that is, by replacing each xxswapdi insn with a register copy insn.
38154 Subsequent forward propagation will remove copies where possible.
38156 There are some operations sensitive to element order for which we
38157 can still allow the operation, provided we modify those operations.
38158 These include CONST_VECTORs, for which we must swap the first and
38159 second halves of the constant vector; and SUBREGs, for which we
38160 must adjust the byte offset to account for the swapped doublewords.
38161 A remaining opportunity would be non-immediate-form splats, for
38162 which we should adjust the selected lane of the input. We should
38163 also make code generation adjustments for sum-across operations,
38164 since this is a common vectorizer reduction.
38166 Because we run prior to the first split, we can see loads and stores
38167 here that match *vsx_le_perm_{load,store}_<mode>. These are vanilla
38168 vector loads and stores that have not yet been split into a permuting
38169 load/store and a swap. (One way this can happen is with a builtin
38170 call to vec_vsx_{ld,st}.) We can handle these as well, but rather
38171 than deleting a swap, we convert the load/store into a permuting
38172 load/store (which effectively removes the swap). */
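/* For illustration (added commentary): on little-endian VSX a simple
   vector copy may initially look like

     lvxd2x   vs0,0,r9       # load, doublewords reversed
     xxswapdi vs0,vs0        # fix element order
     xxswapdi vs0,vs0        # undo before the store
     stvxd2x  vs0,0,r10      # store, doublewords reversed

   and this pass deletes both xxswapdi's (or converts the load/store to
   a permuting form), since element order is irrelevant in such a web.
   Mnemonics and registers are illustrative.  */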
38174 /* Notes on Permutes
38176 We do not currently handle computations that contain permutes. There
38177 is a general transformation that can be performed correctly, but it
38178 may introduce more expensive code than it replaces. To handle these
38179 would require a cost model to determine when to perform the optimization.
38180 This commentary records how this could be done if desired.
38182 The most general permute is something like this (example for V16QI):
38184 (vec_select:V16QI (vec_concat:V32QI (op1:V16QI) (op2:V16QI))
38185 (parallel [(const_int a0) (const_int a1)
38187 (const_int a14) (const_int a15)]))
38189 where a0,...,a15 are in [0,31] and select elements from op1 and op2
38190 to produce in the result.
38192 Regardless of mode, we can convert the PARALLEL to a mask of 16
38193 byte-element selectors. Let's call this M, with M[i] representing
38194 the ith byte-element selector value. Then if we swap doublewords
38195 throughout the computation, we can get correct behavior by replacing
38196 M with M' as follows:
38198 M'[i] = { (M[i]+8)%16 : M[i] in [0,15]
38199 { ((M[i]+8)%16)+16 : M[i] in [16,31]
38201 This seems promising at first, since we are just replacing one mask
38202 with another. But certain masks are preferable to others. If M
38203 is a mask that matches a vmrghh pattern, for example, M' certainly
38204 will not. Instead of a single vmrghh, we would generate a load of
38205 M' and a vperm. So we would need to know how many xxswapd's we can
38206 remove as a result of this transformation to determine if it's
38207 profitable; and preferably the logic would need to be aware of all
38208 the special preferable masks.
38210 Another form of permute is an UNSPEC_VPERM, in which the mask is
38211 already in a register. In some cases, this mask may be a constant
38212 that we can discover with ud-chains, in which case the above
38213 transformation is ok. However, the common usage here is for the
38214 mask to be produced by an UNSPEC_LVSL, in which case the mask
38215 cannot be known at compile time. In such a case we would have to
38216 generate several instructions to compute M' as above at run time,
38217 and a cost model is needed again.
38219 However, when the mask M for an UNSPEC_VPERM is loaded from the
38220 constant pool, we can replace M with M' as above at no cost
38221 beyond adding a constant pool entry. */
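/* Worked example (added for clarity): if M is the identity selector
   [0 1 2 ... 15] drawing only from op1, then M'[i] = (M[i]+8)%16
   yields [8 9 ... 15 0 1 ... 7], which is precisely a doubleword swap
   of op1 -- consistent with the whole computation having its
   doublewords swapped.  */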
38223 /* This is based on the union-find logic in web.c. web_entry_base is
38224 defined in df.h. */
38225 class swap_web_entry : public web_entry_base
38227 public:
38228 /* Pointer to the insn. */
38229 rtx_insn *insn;
38230 /* Set if insn contains a mention of a vector register. All other
38231 fields are undefined if this field is unset. */
38232 unsigned int is_relevant : 1;
38233 /* Set if insn is a load. */
38234 unsigned int is_load : 1;
38235 /* Set if insn is a store. */
38236 unsigned int is_store : 1;
38237 /* Set if insn is a doubleword swap. This can either be a register swap
38238 or a permuting load or store (test is_load and is_store for this). */
38239 unsigned int is_swap : 1;
38240 /* Set if the insn has a live-in use of a parameter register. */
38241 unsigned int is_live_in : 1;
38242 /* Set if the insn has a live-out def of a return register. */
38243 unsigned int is_live_out : 1;
38244 /* Set if the insn contains a subreg reference of a vector register. */
38245 unsigned int contains_subreg : 1;
38246 /* Set if the insn contains a 128-bit integer operand. */
38247 unsigned int is_128_int : 1;
38248 /* Set if this is a call-insn. */
38249 unsigned int is_call : 1;
38250 /* Set if this insn does not perform a vector operation for which
38251 element order matters, or if we know how to fix it up if it does.
38252 Undefined if is_swap is set. */
38253 unsigned int is_swappable : 1;
38254 /* A nonzero value indicates what kind of special handling for this
38255 insn is required if doublewords are swapped. Undefined if
38256 is_swappable is not set. */
38257 unsigned int special_handling : 4;
38258 /* Set if the web represented by this entry cannot be optimized. */
38259 unsigned int web_not_optimizable : 1;
38260 /* Set if this insn should be deleted. */
38261 unsigned int will_delete : 1;
38264 enum special_handling_values {
38265 SH_NONE = 0,
38266 SH_CONST_VECTOR,
38267 SH_SUBREG,
38268 SH_NOSWAP_LD,
38269 SH_NOSWAP_ST,
38270 SH_EXTRACT,
38271 SH_SPLAT,
38272 SH_XXPERMDI,
38273 SH_CONCAT,
38274 SH_VPERM
38277 /* Union INSN with all insns containing definitions that reach USE.
38278 Detect whether USE is live-in to the current function. */
38279 static void
38280 union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
38282 struct df_link *link = DF_REF_CHAIN (use);
38284 if (!link)
38285 insn_entry[INSN_UID (insn)].is_live_in = 1;
38287 while (link)
38289 if (DF_REF_IS_ARTIFICIAL (link->ref))
38290 insn_entry[INSN_UID (insn)].is_live_in = 1;
38292 if (DF_REF_INSN_INFO (link->ref))
38294 rtx def_insn = DF_REF_INSN (link->ref);
38295 (void)unionfind_union (insn_entry + INSN_UID (insn),
38296 insn_entry + INSN_UID (def_insn));
38299 link = link->next;
38303 /* Union INSN with all insns containing uses reached from DEF.
38304 Detect whether DEF is live-out from the current function. */
38305 static void
38306 union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
38308 struct df_link *link = DF_REF_CHAIN (def);
38310 if (!link)
38311 insn_entry[INSN_UID (insn)].is_live_out = 1;
38313 while (link)
38315 /* This could be an eh use or some other artificial use;
38316 we treat these all the same (killing the optimization). */
38317 if (DF_REF_IS_ARTIFICIAL (link->ref))
38318 insn_entry[INSN_UID (insn)].is_live_out = 1;
38320 if (DF_REF_INSN_INFO (link->ref))
38322 rtx use_insn = DF_REF_INSN (link->ref);
38323 (void)unionfind_union (insn_entry + INSN_UID (insn),
38324 insn_entry + INSN_UID (use_insn));
38327 link = link->next;
38331 /* Return 1 iff INSN is a load insn, including permuting loads that
38332 represent an lvxd2x instruction; else return 0. */
38333 static unsigned int
38334 insn_is_load_p (rtx insn)
38336 rtx body = PATTERN (insn);
38338 if (GET_CODE (body) == SET)
38340 if (GET_CODE (SET_SRC (body)) == MEM)
38341 return 1;
38343 if (GET_CODE (SET_SRC (body)) == VEC_SELECT
38344 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
38345 return 1;
38347 return 0;
38350 if (GET_CODE (body) != PARALLEL)
38351 return 0;
38353 rtx set = XVECEXP (body, 0, 0);
38355 if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
38356 return 1;
38358 return 0;
38361 /* Return 1 iff INSN is a store insn, including permuting stores that
38362 represent an stvxd2x instruction; else return 0. */
38363 static unsigned int
38364 insn_is_store_p (rtx insn)
38366 rtx body = PATTERN (insn);
38367 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
38368 return 1;
38369 if (GET_CODE (body) != PARALLEL)
38370 return 0;
38371 rtx set = XVECEXP (body, 0, 0);
38372 if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
38373 return 1;
38374 return 0;
38377 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
38378 a permuting load, or a permuting store. */
38379 static unsigned int
38380 insn_is_swap_p (rtx insn)
38382 rtx body = PATTERN (insn);
38383 if (GET_CODE (body) != SET)
38384 return 0;
38385 rtx rhs = SET_SRC (body);
38386 if (GET_CODE (rhs) != VEC_SELECT)
38387 return 0;
38388 rtx parallel = XEXP (rhs, 1);
38389 if (GET_CODE (parallel) != PARALLEL)
38390 return 0;
38391 unsigned int len = XVECLEN (parallel, 0);
38392 if (len != 2 && len != 4 && len != 8 && len != 16)
38393 return 0;
38394 for (unsigned int i = 0; i < len / 2; ++i)
38396 rtx op = XVECEXP (parallel, 0, i);
38397 if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
38398 return 0;
38400 for (unsigned int i = len / 2; i < len; ++i)
38402 rtx op = XVECEXP (parallel, 0, i);
38403 if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
38404 return 0;
38406 return 1;
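/* For illustration (added commentary): for V4SI the test above accepts

     (set (reg:V4SI x)
          (vec_select:V4SI (reg:V4SI y)
                           (parallel [(const_int 2) (const_int 3)
                                      (const_int 0) (const_int 1)])))

   i.e. the selector's first half counts up from len/2 and its second
   half from 0, exchanging the two doublewords.  */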
38409 /* Return TRUE if insn is a swap fed by a load from the constant pool. */
38410 static bool
38411 const_load_sequence_p (swap_web_entry *insn_entry, rtx insn)
38413 unsigned uid = INSN_UID (insn);
38414 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load)
38415 return false;
38417 /* Find the unique use in the swap and locate its def. If the def
38418 isn't unique, punt. */
38419 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38420 df_ref use;
38421 FOR_EACH_INSN_INFO_USE (use, insn_info)
38423 struct df_link *def_link = DF_REF_CHAIN (use);
38424 if (!def_link || def_link->next)
38425 return false;
38427 rtx def_insn = DF_REF_INSN (def_link->ref);
38428 unsigned uid2 = INSN_UID (def_insn);
38429 if (!insn_entry[uid2].is_load || !insn_entry[uid2].is_swap)
38430 return false;
38432 rtx body = PATTERN (def_insn);
38433 if (GET_CODE (body) != SET
38434 || GET_CODE (SET_SRC (body)) != VEC_SELECT
38435 || GET_CODE (XEXP (SET_SRC (body), 0)) != MEM)
38436 return false;
38438 rtx mem = XEXP (SET_SRC (body), 0);
38439 rtx base_reg = XEXP (mem, 0);
38441 df_ref base_use;
38442 insn_info = DF_INSN_INFO_GET (def_insn);
38443 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
38445 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
38446 continue;
38448 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
38449 if (!base_def_link || base_def_link->next)
38450 return false;
38452 rtx tocrel_insn = DF_REF_INSN (base_def_link->ref);
38453 rtx tocrel_body = PATTERN (tocrel_insn);
38454 rtx base, offset;
38455 if (GET_CODE (tocrel_body) != SET)
38456 return false;
38457 /* There is an extra level of indirection for small/large
38458 code models. */
38459 rtx tocrel_expr = SET_SRC (tocrel_body);
38460 if (GET_CODE (tocrel_expr) == MEM)
38461 tocrel_expr = XEXP (tocrel_expr, 0);
38462 if (!toc_relative_expr_p (tocrel_expr, false))
38463 return false;
38464 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
38465 if (GET_CODE (base) != SYMBOL_REF || !CONSTANT_POOL_ADDRESS_P (base))
38466 return false;
38469 return true;
38472 /* Return TRUE iff OP matches a V2DF reduction pattern. See the
38473 definition of vsx_reduc_<VEC_reduc_name>_v2df in vsx.md. */
38474 static bool
38475 v2df_reduction_p (rtx op)
38477 if (GET_MODE (op) != V2DFmode)
38478 return false;
38480 enum rtx_code code = GET_CODE (op);
38481 if (code != PLUS && code != SMIN && code != SMAX)
38482 return false;
38484 rtx concat = XEXP (op, 0);
38485 if (GET_CODE (concat) != VEC_CONCAT)
38486 return false;
38488 rtx select0 = XEXP (concat, 0);
38489 rtx select1 = XEXP (concat, 1);
38490 if (GET_CODE (select0) != VEC_SELECT || GET_CODE (select1) != VEC_SELECT)
38491 return false;
38493 rtx reg0 = XEXP (select0, 0);
38494 rtx reg1 = XEXP (select1, 0);
38495 if (!rtx_equal_p (reg0, reg1) || !REG_P (reg0))
38496 return false;
38498 rtx parallel0 = XEXP (select0, 1);
38499 rtx parallel1 = XEXP (select1, 1);
38500 if (GET_CODE (parallel0) != PARALLEL || GET_CODE (parallel1) != PARALLEL)
38501 return false;
38503 if (!rtx_equal_p (XVECEXP (parallel0, 0, 0), const1_rtx)
38504 || !rtx_equal_p (XVECEXP (parallel1, 0, 0), const0_rtx))
38505 return false;
38507 return true;
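/* For illustration (added commentary): the test above accepts the shape
   used by vsx_reduc_plus_v2df, roughly

     (plus:V2DF
       (vec_concat:V2DF
         (vec_select:DF (reg:V2DF x) (parallel [(const_int 1)]))
         (vec_select:DF (reg:V2DF x) (parallel [(const_int 0)])))
       (reg:V2DF x))

   where both selects read the same register.  */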
38510 /* Return 1 iff OP is an operand that will not be affected by having
38511 vector doublewords swapped in memory. */
38512 static unsigned int
38513 rtx_is_swappable_p (rtx op, unsigned int *special)
38515 enum rtx_code code = GET_CODE (op);
38516 int i, j;
38517 rtx parallel;
38519 switch (code)
38521 case LABEL_REF:
38522 case SYMBOL_REF:
38523 case CLOBBER:
38524 case REG:
38525 return 1;
38527 case VEC_CONCAT:
38528 case ASM_INPUT:
38529 case ASM_OPERANDS:
38530 return 0;
38532 case CONST_VECTOR:
38534 *special = SH_CONST_VECTOR;
38535 return 1;
38538 case VEC_DUPLICATE:
38539 /* Opportunity: If XEXP (op, 0) has the same mode as the result,
38540 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
38541 it represents a vector splat for which we can do special
38542 handling. */
38543 if (GET_CODE (XEXP (op, 0)) == CONST_INT)
38544 return 1;
38545 else if (GET_CODE (XEXP (op, 0)) == REG
38546 && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
38547 /* This catches V2DF and V2DI splat, at a minimum. */
38548 return 1;
38549 else if (GET_CODE (XEXP (op, 0)) == VEC_SELECT)
38550 /* If the duplicated item is from a select, defer to the select
38551 processing to see if we can change the lane for the splat. */
38552 return rtx_is_swappable_p (XEXP (op, 0), special);
38553 else
38554 return 0;
38556 case VEC_SELECT:
38557 /* A vec_extract operation is ok if we change the lane. */
38558 if (GET_CODE (XEXP (op, 0)) == REG
38559 && GET_MODE_INNER (GET_MODE (XEXP (op, 0))) == GET_MODE (op)
38560 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
38561 && XVECLEN (parallel, 0) == 1
38562 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT)
38564 *special = SH_EXTRACT;
38565 return 1;
38567 /* An XXPERMDI is ok if we adjust the lanes. Note that if the
38568 XXPERMDI is a swap operation, it will be identified by
38569 insn_is_swap_p and therefore we won't get here. */
38570 else if (GET_CODE (XEXP (op, 0)) == VEC_CONCAT
38571 && (GET_MODE (XEXP (op, 0)) == V4DFmode
38572 || GET_MODE (XEXP (op, 0)) == V4DImode)
38573 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
38574 && XVECLEN (parallel, 0) == 2
38575 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT
38576 && GET_CODE (XVECEXP (parallel, 0, 1)) == CONST_INT)
38578 *special = SH_XXPERMDI;
38579 return 1;
38581 else if (v2df_reduction_p (op))
38582 return 1;
38583 else
38584 return 0;
38586 case UNSPEC:
38588 /* Various operations are unsafe for this optimization, at least
38589 without significant additional work. Permutes are obviously
38590 problematic, as both the permute control vector and the ordering
38591 of the target values are invalidated by doubleword swapping.
38592 Vector pack and unpack modify the number of vector lanes.
38593 Merge-high/low will not operate correctly on swapped operands.
38594 Vector shifts across element boundaries are clearly uncool,
38595 as are vector select and concatenate operations. Vector
38596 sum-across instructions define one operand with a specific
38597 order-dependent element, so additional fixup code would be
38598 needed to make those work. Vector set and non-immediate-form
38599 vector splat are element-order sensitive. A few of these
38600 cases might be workable with special handling if required.
38601 Adding cost modeling would be appropriate in some cases. */
38602 int val = XINT (op, 1);
38603 switch (val)
38605 default:
38606 break;
38607 case UNSPEC_VMRGH_DIRECT:
38608 case UNSPEC_VMRGL_DIRECT:
38609 case UNSPEC_VPACK_SIGN_SIGN_SAT:
38610 case UNSPEC_VPACK_SIGN_UNS_SAT:
38611 case UNSPEC_VPACK_UNS_UNS_MOD:
38612 case UNSPEC_VPACK_UNS_UNS_MOD_DIRECT:
38613 case UNSPEC_VPACK_UNS_UNS_SAT:
38614 case UNSPEC_VPERM:
38615 case UNSPEC_VPERM_UNS:
38616 case UNSPEC_VPERMHI:
38617 case UNSPEC_VPERMSI:
38618 case UNSPEC_VPKPX:
38619 case UNSPEC_VSLDOI:
38620 case UNSPEC_VSLO:
38621 case UNSPEC_VSRO:
38622 case UNSPEC_VSUM2SWS:
38623 case UNSPEC_VSUM4S:
38624 case UNSPEC_VSUM4UBS:
38625 case UNSPEC_VSUMSWS:
38626 case UNSPEC_VSUMSWS_DIRECT:
38627 case UNSPEC_VSX_CONCAT:
38628 case UNSPEC_VSX_SET:
38629 case UNSPEC_VSX_SLDWI:
38630 case UNSPEC_VUNPACK_HI_SIGN:
38631 case UNSPEC_VUNPACK_HI_SIGN_DIRECT:
38632 case UNSPEC_VUNPACK_LO_SIGN:
38633 case UNSPEC_VUNPACK_LO_SIGN_DIRECT:
38634 case UNSPEC_VUPKHPX:
38635 case UNSPEC_VUPKHS_V4SF:
38636 case UNSPEC_VUPKHU_V4SF:
38637 case UNSPEC_VUPKLPX:
38638 case UNSPEC_VUPKLS_V4SF:
38639 case UNSPEC_VUPKLU_V4SF:
38640 case UNSPEC_VSX_CVDPSPN:
38641 case UNSPEC_VSX_CVSPDP:
38642 case UNSPEC_VSX_CVSPDPN:
38643 return 0;
38644 case UNSPEC_VSPLT_DIRECT:
38645 *special = SH_SPLAT;
38646 return 1;
38647 case UNSPEC_REDUC_PLUS:
38648 case UNSPEC_REDUC:
38649 return 1;
38653 default:
38654 break;
38657 const char *fmt = GET_RTX_FORMAT (code);
38658 int ok = 1;
38660 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
38661 if (fmt[i] == 'e' || fmt[i] == 'u')
38663 unsigned int special_op = SH_NONE;
38664 ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
38665 if (special_op == SH_NONE)
38666 continue;
38667 /* Ensure we never have two kinds of special handling
38668 for the same insn. */
38669 if (*special != SH_NONE && *special != special_op)
38670 return 0;
38671 *special = special_op;
38673 else if (fmt[i] == 'E')
38674 for (j = 0; j < XVECLEN (op, i); ++j)
38676 unsigned int special_op = SH_NONE;
38677 ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
38678 if (special_op == SH_NONE)
38679 continue;
38680 /* Ensure we never have two kinds of special handling
38681 for the same insn. */
38682 if (*special != SH_NONE && *special != special_op)
38683 return 0;
38684 *special = special_op;
38687 return ok;
38690 /* Return 1 iff INSN contains an operation that will not be affected by
38691 having vector doublewords swapped in memory (in which case
38692 *SPECIAL is unchanged), or that can be modified to be correct
38693 if vector doublewords are swapped in memory (in which case
38694 *SPECIAL is changed to a value indicating how). */
38695 static unsigned int
38696 insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
38697 unsigned int *special)
38699 /* Calls are always bad. */
38700 if (GET_CODE (insn) == CALL_INSN)
38701 return 0;
38703 /* Loads and stores seen here are not permuting, but we can still
38704 fix them up by converting them to permuting ones. Exceptions:
38705 UNSPEC_LVE, UNSPEC_LVX, and UNSPEC_STVX, which have a PARALLEL
38706 body instead of a SET; and UNSPEC_STVE, which has an UNSPEC
38707 for the SET source. Also we must now make an exception for lvx
38708 and stvx when they are not in the UNSPEC_LVX/STVX form (with an
38709 explicit "& -16"), as converting them yields unrecognizable insns. */
38710 rtx body = PATTERN (insn);
38711 int i = INSN_UID (insn);
38713 if (insn_entry[i].is_load)
38715 if (GET_CODE (body) == SET)
38717 rtx rhs = SET_SRC (body);
38718 gcc_assert (GET_CODE (rhs) == MEM);
38719 if (GET_CODE (XEXP (rhs, 0)) == AND)
38720 return 0;
38722 *special = SH_NOSWAP_LD;
38723 return 1;
38725 else
38726 return 0;
38729 if (insn_entry[i].is_store)
38731 if (GET_CODE (body) == SET
38732 && GET_CODE (SET_SRC (body)) != UNSPEC)
38734 rtx lhs = SET_DEST (body);
38735 gcc_assert (GET_CODE (lhs) == MEM);
38736 if (GET_CODE (XEXP (lhs, 0)) == AND)
38737 return 0;
38739 *special = SH_NOSWAP_ST;
38740 return 1;
38742 else
38743 return 0;
38746 /* A convert to single precision can be left as is provided that
38747 all of its uses are in xxspltw instructions that splat BE element
38748 zero. */
38749 if (GET_CODE (body) == SET
38750 && GET_CODE (SET_SRC (body)) == UNSPEC
38751 && XINT (SET_SRC (body), 1) == UNSPEC_VSX_CVDPSPN)
38753 df_ref def;
38754 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38756 FOR_EACH_INSN_INFO_DEF (def, insn_info)
38758 struct df_link *link = DF_REF_CHAIN (def);
38759 if (!link)
38760 return 0;
38762 for (; link; link = link->next)
38763 rtx use_insn = DF_REF_INSN (link->ref);
38764 rtx use_body = PATTERN (use_insn);
38765 if (GET_CODE (use_body) != SET
38766 || GET_CODE (SET_SRC (use_body)) != UNSPEC
38767 || XINT (SET_SRC (use_body), 1) != UNSPEC_VSX_XXSPLTW
38768 || XVECEXP (SET_SRC (use_body), 0, 1) != const0_rtx)
38769 return 0;
38773 return 1;
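/* Illustrative sketch (register numbers invented): the shape matched
   above is a conversion whose every use is a BE-element-zero splat:

     (set (reg:V4SF 100)
          (unspec:V4SF [(reg:DF 99)] UNSPEC_VSX_CVDPSPN))
     (set (reg:V4SF 101)
          (unspec:V4SF [(reg:V4SF 100)
                        (const_int 0)] UNSPEC_VSX_XXSPLTW))

   Any use that is not such a splat causes the conversion to be
   rejected as swappable.  */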
38776 /* A concatenation of two doublewords is ok if we reverse the
38777 order of the inputs. */
38778 if (GET_CODE (body) == SET
38779 && GET_CODE (SET_SRC (body)) == VEC_CONCAT
38780 && (GET_MODE (SET_SRC (body)) == V2DFmode
38781 || GET_MODE (SET_SRC (body)) == V2DImode))
38783 *special = SH_CONCAT;
38784 return 1;
38787 /* V2DF reductions are always swappable. */
38788 if (GET_CODE (body) == PARALLEL)
38790 rtx expr = XVECEXP (body, 0, 0);
38791 if (GET_CODE (expr) == SET
38792 && v2df_reduction_p (SET_SRC (expr)))
38793 return 1;
38796 /* An UNSPEC_VPERM is ok if the mask operand is loaded from the
38797 constant pool. */
38798 if (GET_CODE (body) == SET
38799 && GET_CODE (SET_SRC (body)) == UNSPEC
38800 && XINT (SET_SRC (body), 1) == UNSPEC_VPERM
38801 && XVECLEN (SET_SRC (body), 0) == 3
38802 && GET_CODE (XVECEXP (SET_SRC (body), 0, 2)) == REG)
38804 rtx mask_reg = XVECEXP (SET_SRC (body), 0, 2);
38805 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38806 df_ref use;
38807 FOR_EACH_INSN_INFO_USE (use, insn_info)
38808 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
38810 struct df_link *def_link = DF_REF_CHAIN (use);
38811 /* Punt if multiple definitions for this reg. */
38812 if (def_link && !def_link->next
38813 && const_load_sequence_p (insn_entry,
38814 DF_REF_INSN (def_link->ref)))
38816 *special = SH_VPERM;
38817 return 1;
38822 /* Otherwise check the operands for vector lane violations. */
38823 return rtx_is_swappable_p (body, special);
38826 enum chain_purpose { FOR_LOADS, FOR_STORES };
38828 /* Return true if the UD or DU chain headed by LINK is non-empty,
38829 and every entry on the chain references an insn that is a
38830 register swap. Furthermore, if PURPOSE is FOR_LOADS, each such
38831 register swap must have only permuting loads as reaching defs.
38832 If PURPOSE is FOR_STORES, each such register swap must have only
38833 register swaps or permuting stores as reached uses. */
38834 static bool
38835 chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
38836 enum chain_purpose purpose)
38838 if (!link)
38839 return false;
38841 for (; link; link = link->next)
38843 if (!ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (DF_REF_REG (link->ref))))
38844 continue;
38846 if (DF_REF_IS_ARTIFICIAL (link->ref))
38847 return false;
38849 rtx reached_insn = DF_REF_INSN (link->ref);
38850 unsigned uid = INSN_UID (reached_insn);
38851 struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);
38853 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
38854 || insn_entry[uid].is_store)
38855 return false;
38857 if (purpose == FOR_LOADS)
38859 df_ref use;
38860 FOR_EACH_INSN_INFO_USE (use, insn_info)
38862 struct df_link *swap_link = DF_REF_CHAIN (use);
38864 while (swap_link)
38866 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
38867 return false;
38869 rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
38870 unsigned uid2 = INSN_UID (swap_def_insn);
38872 /* Only permuting loads are allowed. */
38873 if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
38874 return false;
38876 swap_link = swap_link->next;
38880 else if (purpose == FOR_STORES)
38882 df_ref def;
38883 FOR_EACH_INSN_INFO_DEF (def, insn_info)
38885 struct df_link *swap_link = DF_REF_CHAIN (def);
38887 while (swap_link)
38889 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
38890 return false;
38892 rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
38893 unsigned uid2 = INSN_UID (swap_use_insn);
38895 /* Permuting stores or register swaps are allowed. */
38896 if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
38897 return false;
38899 swap_link = swap_link->next;
38905 return true;
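/* Illustrative sketch (register numbers invented): for FOR_LOADS, an
   acceptable chain from a permuting load looks like

     (set (reg:V2DI 100)                               ; permuting load
          (vec_select:V2DI (mem:V2DI ...)
                           (parallel [(const_int 1) (const_int 0)])))
     (set (reg:V2DI 101)                               ; register swap
          (vec_select:V2DI (reg:V2DI 100)
                           (parallel [(const_int 1) (const_int 0)])))

   where every insn reached by the load's def is such a swap, and
   every def reaching each swap is itself a permuting load.  */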
38908 /* Mark the xxswapdi instructions associated with permuting loads and
38909 stores for removal. Note that we only flag them for deletion here,
38910 as there is a possibility of a swap being reached from multiple
38911 loads, etc. */
38912 static void
38913 mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
38915 rtx insn = insn_entry[i].insn;
38916 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38918 if (insn_entry[i].is_load)
38920 df_ref def;
38921 FOR_EACH_INSN_INFO_DEF (def, insn_info)
38923 struct df_link *link = DF_REF_CHAIN (def);
38925 /* We know by now that these are swaps, so we can delete
38926 them confidently. */
38927 while (link)
38929 rtx use_insn = DF_REF_INSN (link->ref);
38930 insn_entry[INSN_UID (use_insn)].will_delete = 1;
38931 link = link->next;
38935 else if (insn_entry[i].is_store)
38937 df_ref use;
38938 FOR_EACH_INSN_INFO_USE (use, insn_info)
38940 /* Ignore uses for addressability. */
38941 machine_mode mode = GET_MODE (DF_REF_REG (use));
38942 if (!ALTIVEC_OR_VSX_VECTOR_MODE (mode))
38943 continue;
38945 struct df_link *link = DF_REF_CHAIN (use);
38947 /* We know by now that these are swaps, so we can delete
38948 them confidently. */
38949 while (link)
38951 rtx def_insn = DF_REF_INSN (link->ref);
38952 insn_entry[INSN_UID (def_insn)].will_delete = 1;
38953 link = link->next;
38959 /* OP is either a CONST_VECTOR or an expression containing one.
38960 Swap the first half of the vector with the second in the first
38961 case. Recurse to find it in the second. */
38962 static void
38963 swap_const_vector_halves (rtx op)
38965 int i;
38966 enum rtx_code code = GET_CODE (op);
38967 if (GET_CODE (op) == CONST_VECTOR)
38969 int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
38970 for (i = 0; i < half_units; ++i)
38972 rtx temp = CONST_VECTOR_ELT (op, i);
38973 CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
38974 CONST_VECTOR_ELT (op, i + half_units) = temp;
38977 else
38979 int j;
38980 const char *fmt = GET_RTX_FORMAT (code);
38981 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
38982 if (fmt[i] == 'e' || fmt[i] == 'u')
38983 swap_const_vector_halves (XEXP (op, i));
38984 else if (fmt[i] == 'E')
38985 for (j = 0; j < XVECLEN (op, i); ++j)
38986 swap_const_vector_halves (XVECEXP (op, i, j));
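/* Worked example: for the V4SImode constant { 0, 1, 2, 3 },
   half_units is 2 and the loop above exchanges elements 0<->2 and
   1<->3, giving { 2, 3, 0, 1 }: the two doublewords trade places
   while the element order within each doubleword is preserved.  */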
38990 /* Find all subregs of a vector expression that perform a narrowing,
38991 and adjust the subreg index to account for doubleword swapping. */
38992 static void
38993 adjust_subreg_index (rtx op)
38995 enum rtx_code code = GET_CODE (op);
38996 if (code == SUBREG
38997 && (GET_MODE_SIZE (GET_MODE (op))
38998 < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
39000 unsigned int index = SUBREG_BYTE (op);
39001 if (index < 8)
39002 index += 8;
39003 else
39004 index -= 8;
39005 SUBREG_BYTE (op) = index;
39008 const char *fmt = GET_RTX_FORMAT (code);
39009 int i, j;
39010 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
39011 if (fmt[i] == 'e' || fmt[i] == 'u')
39012 adjust_subreg_index (XEXP (op, i));
39013 else if (fmt[i] == 'E')
39014 for (j = 0; j < XVECLEN (op, i); ++j)
39015 adjust_subreg_index (XVECEXP (op, i, j));
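/* Worked example: (subreg:DF (reg:V2DF ...) 8) names the doubleword
   at byte offset 8.  Once the enclosing web is swapped, that value
   lives at byte offset 0, so the code above rewrites the subreg to
   (subreg:DF (reg:V2DF ...) 0), and vice versa.  */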
39018 /* Convert the non-permuting load INSN to a permuting one. */
39019 static void
39020 permute_load (rtx_insn *insn)
39022 rtx body = PATTERN (insn);
39023 rtx mem_op = SET_SRC (body);
39024 rtx tgt_reg = SET_DEST (body);
39025 machine_mode mode = GET_MODE (tgt_reg);
39026 int n_elts = GET_MODE_NUNITS (mode);
39027 int half_elts = n_elts / 2;
39028 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
39029 int i, j;
39030 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
39031 XVECEXP (par, 0, i) = GEN_INT (j);
39032 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
39033 XVECEXP (par, 0, i) = GEN_INT (j);
39034 rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
39035 SET_SRC (body) = sel;
39036 INSN_CODE (insn) = -1; /* Force re-recognition. */
39037 df_insn_rescan (insn);
39039 if (dump_file)
39040 fprintf (dump_file, "Replacing load %d with permuted load\n",
39041 INSN_UID (insn));
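/* Worked example (register number invented): for a V4SImode load,
   n_elts is 4 and the two loops build the selector [2 3 0 1], so

     (set (reg:V4SI 100) (mem:V4SI ...))

   becomes the doubleword-permuting load

     (set (reg:V4SI 100)
          (vec_select:V4SI (mem:V4SI ...)
                           (parallel [(const_int 2) (const_int 3)
                                      (const_int 0) (const_int 1)])))

   permute_store below performs the mirror-image rewrite on the
   source of a store.  */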
39044 /* Convert the non-permuting store INSN to a permuting one. */
39045 static void
39046 permute_store (rtx_insn *insn)
39048 rtx body = PATTERN (insn);
39049 rtx src_reg = SET_SRC (body);
39050 machine_mode mode = GET_MODE (src_reg);
39051 int n_elts = GET_MODE_NUNITS (mode);
39052 int half_elts = n_elts / 2;
39053 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
39054 int i, j;
39055 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
39056 XVECEXP (par, 0, i) = GEN_INT (j);
39057 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
39058 XVECEXP (par, 0, i) = GEN_INT (j);
39059 rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
39060 SET_SRC (body) = sel;
39061 INSN_CODE (insn) = -1; /* Force re-recognition. */
39062 df_insn_rescan (insn);
39064 if (dump_file)
39065 fprintf (dump_file, "Replacing store %d with permuted store\n",
39066 INSN_UID (insn));
39069 /* Given INSN whose pattern contains a vector extract operation, adjust
39070 the index of the extracted lane to account for the doubleword swap. */
39071 static void
39072 adjust_extract (rtx_insn *insn)
39074 rtx pattern = PATTERN (insn);
39075 if (GET_CODE (pattern) == PARALLEL)
39076 pattern = XVECEXP (pattern, 0, 0);
39077 rtx src = SET_SRC (pattern);
39078 /* The vec_select may be wrapped in a vec_duplicate for a splat, so
39079 account for that. */
39080 rtx sel = GET_CODE (src) == VEC_DUPLICATE ? XEXP (src, 0) : src;
39081 rtx par = XEXP (sel, 1);
39082 int half_elts = GET_MODE_NUNITS (GET_MODE (XEXP (sel, 0))) >> 1;
39083 int lane = INTVAL (XVECEXP (par, 0, 0));
39084 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
39085 XVECEXP (par, 0, 0) = GEN_INT (lane);
39086 INSN_CODE (insn) = -1; /* Force re-recognition. */
39087 df_insn_rescan (insn);
39089 if (dump_file)
39090 fprintf (dump_file, "Changing lane for extract %d\n", INSN_UID (insn));
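/* Worked example: for a V4SImode source, half_elts is 2, so
   extracting lane 1 becomes extracting lane 3 and vice versa, and
   lanes 0 and 2 likewise trade places.  The adjusted lane names the
   same value once the register's doublewords are swapped.  */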
39093 /* Given INSN whose pattern contains a vector direct-splat operation,
39094 adjust the index of the source lane to account for the doubleword swap. */
39095 static void
39096 adjust_splat (rtx_insn *insn)
39098 rtx body = PATTERN (insn);
39099 rtx unspec = XEXP (body, 1);
39100 int half_elts = GET_MODE_NUNITS (GET_MODE (unspec)) >> 1;
39101 int lane = INTVAL (XVECEXP (unspec, 0, 1));
39102 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
39103 XVECEXP (unspec, 0, 1) = GEN_INT (lane);
39104 INSN_CODE (insn) = -1; /* Force re-recognition. */
39105 df_insn_rescan (insn);
39107 if (dump_file)
39108 fprintf (dump_file, "Changing lane for splat %d\n", INSN_UID (insn));
39111 /* Given INSN whose pattern contains an XXPERMDI operation (that is not
39112 a doubleword swap), reverse the order of the source operands and adjust
39113 the indices of the source lanes to account for doubleword reversal. */
39114 static void
39115 adjust_xxpermdi (rtx_insn *insn)
39117 rtx set = PATTERN (insn);
39118 rtx select = XEXP (set, 1);
39119 rtx concat = XEXP (select, 0);
39120 rtx src0 = XEXP (concat, 0);
39121 XEXP (concat, 0) = XEXP (concat, 1);
39122 XEXP (concat, 1) = src0;
39123 rtx parallel = XEXP (select, 1);
39124 int lane0 = INTVAL (XVECEXP (parallel, 0, 0));
39125 int lane1 = INTVAL (XVECEXP (parallel, 0, 1));
39126 int new_lane0 = 3 - lane1;
39127 int new_lane1 = 3 - lane0;
39128 XVECEXP (parallel, 0, 0) = GEN_INT (new_lane0);
39129 XVECEXP (parallel, 0, 1) = GEN_INT (new_lane1);
39130 INSN_CODE (insn) = -1; /* Force re-recognition. */
39131 df_insn_rescan (insn);
39133 if (dump_file)
39134 fprintf (dump_file, "Changing lanes for xxpermdi %d\n", INSN_UID (insn));
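/* Worked example: an xxpermdi selecting lanes {0, 2} from
   (vec_concat A B) is rewritten to select lanes
   {3 - 2, 3 - 0} = {1, 3} from (vec_concat B A).  The rewritten
   selection yields the doubleword-swapped form of the original
   result, which is exactly the invariant the pass maintains for
   every register in an optimized web.  */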
39137 /* Given INSN whose pattern contains a VEC_CONCAT of two doublewords,
39138 reverse the order of those inputs. */
39139 static void
39140 adjust_concat (rtx_insn *insn)
39142 rtx set = PATTERN (insn);
39143 rtx concat = XEXP (set, 1);
39144 rtx src0 = XEXP (concat, 0);
39145 XEXP (concat, 0) = XEXP (concat, 1);
39146 XEXP (concat, 1) = src0;
39147 INSN_CODE (insn) = -1; /* Force re-recognition. */
39148 df_insn_rescan (insn);
39150 if (dump_file)
39151 fprintf (dump_file, "Reversing inputs for concat %d\n", INSN_UID (insn));
39154 /* Given an UNSPEC_VPERM insn, modify the mask loaded from the
39155 constant pool to reflect swapped doublewords. */
39156 static void
39157 adjust_vperm (rtx_insn *insn)
39159 /* We previously determined that the UNSPEC_VPERM was fed by a
39160 swap of a swapping load of a TOC-relative constant pool symbol.
39161 Find the MEM in the swapping load and replace it with a MEM for
39162 the adjusted mask constant. */
39163 rtx set = PATTERN (insn);
39164 rtx mask_reg = XVECEXP (SET_SRC (set), 0, 2);
39166 /* Find the swap. */
39167 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39168 df_ref use;
39169 rtx_insn *swap_insn = 0;
39170 FOR_EACH_INSN_INFO_USE (use, insn_info)
39171 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
39173 struct df_link *def_link = DF_REF_CHAIN (use);
39174 gcc_assert (def_link && !def_link->next);
39175 swap_insn = DF_REF_INSN (def_link->ref);
39176 break;
39178 gcc_assert (swap_insn);
39180 /* Find the load. */
39181 insn_info = DF_INSN_INFO_GET (swap_insn);
39182 rtx_insn *load_insn = 0;
39183 FOR_EACH_INSN_INFO_USE (use, insn_info)
39185 struct df_link *def_link = DF_REF_CHAIN (use);
39186 gcc_assert (def_link && !def_link->next);
39187 load_insn = DF_REF_INSN (def_link->ref);
39188 break;
39190 gcc_assert (load_insn);
39192 /* Find the TOC-relative symbol access. */
39193 insn_info = DF_INSN_INFO_GET (load_insn);
39194 rtx_insn *tocrel_insn = 0;
39195 FOR_EACH_INSN_INFO_USE (use, insn_info)
39197 struct df_link *def_link = DF_REF_CHAIN (use);
39198 gcc_assert (def_link && !def_link->next);
39199 tocrel_insn = DF_REF_INSN (def_link->ref);
39200 break;
39202 gcc_assert (tocrel_insn);
39204 /* Find the embedded CONST_VECTOR. We must call toc_relative_expr_p
39205 here for its side effect of setting tocrel_base; we have already
39206 established that it will return true. */
39207 rtx base, offset;
39208 rtx tocrel_expr = SET_SRC (PATTERN (tocrel_insn));
39209 /* There is an extra level of indirection for small/large code models. */
39210 if (GET_CODE (tocrel_expr) == MEM)
39211 tocrel_expr = XEXP (tocrel_expr, 0);
39212 if (!toc_relative_expr_p (tocrel_expr, false))
39213 gcc_unreachable ();
39214 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
39215 rtx const_vector = get_pool_constant (base);
39216 /* With the extra indirection, get_pool_constant can return a
39217 SYMBOL_REF naming the real constant; in that case look it up
39218 once more. */
39219 if (GET_CODE (const_vector) == SYMBOL_REF)
39220 const_vector = get_pool_constant (const_vector);
39221 gcc_assert (GET_CODE (const_vector) == CONST_VECTOR);
39223 /* Create an adjusted mask from the initial mask. */
39224 unsigned int new_mask[16], i, val;
39225 for (i = 0; i < 16; ++i)
39226 val = INTVAL (XVECEXP (const_vector, 0, i));
39227 if (val < 16)
39228 new_mask[i] = (val + 8) % 16;
39229 else
39230 new_mask[i] = ((val + 8) % 16) + 16;
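/* Worked example: each mask byte selects one byte of the 32-byte
   concatenation of the two source vectors, so the remapping above
   rotates each 16-byte half by 8: 0 -> 8, 7 -> 15, 8 -> 0, 15 -> 7
   for the first source, and 16 -> 24, 23 -> 31, 24 -> 16, 31 -> 23
   for the second.  */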
39233 /* Create a new CONST_VECTOR and a MEM that references it. */
39234 rtx vals = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
39235 for (i = 0; i < 16; ++i)
39236 XVECEXP (vals, 0, i) = GEN_INT (new_mask[i]);
39237 rtx new_const_vector = gen_rtx_CONST_VECTOR (V16QImode, XVEC (vals, 0));
39238 rtx new_mem = force_const_mem (V16QImode, new_const_vector);
39239 /* This gives us a MEM whose base operand is a SYMBOL_REF, which we
39240 can't recognize. Force the SYMBOL_REF into a register. */
39241 if (!REG_P (XEXP (new_mem, 0)))
39242 rtx base_reg = force_reg (Pmode, XEXP (new_mem, 0));
39243 XEXP (new_mem, 0) = base_reg;
39244 /* Move the newly created insn ahead of the load insn. */
39245 rtx_insn *force_insn = get_last_insn ();
39246 remove_insn (force_insn);
39247 rtx_insn *before_load_insn = PREV_INSN (load_insn);
39248 add_insn_after (force_insn, before_load_insn, BLOCK_FOR_INSN (load_insn));
39249 df_insn_rescan (before_load_insn);
39250 df_insn_rescan (force_insn);
39253 /* Replace the MEM in the load instruction and rescan it. */
39254 XEXP (SET_SRC (PATTERN (load_insn)), 0) = new_mem;
39255 INSN_CODE (load_insn) = -1; /* Force re-recognition. */
39256 df_insn_rescan (load_insn);
39258 if (dump_file)
39259 fprintf (dump_file, "Adjusting mask for vperm %d\n", INSN_UID (insn));
39262 /* The insn described by INSN_ENTRY[I] can be swapped, but only
39263 with special handling. Take care of that here. */
39264 static void
39265 handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
39267 rtx_insn *insn = insn_entry[i].insn;
39268 rtx body = PATTERN (insn);
39270 switch (insn_entry[i].special_handling)
39272 default:
39273 gcc_unreachable ();
39274 case SH_CONST_VECTOR:
39276 /* A CONST_VECTOR will only show up somewhere in the RHS of a SET. */
39277 gcc_assert (GET_CODE (body) == SET);
39278 rtx rhs = SET_SRC (body);
39279 swap_const_vector_halves (rhs);
39280 if (dump_file)
39281 fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
39282 break;
39284 case SH_SUBREG:
39285 /* A subreg of the same size is already safe. For subregs that
39286 select a smaller portion of a reg, adjust the index for
39287 swapped doublewords. */
39288 adjust_subreg_index (body);
39289 if (dump_file)
39290 fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
39291 break;
39292 case SH_NOSWAP_LD:
39293 /* Convert a non-permuting load to a permuting one. */
39294 permute_load (insn);
39295 break;
39296 case SH_NOSWAP_ST:
39297 /* Convert a non-permuting store to a permuting one. */
39298 permute_store (insn);
39299 break;
39300 case SH_EXTRACT:
39301 /* Change the lane on an extract operation. */
39302 adjust_extract (insn);
39303 break;
39304 case SH_SPLAT:
39305 /* Change the lane on a direct-splat operation. */
39306 adjust_splat (insn);
39307 break;
39308 case SH_XXPERMDI:
39309 /* Change the lanes on an XXPERMDI operation. */
39310 adjust_xxpermdi (insn);
39311 break;
39312 case SH_CONCAT:
39313 /* Reverse the order of a concatenation operation. */
39314 adjust_concat (insn);
39315 break;
39316 case SH_VPERM:
39317 /* Change the mask loaded from the constant pool for a VPERM. */
39318 adjust_vperm (insn);
39319 break;
39323 /* Find the insn from the Ith table entry, which is known to be a
39324 register swap Y = SWAP(X). Replace it with a copy Y = X. */
39325 static void
39326 replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
39328 rtx_insn *insn = insn_entry[i].insn;
39329 rtx body = PATTERN (insn);
39330 rtx src_reg = XEXP (SET_SRC (body), 0);
39331 rtx copy = gen_rtx_SET (SET_DEST (body), src_reg);
39332 rtx_insn *new_insn = emit_insn_before (copy, insn);
39333 set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
39334 df_insn_rescan (new_insn);
39336 if (dump_file)
39338 unsigned int new_uid = INSN_UID (new_insn);
39339 fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
39342 df_insn_delete (insn);
39343 remove_insn (insn);
39344 insn->set_deleted ();
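/* Illustrative sketch (register numbers invented): a swap such as

     (set (reg:V2DI 101)
          (vec_select:V2DI (reg:V2DI 100)
                           (parallel [(const_int 1) (const_int 0)])))

   becomes the plain copy (set (reg:V2DI 101) (reg:V2DI 100)), which
   later passes can typically propagate away entirely.  */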
39347 /* Dump the swap table to DUMP_FILE. */
39348 static void
39349 dump_swap_insn_table (swap_web_entry *insn_entry)
39351 int e = get_max_uid ();
39352 fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");
39354 for (int i = 0; i < e; ++i)
39355 if (insn_entry[i].is_relevant)
39357 swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
39358 fprintf (dump_file, "%6d %6d ", i,
39359 pred_entry && pred_entry->insn
39360 ? INSN_UID (pred_entry->insn) : 0);
39361 if (insn_entry[i].is_load)
39362 fputs ("load ", dump_file);
39363 if (insn_entry[i].is_store)
39364 fputs ("store ", dump_file);
39365 if (insn_entry[i].is_swap)
39366 fputs ("swap ", dump_file);
39367 if (insn_entry[i].is_live_in)
39368 fputs ("live-in ", dump_file);
39369 if (insn_entry[i].is_live_out)
39370 fputs ("live-out ", dump_file);
39371 if (insn_entry[i].contains_subreg)
39372 fputs ("subreg ", dump_file);
39373 if (insn_entry[i].is_128_int)
39374 fputs ("int128 ", dump_file);
39375 if (insn_entry[i].is_call)
39376 fputs ("call ", dump_file);
39377 if (insn_entry[i].is_swappable)
39379 fputs ("swappable ", dump_file);
39380 if (insn_entry[i].special_handling == SH_CONST_VECTOR)
39381 fputs ("special:constvec ", dump_file);
39382 else if (insn_entry[i].special_handling == SH_SUBREG)
39383 fputs ("special:subreg ", dump_file);
39384 else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
39385 fputs ("special:load ", dump_file);
39386 else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
39387 fputs ("special:store ", dump_file);
39388 else if (insn_entry[i].special_handling == SH_EXTRACT)
39389 fputs ("special:extract ", dump_file);
39390 else if (insn_entry[i].special_handling == SH_SPLAT)
39391 fputs ("special:splat ", dump_file);
39392 else if (insn_entry[i].special_handling == SH_XXPERMDI)
39393 fputs ("special:xxpermdi ", dump_file);
39394 else if (insn_entry[i].special_handling == SH_CONCAT)
39395 fputs ("special:concat ", dump_file);
39396 else if (insn_entry[i].special_handling == SH_VPERM)
39397 fputs ("special:vperm ", dump_file);
39399 if (insn_entry[i].web_not_optimizable)
39400 fputs ("unoptimizable ", dump_file);
39401 if (insn_entry[i].will_delete)
39402 fputs ("delete ", dump_file);
39403 fputs ("\n", dump_file);
39405 fputs ("\n", dump_file);
39408 /* Return ALIGN with its address canonicalized to (reg) or (plus reg reg).
39409 Here ALIGN is an (and addr (const_int -16)). Always return a new copy
39410 to avoid problems with combine. */
39411 static rtx
39412 alignment_with_canonical_addr (rtx align)
39414 rtx canon;
39415 rtx addr = XEXP (align, 0);
39417 if (REG_P (addr))
39418 canon = addr;
39420 else if (GET_CODE (addr) == PLUS)
39422 rtx addrop0 = XEXP (addr, 0);
39423 rtx addrop1 = XEXP (addr, 1);
39425 if (!REG_P (addrop0))
39426 addrop0 = force_reg (GET_MODE (addrop0), addrop0);
39428 if (!REG_P (addrop1))
39429 addrop1 = force_reg (GET_MODE (addrop1), addrop1);
39431 canon = gen_rtx_PLUS (GET_MODE (addr), addrop0, addrop1);
39434 else
39435 canon = force_reg (GET_MODE (addr), addr);
39437 return gen_rtx_AND (GET_MODE (align), canon, GEN_INT (-16));
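/* Illustrative sketch (register numbers invented) of the canonical
   forms produced above:

     (and (reg 3) (const_int -16))
         -> returned unchanged in shape, as a fresh rtx;
     (and (plus (reg 3) (const_int 16)) (const_int -16))
         -> the const_int addend is forced into a register, giving
            (and (plus (reg 3) (reg N)) (const_int -16));
     (and (lo_sum ...) (const_int -16))
         -> the whole address is forced into a single register.

   A new AND rtx is built in every case so that combine never sees
   one rtx shared between insns.  */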
39440 /* Check whether INSN computes an AND of an address with -16, and if
39441 so, return a fully-expanded rtx for the masking operation. */
39442 static rtx
39443 alignment_mask (rtx_insn *insn)
39445 rtx body = PATTERN (insn);
39447 if (GET_CODE (body) != SET
39448 || GET_CODE (SET_SRC (body)) != AND
39449 || !REG_P (XEXP (SET_SRC (body), 0)))
39450 return 0;
39452 rtx mask = XEXP (SET_SRC (body), 1);
39454 if (GET_CODE (mask) == CONST_INT)
39456 if (INTVAL (mask) == -16)
39457 return alignment_with_canonical_addr (SET_SRC (body));
39458 else
39459 return 0;
39462 if (!REG_P (mask))
39463 return 0;
39465 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39466 df_ref use;
39467 rtx real_mask = 0;
39469 FOR_EACH_INSN_INFO_USE (use, insn_info)
39471 if (!rtx_equal_p (DF_REF_REG (use), mask))
39472 continue;
39474 struct df_link *def_link = DF_REF_CHAIN (use);
39475 if (!def_link || def_link->next)
39476 return 0;
39478 rtx_insn *const_insn = DF_REF_INSN (def_link->ref);
39479 rtx const_body = PATTERN (const_insn);
39480 if (GET_CODE (const_body) != SET)
39481 return 0;
39483 real_mask = SET_SRC (const_body);
39485 if (GET_CODE (real_mask) != CONST_INT
39486 || INTVAL (real_mask) != -16)
39487 return 0;
39490 if (real_mask == 0)
39491 return 0;
39493 return alignment_with_canonical_addr (SET_SRC (body));
39496 /* Given INSN that's a load or store based at BASE_REG, look for a
39497 feeding computation that aligns its address on a 16-byte boundary. */
39498 static rtx
39499 find_alignment_op (rtx_insn *insn, rtx base_reg)
39501 df_ref base_use;
39502 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39503 rtx and_operation = 0;
39505 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
39507 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
39508 continue;
39510 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
39511 if (!base_def_link || base_def_link->next)
39512 break;
39514 rtx_insn *and_insn = DF_REF_INSN (base_def_link->ref);
39515 and_operation = alignment_mask (and_insn);
39516 if (and_operation != 0)
39517 break;
39520 return and_operation;
39523 struct del_info { bool replace; rtx_insn *replace_insn; };
39525 /* If INSN is the load for an lvx pattern, put it in canonical form. */
39526 static void
39527 recombine_lvx_pattern (rtx_insn *insn, del_info *to_delete)
39529 rtx body = PATTERN (insn);
39530 gcc_assert (GET_CODE (body) == SET
39531 && GET_CODE (SET_SRC (body)) == VEC_SELECT
39532 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM);
39534 rtx mem = XEXP (SET_SRC (body), 0);
39535 rtx base_reg = XEXP (mem, 0);
39537 rtx and_operation = find_alignment_op (insn, base_reg);
39539 if (and_operation != 0)
39541 df_ref def;
39542 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39543 FOR_EACH_INSN_INFO_DEF (def, insn_info)
39545 struct df_link *link = DF_REF_CHAIN (def);
39546 if (!link || link->next)
39547 break;
39549 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
39550 if (!insn_is_swap_p (swap_insn)
39551 || insn_is_load_p (swap_insn)
39552 || insn_is_store_p (swap_insn))
39553 break;
39555 /* Expected lvx pattern found. Change the swap to
39556 a copy, and propagate the AND operation into the
39557 load. */
39558 to_delete[INSN_UID (swap_insn)].replace = true;
39559 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
39561 XEXP (mem, 0) = and_operation;
39562 SET_SRC (body) = mem;
39563 INSN_CODE (insn) = -1; /* Force re-recognition. */
39564 df_insn_rescan (insn);
39566 if (dump_file)
39567 fprintf (dump_file, "lvx opportunity found at %d\n",
39568 INSN_UID (insn));
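/* Illustrative before/after (register numbers invented) for the lvx
   case; the stvx case below is symmetric:

     before:  (set (reg:V16QI 100)
                   (vec_select:V16QI (mem:V16QI (reg 9)) ...))
              (set (reg:V16QI 101)
                   (vec_select:V16QI (reg:V16QI 100) ...))
     after:   (set (reg:V16QI 100)
                   (mem:V16QI (and (reg 3) (const_int -16))))
              (set (reg:V16QI 101) (reg:V16QI 100))

   where (reg 9) was defined as (and (reg 3) (const_int -16)); the
   AND is propagated into the address and the trailing swap becomes
   a copy, so the pair is recognizable as a single lvx.  */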
39573 /* If INSN is the store for an stvx pattern, put it in canonical form. */
39574 static void
39575 recombine_stvx_pattern (rtx_insn *insn, del_info *to_delete)
39577 rtx body = PATTERN (insn);
39578 gcc_assert (GET_CODE (body) == SET
39579 && GET_CODE (SET_DEST (body)) == MEM
39580 && GET_CODE (SET_SRC (body)) == VEC_SELECT);
39581 rtx mem = SET_DEST (body);
39582 rtx base_reg = XEXP (mem, 0);
39584 rtx and_operation = find_alignment_op (insn, base_reg);
39586 if (and_operation != 0)
39588 rtx src_reg = XEXP (SET_SRC (body), 0);
39589 df_ref src_use;
39590 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39591 FOR_EACH_INSN_INFO_USE (src_use, insn_info)
39593 if (!rtx_equal_p (DF_REF_REG (src_use), src_reg))
39594 continue;
39596 struct df_link *link = DF_REF_CHAIN (src_use);
39597 if (!link || link->next)
39598 break;
39600 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
39601 if (!insn_is_swap_p (swap_insn)
39602 || insn_is_load_p (swap_insn)
39603 || insn_is_store_p (swap_insn))
39604 break;
39606 /* Expected stvx pattern found. Change the swap to
39607 a copy, and propagate the AND operation into the
39608 store. */
39609 to_delete[INSN_UID (swap_insn)].replace = true;
39610 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
39612 XEXP (mem, 0) = and_operation;
39613 SET_SRC (body) = src_reg;
39614 INSN_CODE (insn) = -1; /* Force re-recognition. */
39615 df_insn_rescan (insn);
39617 if (dump_file)
39618 fprintf (dump_file, "stvx opportunity found at %d\n",
39619 INSN_UID (insn));
39624 /* Look for patterns created from builtin lvx and stvx calls, and
39625 canonicalize them to be properly recognized as such. */
39626 static void
39627 recombine_lvx_stvx_patterns (function *fun)
39629 int i;
39630 basic_block bb;
39631 rtx_insn *insn;
39633 int num_insns = get_max_uid ();
39634 del_info *to_delete = XCNEWVEC (del_info, num_insns);
39636 FOR_ALL_BB_FN (bb, fun)
39637 FOR_BB_INSNS (bb, insn)
39639 if (!NONDEBUG_INSN_P (insn))
39640 continue;
39642 if (insn_is_load_p (insn) && insn_is_swap_p (insn))
39643 recombine_lvx_pattern (insn, to_delete);
39644 else if (insn_is_store_p (insn) && insn_is_swap_p (insn))
39645 recombine_stvx_pattern (insn, to_delete);
39648 /* Turning swaps into copies is delayed until now, to avoid problems
39649 with deleting instructions during the insn walk. */
39650 for (i = 0; i < num_insns; i++)
39651 if (to_delete[i].replace)
39653 rtx swap_body = PATTERN (to_delete[i].replace_insn);
39654 rtx src_reg = XEXP (SET_SRC (swap_body), 0);
39655 rtx copy = gen_rtx_SET (SET_DEST (swap_body), src_reg);
39656 rtx_insn *new_insn = emit_insn_before (copy,
39657 to_delete[i].replace_insn);
39658 set_block_for_insn (new_insn,
39659 BLOCK_FOR_INSN (to_delete[i].replace_insn));
39660 df_insn_rescan (new_insn);
39661 df_insn_delete (to_delete[i].replace_insn);
39662 remove_insn (to_delete[i].replace_insn);
39663 to_delete[i].replace_insn->set_deleted ();
39666 free (to_delete);
39669 /* Main entry point for this pass. */
39670 unsigned int
39671 rs6000_analyze_swaps (function *fun)
39673 swap_web_entry *insn_entry;
39674 basic_block bb;
39675 rtx_insn *insn, *curr_insn = 0;
39677 /* Dataflow analysis for use-def chains. */
39678 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
39679 df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
39680 df_analyze ();
39681 df_set_flags (DF_DEFER_INSN_RESCAN);
39683 /* Pre-pass to recombine lvx and stvx patterns so we don't lose info. */
39684 recombine_lvx_stvx_patterns (fun);
39686 /* Allocate structure to represent webs of insns. */
39687 insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());
39689 /* Walk the insns to gather basic data. */
39690 FOR_ALL_BB_FN (bb, fun)
39691 FOR_BB_INSNS_SAFE (bb, insn, curr_insn)
39693 unsigned int uid = INSN_UID (insn);
39694 if (NONDEBUG_INSN_P (insn))
39696 insn_entry[uid].insn = insn;
39698 if (GET_CODE (insn) == CALL_INSN)
39699 insn_entry[uid].is_call = 1;
39701 /* Walk the uses and defs to see if we mention vector regs.
39702 Record any constraints on optimization of such mentions. */
39703 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39704 df_ref mention;
39705 FOR_EACH_INSN_INFO_USE (mention, insn_info)
39707 /* We use DF_REF_REAL_REG here to get inside any subregs. */
39708 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
39710 /* If a use gets its value from a call insn, it will be
39711 a hard register and will look like (reg:V4SI 3 3).
39712 The df analysis creates two mentions for GPR3 and GPR4,
39713 both DImode. We must recognize this and treat it as a
39714 vector mention to ensure the call is unioned with this
39715 use. */
39716 if (mode == DImode && DF_REF_INSN_INFO (mention))
39718 rtx feeder = DF_REF_INSN (mention);
39719 /* FIXME: It is pretty hard to get from the df mention
39720 to the mode of the use in the insn. We arbitrarily
39721 pick a vector mode here, even though the use might
39722 be a real DImode. We can be too conservative
39723 (create a web larger than necessary) because of
39724 this, so consider eventually fixing this. */
39725 if (GET_CODE (feeder) == CALL_INSN)
39726 mode = V4SImode;
39729 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
39731 insn_entry[uid].is_relevant = 1;
39732 if (mode == TImode || mode == V1TImode
39733 || FLOAT128_VECTOR_P (mode))
39734 insn_entry[uid].is_128_int = 1;
39735 if (DF_REF_INSN_INFO (mention))
39736 insn_entry[uid].contains_subreg
39737 = !rtx_equal_p (DF_REF_REG (mention),
39738 DF_REF_REAL_REG (mention));
39739 union_defs (insn_entry, insn, mention);
39742 FOR_EACH_INSN_INFO_DEF (mention, insn_info)
39744 /* We use DF_REF_REAL_REG here to get inside any subregs. */
39745 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
39747 /* If we're loading up a hard vector register for a call,
39748 it looks like (set (reg:V4SI 9 9) (...)). The df
39749 analysis creates two mentions for GPR9 and GPR10, both
39750 DImode. So relying on the mode from the mentions
39751 isn't sufficient to ensure we union the call into the
39752 web with the parameter setup code. */
39753 if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
39754 && ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (SET_DEST (PATTERN (insn)))))
39755 mode = GET_MODE (SET_DEST (PATTERN (insn)));
39757 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
39759 insn_entry[uid].is_relevant = 1;
39760 if (mode == TImode || mode == V1TImode
39761 || FLOAT128_VECTOR_P (mode))
39762 insn_entry[uid].is_128_int = 1;
39763 if (DF_REF_INSN_INFO (mention))
39764 insn_entry[uid].contains_subreg
39765 = !rtx_equal_p (DF_REF_REG (mention),
39766 DF_REF_REAL_REG (mention));
39767 /* REG_FUNCTION_VALUE_P is not valid for subregs. */
39768 else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
39769 insn_entry[uid].is_live_out = 1;
39770 union_uses (insn_entry, insn, mention);
39774 if (insn_entry[uid].is_relevant)
39776 /* Determine if this is a load or store. */
39777 insn_entry[uid].is_load = insn_is_load_p (insn);
39778 insn_entry[uid].is_store = insn_is_store_p (insn);
39780 /* Determine if this is a doubleword swap. If not,
39781 determine whether it can legally be swapped. */
39782 if (insn_is_swap_p (insn))
39783 insn_entry[uid].is_swap = 1;
39784 else
39786 unsigned int special = SH_NONE;
39787 insn_entry[uid].is_swappable
39788 = insn_is_swappable_p (insn_entry, insn, &special);
39789 if (special != SH_NONE && insn_entry[uid].contains_subreg)
39790 insn_entry[uid].is_swappable = 0;
39791 else if (special != SH_NONE)
39792 insn_entry[uid].special_handling = special;
39793 else if (insn_entry[uid].contains_subreg)
39794 insn_entry[uid].special_handling = SH_SUBREG;
39800 if (dump_file)
39802 fprintf (dump_file, "\nSwap insn entry table when first built\n");
39803 dump_swap_insn_table (insn_entry);
39806 /* Record unoptimizable webs. */
39807 unsigned e = get_max_uid (), i;
39808 for (i = 0; i < e; ++i)
39810 if (!insn_entry[i].is_relevant)
39811 continue;
39813 swap_web_entry *root
39814 = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();
39816 if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
39817 || (insn_entry[i].contains_subreg
39818 && insn_entry[i].special_handling != SH_SUBREG)
39819 || insn_entry[i].is_128_int || insn_entry[i].is_call
39820 || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
39821 root->web_not_optimizable = 1;
39823 /* If we have loads or stores that aren't permuting then the
39824 optimization isn't appropriate. */
39825 else if ((insn_entry[i].is_load || insn_entry[i].is_store)
39826 && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
39827 root->web_not_optimizable = 1;
39829 /* If we have permuting loads or stores that are not accompanied
39830 by a register swap, the optimization isn't appropriate. */
39831 else if (insn_entry[i].is_load && insn_entry[i].is_swap)
39833 rtx insn = insn_entry[i].insn;
39834 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39835 df_ref def;
39837 FOR_EACH_INSN_INFO_DEF (def, insn_info)
39839 struct df_link *link = DF_REF_CHAIN (def);
39841 if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
39843 root->web_not_optimizable = 1;
39844 break;
39848 else if (insn_entry[i].is_store && insn_entry[i].is_swap)
39850 rtx insn = insn_entry[i].insn;
39851 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39852 df_ref use;
39854 FOR_EACH_INSN_INFO_USE (use, insn_info)
39856 struct df_link *link = DF_REF_CHAIN (use);
39858 if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
39860 root->web_not_optimizable = 1;
39861 break;
39867 if (dump_file)
39869 fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
39870 dump_swap_insn_table (insn_entry);
39873 /* For each load and store in an optimizable web (which implies
39874 the loads and stores are permuting), find the associated
39875 register swaps and mark them for removal. Due to various
39876 optimizations we may mark the same swap more than once. Also
39877 perform special handling for swappable insns that require it. */
39878 for (i = 0; i < e; ++i)
39879 if ((insn_entry[i].is_load || insn_entry[i].is_store)
39880 && insn_entry[i].is_swap)
39882 swap_web_entry* root_entry
39883 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
39884 if (!root_entry->web_not_optimizable)
39885 mark_swaps_for_removal (insn_entry, i);
39887 else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
39889 swap_web_entry* root_entry
39890 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
39891 if (!root_entry->web_not_optimizable)
39892 handle_special_swappables (insn_entry, i);
39895 /* Now delete the swaps marked for removal. */
39896 for (i = 0; i < e; ++i)
39897 if (insn_entry[i].will_delete)
39898 replace_swap_with_copy (insn_entry, i);
39900 /* Clean up. */
39901 free (insn_entry);
39902 return 0;
39905 const pass_data pass_data_analyze_swaps =
39907 RTL_PASS, /* type */
39908 "swaps", /* name */
39909 OPTGROUP_NONE, /* optinfo_flags */
39910 TV_NONE, /* tv_id */
39911 0, /* properties_required */
39912 0, /* properties_provided */
39913 0, /* properties_destroyed */
39914 0, /* todo_flags_start */
39915 TODO_df_finish, /* todo_flags_finish */
39918 class pass_analyze_swaps : public rtl_opt_pass
39920 public:
39921 pass_analyze_swaps (gcc::context *ctxt)
39922 : rtl_opt_pass (pass_data_analyze_swaps, ctxt)
39925 /* opt_pass methods: */
39926 virtual bool gate (function *)
39928 return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
39929 && !TARGET_P9_VECTOR && rs6000_optimize_swaps);
39932 virtual unsigned int execute (function *fun)
39934 return rs6000_analyze_swaps (fun);
39937 }; // class pass_analyze_swaps
39939 rtl_opt_pass *
39940 make_pass_analyze_swaps (gcc::context *ctxt)
39942 return new pass_analyze_swaps (ctxt);
39945 #ifdef RS6000_GLIBC_ATOMIC_FENV
39946 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39947 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39948 #endif
39950 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39952 static void
39953 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39955 if (!TARGET_HARD_FLOAT || !TARGET_FPRS)
39957 #ifdef RS6000_GLIBC_ATOMIC_FENV
39958 if (atomic_hold_decl == NULL_TREE)
39960 atomic_hold_decl
39961 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39962 get_identifier ("__atomic_feholdexcept"),
39963 build_function_type_list (void_type_node,
39964 double_ptr_type_node,
39965 NULL_TREE));
39966 TREE_PUBLIC (atomic_hold_decl) = 1;
39967 DECL_EXTERNAL (atomic_hold_decl) = 1;
39970 if (atomic_clear_decl == NULL_TREE)
39972 atomic_clear_decl
39973 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39974 get_identifier ("__atomic_feclearexcept"),
39975 build_function_type_list (void_type_node,
39976 NULL_TREE));
39977 TREE_PUBLIC (atomic_clear_decl) = 1;
39978 DECL_EXTERNAL (atomic_clear_decl) = 1;
39981 tree const_double = build_qualified_type (double_type_node,
39982 TYPE_QUAL_CONST);
39983 tree const_double_ptr = build_pointer_type (const_double);
39984 if (atomic_update_decl == NULL_TREE)
39986 atomic_update_decl
39987 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39988 get_identifier ("__atomic_feupdateenv"),
39989 build_function_type_list (void_type_node,
39990 const_double_ptr,
39991 NULL_TREE));
39992 TREE_PUBLIC (atomic_update_decl) = 1;
39993 DECL_EXTERNAL (atomic_update_decl) = 1;
39996 tree fenv_var = create_tmp_var_raw (double_type_node);
39997 TREE_ADDRESSABLE (fenv_var) = 1;
39998 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
40000 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
40001 *clear = build_call_expr (atomic_clear_decl, 0);
40002 *update = build_call_expr (atomic_update_decl, 1,
40003 fold_convert (const_double_ptr, fenv_addr));
40004 #endif
40005 return;
40008 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
40009 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
40010 tree call_mffs = build_call_expr (mffs, 0);
40012 /* Generates the equivalent of feholdexcept (&fenv_var)
40014 *fenv_var = __builtin_mffs ();
40015 double fenv_hold;
40016 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
40017 __builtin_mtfsf (0xff, fenv_hold); */
40019 /* Mask to clear everything except for the rounding modes and non-IEEE
40020 arithmetic flag. */
40021 const unsigned HOST_WIDE_INT hold_exception_mask =
40022 HOST_WIDE_INT_C (0xffffffff00000007);
40024 tree fenv_var = create_tmp_var_raw (double_type_node);
40026 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
40028 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
40029 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
40030 build_int_cst (uint64_type_node,
40031 hold_exception_mask));
40033 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
40034 fenv_llu_and);
40036 tree hold_mtfsf = build_call_expr (mtfsf, 2,
40037 build_int_cst (unsigned_type_node, 0xff),
40038 fenv_hold_mtfsf);
40040 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
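/* Note, per the PowerPC FPSCR layout: mffs places the FPSCR in the
   low word of its result, and the low three bits of that word are
   the NI (non-IEEE mode) bit and the two RN (rounding mode) bits.
   The trailing ...07 in hold_exception_mask above therefore clears
   all exception status and enable bits while leaving rounding
   behavior intact.  */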
40042 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
40044 double fenv_clear = __builtin_mffs ();
40045 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
40046 __builtin_mtfsf (0xff, fenv_clear); */
40048 /* Mask to clear the entire lower word of the mffs image, i.e. all
40049 of the FPSCR, including the rounding modes. */
40050 const unsigned HOST_WIDE_INT clear_exception_mask =
40051 HOST_WIDE_INT_C (0xffffffff00000000);
40053 tree fenv_clear = create_tmp_var_raw (double_type_node);
40055 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
40057 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
40058 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
40059 fenv_clean_llu,
40060 build_int_cst (uint64_type_node,
40061 clear_exception_mask));
40063 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
40064 fenv_clear_llu_and);
40066 tree clear_mtfsf = build_call_expr (mtfsf, 2,
40067 build_int_cst (unsigned_type_node, 0xff),
40068 fenv_clear_mtfsf);
40070 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
40072 /* Generates the equivalent of feupdateenv (&fenv_var)
40074 double old_fenv = __builtin_mffs ();
40075 double fenv_update;
40076 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
40077 (*(uint64_t*)fenv_var & 0x1ff80fff);
40078 __builtin_mtfsf (0xff, fenv_update); */
40080 const unsigned HOST_WIDE_INT update_exception_mask =
40081 HOST_WIDE_INT_C (0xffffffff1fffff00);
40082 const unsigned HOST_WIDE_INT new_exception_mask =
40083 HOST_WIDE_INT_C (0x1ff80fff);
40085 tree old_fenv = create_tmp_var_raw (double_type_node);
40086 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
40088 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
40089 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
40090 build_int_cst (uint64_type_node,
40091 update_exception_mask));
40093 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
40094 build_int_cst (uint64_type_node,
40095 new_exception_mask));
40097 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
40098 old_llu_and, new_llu_and);
40100 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
40101 new_llu_mask);
40103 tree update_mtfsf = build_call_expr (mtfsf, 2,
40104 build_int_cst (unsigned_type_node, 0xff),
40105 fenv_update_mtfsf);
40107 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
40110 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
40112 static bool
40113 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
40114 optimization_type opt_type)
40116 switch (op)
40118 case rsqrt_optab:
40119 return (opt_type == OPTIMIZE_FOR_SPEED
40120 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
40122 default:
40123 return true;
40127 struct gcc_target targetm = TARGET_INITIALIZER;
40129 #include "gt-rs6000.h"