/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2016 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
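
/* Illustrative note, not code from this file: rs6000_stack_info (declared
   below, defined later in this file) fills this structure in.  For example,
   assuming the usual contiguous r28..r31 save layout, a function whose first
   saved GPR is r28 would get gp_size = (32 - 28) * reg_size.  */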

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Whether the instruction chain has been scanned already.  */
  int spe_insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call, so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;	/* option name.  */
  unsigned int mask;	/* mask bits to set.  */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 }
};

/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)

/* Register classes we care about in secondary reload or when checking whether
   an address is legitimate.  We only need to worry about GPR, FPR, and
   Altivec registers here, along with an ANY field that is the OR of the
   three register classes.  */
enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,	/* General purpose registers.  */
  RELOAD_REG_FPR,	/* Traditional floating point regs.  */
  RELOAD_REG_VMX,	/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,	/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */

/* Register type masks based on the type, of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
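
/* Illustrative sketch (assumes reg_addr[] has already been filled in during
   option override): each helper above just tests one addr_mask bit, so if
   the RELOAD_REG_ANY mask for DFmode has RELOAD_REG_PRE_INCDEC set, then

     if (mode_supports_pre_incdec_p (DFmode))
       ...allow PRE_INC/PRE_DEC addresses for DFmode...

   takes the true branch.  */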

/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;
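
/* Illustrative note: COSTS_N_INSNS is the generic GCC cost macro, so the
   tables below are expressed relative to a single add; e.g. power9_cost
   models an SImode divide (divsi) as COSTS_N_INSNS (8), roughly eight
   add-equivalents.  */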

/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
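
/* Illustrative expansion: with the RS6000_BUILTIN_* definitions above in
   effect, each line of rs6000-builtin.def of the form

     RS6000_BUILTIN_n (ENUM, NAME, MASK, ATTR, ICODE)

   becomes the initializer { NAME, ICODE, MASK, ATTR }, i.e. one row of
   rs6000_builtin_info, in the declaration order of the .def file.  */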

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp",
      /* HTM SPR registers.  */
      "tfhar", "tfiar", "texasr",
      /* SPE High registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",   "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",   "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16",  "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24",  "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",   "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",   "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16",  "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24",  "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6", "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "%rh0",  "%rh1",  "%rh2",  "%rh3",  "%rh4",  "%rh5",  "%rh6",  "%rh7",
  "%rh8",  "%rh9",  "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
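
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001.  */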

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1681 #undef TARGET_FUNCTION_ARG_ADVANCE
1682 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1683 #undef TARGET_FUNCTION_ARG
1684 #define TARGET_FUNCTION_ARG rs6000_function_arg
1685 #undef TARGET_FUNCTION_ARG_BOUNDARY
1686 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1688 #undef TARGET_BUILD_BUILTIN_VA_LIST
1689 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1691 #undef TARGET_EXPAND_BUILTIN_VA_START
1692 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1694 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1695 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1697 #undef TARGET_EH_RETURN_FILTER_MODE
1698 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1700 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1701 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1703 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1704 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1706 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1707 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1709 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1710 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1712 #undef TARGET_MD_ASM_ADJUST
1713 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1715 #undef TARGET_OPTION_OVERRIDE
1716 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1718 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1719 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1720 rs6000_builtin_vectorized_function
1722 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1723 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1724 rs6000_builtin_md_vectorized_function
1726 #if !TARGET_MACHO
1727 #undef TARGET_STACK_PROTECT_FAIL
1728 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1729 #endif
1731 #ifdef HAVE_AS_TLS
1732 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1733 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1734 #endif
1736 /* Use a 32-bit anchor range. This leads to sequences like:
1738 addis tmp,anchor,high
1739 add dest,tmp,low
1741 where tmp itself acts as an anchor, and can be shared between
1742 accesses to the same 64k page. */
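/* Editorial sketch (hypothetical operands, not from the original source):
   two nearby statics then compile to something like

       addis tmp,anchor,high
       lwz dest1,low(tmp)
       lwz dest2,low+8(tmp)

   so the addis is paid once per 64k page rather than once per access.  */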
1743 #undef TARGET_MIN_ANCHOR_OFFSET
1744 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1745 #undef TARGET_MAX_ANCHOR_OFFSET
1746 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
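/* Editorial note: the minimum above is spelled -0x7fffffff - 1 rather than
   -0x80000000 because the literal 0x80000000 does not fit in a signed int,
   so the latter expression would be negated in an unsigned type.  */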
1747 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1748 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1749 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1750 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1752 #undef TARGET_BUILTIN_RECIPROCAL
1753 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1755 #undef TARGET_EXPAND_TO_RTL_HOOK
1756 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1758 #undef TARGET_INSTANTIATE_DECLS
1759 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1761 #undef TARGET_SECONDARY_RELOAD
1762 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1764 #undef TARGET_LEGITIMATE_ADDRESS_P
1765 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1767 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1768 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1770 #undef TARGET_LRA_P
1771 #define TARGET_LRA_P rs6000_lra_p
1773 #undef TARGET_CAN_ELIMINATE
1774 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1776 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1777 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1779 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1780 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1782 #undef TARGET_TRAMPOLINE_INIT
1783 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1785 #undef TARGET_FUNCTION_VALUE
1786 #define TARGET_FUNCTION_VALUE rs6000_function_value
1788 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1789 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1791 #undef TARGET_OPTION_SAVE
1792 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1794 #undef TARGET_OPTION_RESTORE
1795 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1797 #undef TARGET_OPTION_PRINT
1798 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1800 #undef TARGET_CAN_INLINE_P
1801 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1803 #undef TARGET_SET_CURRENT_FUNCTION
1804 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1806 #undef TARGET_LEGITIMATE_CONSTANT_P
1807 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1809 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1810 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1812 #undef TARGET_CAN_USE_DOLOOP_P
1813 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1815 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1816 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1818 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1819 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1820 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1821 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1822 #undef TARGET_UNWIND_WORD_MODE
1823 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1825 #undef TARGET_OFFLOAD_OPTIONS
1826 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1828 #undef TARGET_C_MODE_FOR_SUFFIX
1829 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1831 #undef TARGET_INVALID_BINARY_OP
1832 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1834 #undef TARGET_OPTAB_SUPPORTED_P
1835 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1838 /* Processor table.  */
1839 struct rs6000_ptt
1840 {
1841   const char *const name;		/* Canonical processor name.  */
1842   const enum processor_type processor;	/* Processor type enum value.  */
1843   const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
1844 };
1846 static struct rs6000_ptt const processor_target_table[] =
1847 {
1848 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1849 #include "rs6000-cpus.def"
1850 #undef RS6000_CPU
1851 };
1853 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1854 name is invalid. */
1856 static int
1857 rs6000_cpu_name_lookup (const char *name)
1858 {
1859   size_t i;
1861   if (name != NULL)
1862     {
1863       for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1864 	if (! strcmp (name, processor_target_table[i].name))
1865 	  return (int)i;
1866     }
1868   return -1;
1869 }
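/* Editorial sketch (hypothetical caller, not from the original source):

     int cpu_index = rs6000_cpu_name_lookup ("power8");
     if (cpu_index >= 0)
       flags = processor_target_table[cpu_index].target_enable;
     else
       ;  // unrecognized -mcpu=/-mtune= value; the caller reports the error

   A linear scan is fine here: the table is small and this only runs during
   option handling.  */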
1872 /* Return the number of consecutive hard regs needed starting at reg REGNO
1873    to hold something of mode MODE.
1874    This is ordinarily the length in words of a value of mode MODE
1875    but can be less for certain modes in special long registers.
1877    For the SPE, GPRs are 64 bits but only 32 bits are visible in
1878    scalar instructions.  The upper 32 bits are only available to the
1879    SIMD instructions.
1881    POWER and PowerPC GPRs hold 32 bits worth;
1882    PowerPC64 GPRs and FPRs hold 64 bits worth.  */
1884 static int
1885 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
1886 {
1887   unsigned HOST_WIDE_INT reg_size;
1889   /* 128-bit floating point usually takes 2 registers, unless it is IEEE
1890      128-bit floating point that can go in vector registers, which has VSX
1891      memory addressing.  */
1892   if (FP_REGNO_P (regno))
1893     reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
1894 		? UNITS_PER_VSX_WORD
1895 		: UNITS_PER_FP_WORD);
1897   else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1898     reg_size = UNITS_PER_SPE_WORD;
1900   else if (ALTIVEC_REGNO_P (regno))
1901     reg_size = UNITS_PER_ALTIVEC_WORD;
1903   /* The value returned for SCmode in the E500 double case is 2 for
1904      ABI compatibility; storing an SCmode value in a single register
1905      would require function_arg and rs6000_spe_function_arg to handle
1906      SCmode so as to pass the value correctly in a pair of
1907      registers.  */
1908   else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1909 	   && !DECIMAL_FLOAT_MODE_P (mode) && SPE_SIMD_REGNO_P (regno))
1910     reg_size = UNITS_PER_FP_WORD;
1912   else
1913     reg_size = UNITS_PER_WORD;
1915   return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
1916 }
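/* Editorial note: the return expression is the usual integer ceiling
   division, ceil (GET_MODE_SIZE (mode) / reg_size).  For example, a 16-byte
   vector mode with an 8-byte reg_size needs (16 + 8 - 1) / 8 == 2 hard
   registers.  */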
1918 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1919    MODE.  */
1920 static int
1921 rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
1922 {
1923   int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1925   if (COMPLEX_MODE_P (mode))
1926     mode = GET_MODE_INNER (mode);
1928   /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
1929      register combinations, and use PTImode where we need to deal with quad
1930      word memory operations.  Don't allow quad words in the argument or frame
1931      pointer registers, just registers 0..31.  */
1932   if (mode == PTImode)
1933     return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1934 	    && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1935 	    && ((regno & 1) == 0));
1937   /* VSX registers that overlap the FPR registers are larger than for non-VSX
1938      implementations.  Don't allow an item to be split between a FP register
1939      and an Altivec register.  Allow TImode in all VSX registers if the user
1940      asked for it.  */
1941   if (TARGET_VSX && VSX_REGNO_P (regno)
1942       && (VECTOR_MEM_VSX_P (mode)
1943 	  || FLOAT128_VECTOR_P (mode)
1944 	  || reg_addr[mode].scalar_in_vmx_p
1945 	  || (TARGET_VSX_TIMODE && mode == TImode)
1946 	  || (TARGET_VADDUQM && mode == V1TImode)
1947 	  || (TARGET_UPPER_REGS_DI && mode == DImode)))
1948     {
1949       if (FP_REGNO_P (regno))
1950 	return FP_REGNO_P (last_regno);
1952       if (ALTIVEC_REGNO_P (regno))
1953 	{
1954 	  if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
1955 	    return 0;
1957 	  return ALTIVEC_REGNO_P (last_regno);
1958 	}
1959     }
1961   /* The GPRs can hold any mode, but values bigger than one register
1962      cannot go past R31.  */
1963   if (INT_REGNO_P (regno))
1964     return INT_REGNO_P (last_regno);
1966   /* The float registers (except for VSX vector modes) can only hold floating
1967      modes and DImode.  */
1968   if (FP_REGNO_P (regno))
1969     {
1970       if (FLOAT128_VECTOR_P (mode))
1971 	return false;
1973       if (SCALAR_FLOAT_MODE_P (mode)
1974 	  && (mode != TDmode || (regno % 2) == 0)
1975 	  && FP_REGNO_P (last_regno))
1976 	return 1;
1978       if (GET_MODE_CLASS (mode) == MODE_INT
1979 	  && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1980 	return 1;
1982       if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1983 	  && PAIRED_VECTOR_MODE (mode))
1984 	return 1;
1986       return 0;
1987     }
1989   /* The CR register can only hold CC modes.  */
1990   if (CR_REGNO_P (regno))
1991     return GET_MODE_CLASS (mode) == MODE_CC;
1993   if (CA_REGNO_P (regno))
1994     return mode == Pmode || mode == SImode;
1996   /* AltiVec modes can go only in AltiVec registers.  */
1997   if (ALTIVEC_REGNO_P (regno))
1998     return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
1999 	    || mode == V1TImode);
2001   /* ...but GPRs can hold SIMD data on the SPE in one register.  */
2002   if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
2003     return 1;
2005   /* We cannot put non-VSX TImode or PTImode anywhere except in the general
2006      registers, and the value must fit within the register set.  */
2008   return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2009 }
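/* Editorial example: the PTImode case above is what forces quad-word
   operations onto even/odd GPR pairs on a 64-bit target -- regno 4 is
   accepted ((4 & 1) == 0 and regs 4-5 stay within r0-r31), while regno 5
   is rejected.  */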
2011 /* Print interesting facts about registers.  */
2012 static void
2013 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2014 {
2015   int r, m;
2017   for (r = first_regno; r <= last_regno; ++r)
2018     {
2019       const char *comma = "";
2020       int len;
2022       if (first_regno == last_regno)
2023 	fprintf (stderr, "%s:\t", reg_name);
2024       else
2025 	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2027       len = 8;
2028       for (m = 0; m < NUM_MACHINE_MODES; ++m)
2029 	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2030 	  {
2031 	    if (len > 70)
2032 	      {
2033 		fprintf (stderr, ",\n\t");
2034 		len = 8;
2035 		comma = "";
2036 	      }
2038 	    if (rs6000_hard_regno_nregs[m][r] > 1)
2039 	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2040 			      rs6000_hard_regno_nregs[m][r]);
2041 	    else
2042 	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2044 	    comma = ", ";
2045 	  }
2047       if (call_used_regs[r])
2048 	{
2049 	  if (len > 70)
2050 	    {
2051 	      fprintf (stderr, ",\n\t");
2052 	      len = 8;
2053 	      comma = "";
2054 	    }
2056 	  len += fprintf (stderr, "%s%s", comma, "call-used");
2057 	  comma = ", ";
2058 	}
2060       if (fixed_regs[r])
2061 	{
2062 	  if (len > 70)
2063 	    {
2064 	      fprintf (stderr, ",\n\t");
2065 	      len = 8;
2066 	      comma = "";
2067 	    }
2069 	  len += fprintf (stderr, "%s%s", comma, "fixed");
2070 	  comma = ", ";
2071 	}
2073       if (len > 70)
2074 	{
2075 	  fprintf (stderr, ",\n\t");
2076 	  comma = "";
2077 	}
2079       len += fprintf (stderr, "%sreg-class = %s", comma,
2080 		      reg_class_names[(int)rs6000_regno_regclass[r]]);
2081       comma = ", ";
2083       if (len > 70)
2084 	{
2085 	  fprintf (stderr, ",\n\t");
2086 	  comma = "";
2087 	}
2089       fprintf (stderr, "%sregno = %d\n", comma, r);
2090     }
2091 }
2093 static const char *
2094 rs6000_debug_vector_unit (enum rs6000_vector v)
2095 {
2096   const char *ret;
2098   switch (v)
2099     {
2100     case VECTOR_NONE:	   ret = "none";      break;
2101     case VECTOR_ALTIVEC:   ret = "altivec";   break;
2102     case VECTOR_VSX:	   ret = "vsx";       break;
2103     case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2104     case VECTOR_PAIRED:	   ret = "paired";    break;
2105     case VECTOR_SPE:	   ret = "spe";       break;
2106     case VECTOR_OTHER:	   ret = "other";     break;
2107     default:		   ret = "unknown";   break;
2108     }
2110   return ret;
2111 }
2113 /* Inner function printing just the address mask for a particular reload
2114 register class. */
2115 DEBUG_FUNCTION char *
2116 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2117 {
2118   static char ret[8];
2119 char *p = ret;
2121 if ((mask & RELOAD_REG_VALID) != 0)
2122 *p++ = 'v';
2123 else if (keep_spaces)
2124 *p++ = ' ';
2126 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2127 *p++ = 'm';
2128 else if (keep_spaces)
2129 *p++ = ' ';
2131 if ((mask & RELOAD_REG_INDEXED) != 0)
2132 *p++ = 'i';
2133 else if (keep_spaces)
2134 *p++ = ' ';
2136 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2137 *p++ = 'O';
2138 else if ((mask & RELOAD_REG_OFFSET) != 0)
2139 *p++ = 'o';
2140 else if (keep_spaces)
2141 *p++ = ' ';
2143 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2144 *p++ = '+';
2145 else if (keep_spaces)
2146 *p++ = ' ';
2148 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2149 *p++ = '+';
2150 else if (keep_spaces)
2151 *p++ = ' ';
2153 if ((mask & RELOAD_REG_AND_M16) != 0)
2154 *p++ = '&';
2155 else if (keep_spaces)
2156 *p++ = ' ';
2158   *p = '\0';
2160   return ret;
2161 }
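/* Editorial example: the flags print in the fixed order v m i o + + &, so a
   mask that is valid, indexed, offsettable, and PRE_INC/PRE_DEC-capable
   comes out as "v io+  " when keep_spaces is true (an unset flag keeps its
   column as a blank).  */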
2163 /* Print the address masks in a human readable fashion.  */
2164 DEBUG_FUNCTION void
2165 rs6000_debug_print_mode (ssize_t m)
2166 {
2167   ssize_t rc;
2168 int spaces = 0;
2169 bool fuse_extra_p;
2171 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2172 for (rc = 0; rc < N_RELOAD_REG; rc++)
2173 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2174 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2176 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2177 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2178 fprintf (stderr, " Reload=%c%c",
2179 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2180 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2181 else
2182 spaces += sizeof (" Reload=sl") - 1;
2184 if (reg_addr[m].scalar_in_vmx_p)
2186 fprintf (stderr, "%*s Upper=y", spaces, "");
2187 spaces = 0;
2189 else
2190 spaces += sizeof (" Upper=y") - 1;
2192 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2193 || reg_addr[m].fused_toc);
2194 if (!fuse_extra_p)
2196 for (rc = 0; rc < N_RELOAD_REG; rc++)
2198 if (rc != RELOAD_REG_ANY)
2200 	    if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2202 		|| reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2203 		|| reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2204 		|| reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2206 fuse_extra_p = true;
2207 break;
2213 if (fuse_extra_p)
2215 fprintf (stderr, "%*s Fuse:", spaces, "");
2216 spaces = 0;
2218 for (rc = 0; rc < N_RELOAD_REG; rc++)
2220 if (rc != RELOAD_REG_ANY)
2222 char load, store;
2224 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2225 load = 'l';
2226 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2227 load = 'L';
2228 else
2229 load = '-';
2231 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2232 store = 's';
2233 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2234 store = 'S';
2235 else
2236 store = '-';
2238 if (load == '-' && store == '-')
2239 spaces += 5;
2240 else
2242 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2243 reload_reg_map[rc].name[0], load, store);
2244 spaces = 0;
2249 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2251 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2252 spaces = 0;
2254 else
2255 spaces += sizeof (" P8gpr") - 1;
2257 if (reg_addr[m].fused_toc)
2259 fprintf (stderr, "%*sToc", (spaces + 1), "");
2260 spaces = 0;
2262 else
2263 spaces += sizeof (" Toc") - 1;
2265 else
2266 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2268 if (rs6000_vector_unit[m] != VECTOR_NONE
2269 || rs6000_vector_mem[m] != VECTOR_NONE)
2271 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2272 spaces, "",
2273 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2274 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2277   fputs ("\n", stderr);
2278 }
2280 #define DEBUG_FMT_ID "%-32s= "
2281 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2282 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2283 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
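/* Editorial example: DEBUG_FMT_ID left-justifies the key in a 32-column
   field, so fprintf (stderr, DEBUG_FMT_S, "abi", "ELFv2") prints the key,
   pads with spaces to column 32, then prints "= ELFv2" and a newline.  */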
2285 /* Print various interesting information with -mdebug=reg. */
2286 static void
2287 rs6000_debug_reg_global (void)
2288 {
2289 static const char *const tf[2] = { "false", "true" };
2290 const char *nl = (const char *)0;
2291 int m;
2292 size_t m1, m2, v;
2293 char costly_num[20];
2294 char nop_num[20];
2295 char flags_buffer[40];
2296 const char *costly_str;
2297 const char *nop_str;
2298 const char *trace_str;
2299 const char *abi_str;
2300 const char *cmodel_str;
2301 struct cl_target_option cl_opts;
2303 /* Modes we want tieable information on. */
2304 static const machine_mode print_tieable_modes[] = {
2305 QImode,
2306 HImode,
2307 SImode,
2308 DImode,
2309 TImode,
2310 PTImode,
2311 SFmode,
2312 DFmode,
2313 TFmode,
2314 IFmode,
2315 KFmode,
2316 SDmode,
2317 DDmode,
2318 TDmode,
2319 V8QImode,
2320 V4HImode,
2321 V2SImode,
2322 V16QImode,
2323 V8HImode,
2324 V4SImode,
2325 V2DImode,
2326 V1TImode,
2327 V32QImode,
2328 V16HImode,
2329 V8SImode,
2330 V4DImode,
2331 V2TImode,
2332 V2SFmode,
2333 V4SFmode,
2334 V2DFmode,
2335 V8SFmode,
2336 V4DFmode,
2337 CCmode,
2338 CCUNSmode,
2339 CCEQmode,
2342 /* Virtual regs we are interested in. */
2343 const static struct {
2344 int regno; /* register number. */
2345 const char *name; /* register name. */
2346 } virtual_regs[] = {
2347 { STACK_POINTER_REGNUM, "stack pointer:" },
2348 { TOC_REGNUM, "toc: " },
2349 { STATIC_CHAIN_REGNUM, "static chain: " },
2350 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2351 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2352 { ARG_POINTER_REGNUM, "arg pointer: " },
2353 { FRAME_POINTER_REGNUM, "frame pointer:" },
2354 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2355 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2356 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2357 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2358 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2359 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2360 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2361     { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2362 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2365 fputs ("\nHard register information:\n", stderr);
2366 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2367 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2368 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2369 LAST_ALTIVEC_REGNO,
2370 "vs");
2371 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2372 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2373 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2374 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2375 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2376 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2377 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2378 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2380 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2381 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2382 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2384 fprintf (stderr,
2385 "\n"
2386 "d reg_class = %s\n"
2387 "f reg_class = %s\n"
2388 "v reg_class = %s\n"
2389 "wa reg_class = %s\n"
2390 "wb reg_class = %s\n"
2391 "wd reg_class = %s\n"
2392 "we reg_class = %s\n"
2393 "wf reg_class = %s\n"
2394 "wg reg_class = %s\n"
2395 "wh reg_class = %s\n"
2396 "wi reg_class = %s\n"
2397 "wj reg_class = %s\n"
2398 "wk reg_class = %s\n"
2399 "wl reg_class = %s\n"
2400 "wm reg_class = %s\n"
2401 "wo reg_class = %s\n"
2402 "wp reg_class = %s\n"
2403 "wq reg_class = %s\n"
2404 "wr reg_class = %s\n"
2405 "ws reg_class = %s\n"
2406 "wt reg_class = %s\n"
2407 "wu reg_class = %s\n"
2408 "wv reg_class = %s\n"
2409 "ww reg_class = %s\n"
2410 "wx reg_class = %s\n"
2411 "wy reg_class = %s\n"
2412 "wz reg_class = %s\n"
2413 "\n",
2414 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2415 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2416 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2417 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2418 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2419 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2420 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2421 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2422 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2423 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2424 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2425 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2426 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2427 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2428 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2429 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2430 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2431 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2432 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2433 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2434 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2435 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2436 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2437 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2438 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2439 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2440 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
2442 nl = "\n";
2443 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2444 rs6000_debug_print_mode (m);
2446 fputs ("\n", stderr);
2448 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2450 machine_mode mode1 = print_tieable_modes[m1];
2451 bool first_time = true;
2453 nl = (const char *)0;
2454 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2456 machine_mode mode2 = print_tieable_modes[m2];
2457 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2459 if (first_time)
2461 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2462 nl = "\n";
2463 first_time = false;
2466 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2470 if (!first_time)
2471 fputs ("\n", stderr);
2474 if (nl)
2475 fputs (nl, stderr);
2477 if (rs6000_recip_control)
2479 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2481 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2482 if (rs6000_recip_bits[m])
2484 fprintf (stderr,
2485 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2486 GET_MODE_NAME (m),
2487 (RS6000_RECIP_AUTO_RE_P (m)
2488 ? "auto"
2489 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2490 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2491 ? "auto"
2492 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2495 fputs ("\n", stderr);
2498 if (rs6000_cpu_index >= 0)
2500 const char *name = processor_target_table[rs6000_cpu_index].name;
2501 HOST_WIDE_INT flags
2502 = processor_target_table[rs6000_cpu_index].target_enable;
2504 sprintf (flags_buffer, "-mcpu=%s flags", name);
2505 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2507 else
2508 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2510 if (rs6000_tune_index >= 0)
2512 const char *name = processor_target_table[rs6000_tune_index].name;
2513 HOST_WIDE_INT flags
2514 = processor_target_table[rs6000_tune_index].target_enable;
2516 sprintf (flags_buffer, "-mtune=%s flags", name);
2517 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2519 else
2520 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2522 cl_target_option_save (&cl_opts, &global_options);
2523 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2524 rs6000_isa_flags);
2526 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2527 rs6000_isa_flags_explicit);
2529 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2530 rs6000_builtin_mask);
2532 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2534 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2535 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2537 switch (rs6000_sched_costly_dep)
2539 case max_dep_latency:
2540 costly_str = "max_dep_latency";
2541 break;
2543 case no_dep_costly:
2544 costly_str = "no_dep_costly";
2545 break;
2547 case all_deps_costly:
2548 costly_str = "all_deps_costly";
2549 break;
2551 case true_store_to_load_dep_costly:
2552 costly_str = "true_store_to_load_dep_costly";
2553 break;
2555 case store_to_load_dep_costly:
2556 costly_str = "store_to_load_dep_costly";
2557 break;
2559 default:
2560 costly_str = costly_num;
2561 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2562 break;
2565 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2567 switch (rs6000_sched_insert_nops)
2569 case sched_finish_regroup_exact:
2570 nop_str = "sched_finish_regroup_exact";
2571 break;
2573 case sched_finish_pad_groups:
2574 nop_str = "sched_finish_pad_groups";
2575 break;
2577 case sched_finish_none:
2578 nop_str = "sched_finish_none";
2579 break;
2581 default:
2582 nop_str = nop_num;
2583 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2584 break;
2587 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2589 switch (rs6000_sdata)
2591 default:
2592 case SDATA_NONE:
2593 break;
2595 case SDATA_DATA:
2596 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2597 break;
2599 case SDATA_SYSV:
2600 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2601 break;
2603 case SDATA_EABI:
2604 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2605 break;
2609 switch (rs6000_traceback)
2611 case traceback_default: trace_str = "default"; break;
2612 case traceback_none: trace_str = "none"; break;
2613 case traceback_part: trace_str = "part"; break;
2614 case traceback_full: trace_str = "full"; break;
2615 default: trace_str = "unknown"; break;
2618 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2620 switch (rs6000_current_cmodel)
2622 case CMODEL_SMALL: cmodel_str = "small"; break;
2623 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2624 case CMODEL_LARGE: cmodel_str = "large"; break;
2625 default: cmodel_str = "unknown"; break;
2628 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2630 switch (rs6000_current_abi)
2632 case ABI_NONE: abi_str = "none"; break;
2633 case ABI_AIX: abi_str = "aix"; break;
2634 case ABI_ELFv2: abi_str = "ELFv2"; break;
2635 case ABI_V4: abi_str = "V4"; break;
2636 case ABI_DARWIN: abi_str = "darwin"; break;
2637 default: abi_str = "unknown"; break;
2640 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2642 if (rs6000_altivec_abi)
2643 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2645 if (rs6000_spe_abi)
2646 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2648 if (rs6000_darwin64_abi)
2649 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2651 if (rs6000_float_gprs)
2652 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2654 fprintf (stderr, DEBUG_FMT_S, "fprs",
2655 (TARGET_FPRS ? "true" : "false"));
2657 fprintf (stderr, DEBUG_FMT_S, "single_float",
2658 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2660 fprintf (stderr, DEBUG_FMT_S, "double_float",
2661 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2663 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2664 (TARGET_SOFT_FLOAT ? "true" : "false"));
2666 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2667 (TARGET_E500_SINGLE ? "true" : "false"));
2669 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2670 (TARGET_E500_DOUBLE ? "true" : "false"));
2672 if (TARGET_LINK_STACK)
2673 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2675 fprintf (stderr, DEBUG_FMT_S, "lra", TARGET_LRA ? "true" : "false");
2677 if (TARGET_P8_FUSION)
2679 char options[80];
2681 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2682 if (TARGET_TOC_FUSION)
2683 strcat (options, ", toc");
2685 if (TARGET_P8_FUSION_SIGN)
2686 strcat (options, ", sign");
2688 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2691 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2692 TARGET_SECURE_PLT ? "secure" : "bss");
2693 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2694 aix_struct_return ? "aix" : "sysv");
2695 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2696 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2697 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2698 tf[!!rs6000_align_branch_targets]);
2699 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2700 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2701 rs6000_long_double_type_size);
2702 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2703 (int)rs6000_sched_restricted_insns_priority);
2704 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2705 (int)END_BUILTINS);
2706 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2707 (int)RS6000_BUILTIN_COUNT);
2709 if (TARGET_VSX)
2710 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2711 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2713 if (TARGET_DIRECT_MOVE_128)
2714 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2715 	     (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2716 }
2719 /* Update the addr mask bits in reg_addr to help secondary reload and the
2720    legitimate address support (rs6000_legitimate_address_p) figure out the
2721    appropriate addressing to use.  */
2723 static void
2724 rs6000_setup_reg_addr_masks (void)
2725 {
2726 ssize_t rc, reg, m, nregs;
2727 addr_mask_type any_addr_mask, addr_mask;
2729 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2731 machine_mode m2 = (machine_mode) m;
2732 bool complex_p = false;
2733 size_t msize;
2735 if (COMPLEX_MODE_P (m2))
2737 complex_p = true;
2738 m2 = GET_MODE_INNER (m2);
2741 msize = GET_MODE_SIZE (m2);
2743       /* SDmode is special in that we want to access it only via REG+REG
2744 	 addressing on power7 and above, since we want to use the LFIWZX and
2745 	 STFIWX instructions to access it.  */
2746 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
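      /* Editorial note: LFIWZX/STFIWX are X-form (indexed) instructions, so
	 when they are the preferred way to touch SDmode, the code below
	 deliberately avoids setting RELOAD_REG_OFFSET for it.  */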
2748 any_addr_mask = 0;
2749 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2751 addr_mask = 0;
2752 reg = reload_reg_map[rc].reg;
2754 /* Can mode values go in the GPR/FPR/Altivec registers? */
2755 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2757 nregs = rs6000_hard_regno_nregs[m][reg];
2758 addr_mask |= RELOAD_REG_VALID;
2760 /* Indicate if the mode takes more than 1 physical register. If
2761 it takes a single register, indicate it can do REG+REG
2762 addressing. */
2763 if (nregs > 1 || m == BLKmode || complex_p)
2764 addr_mask |= RELOAD_REG_MULTIPLE;
2765 else
2766 addr_mask |= RELOAD_REG_INDEXED;
2768 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2769 addressing. Restrict addressing on SPE for 64-bit types
2770 because of the SUBREG hackery used to address 64-bit floats in
2771 '32-bit' GPRs. If we allow scalars into Altivec registers,
2772 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2774 if (TARGET_UPDATE
2775 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2776 && msize <= 8
2777 && !VECTOR_MODE_P (m2)
2778 && !FLOAT128_VECTOR_P (m2)
2779 && !complex_p
2780 && (m2 != DFmode || !TARGET_UPPER_REGS_DF)
2781 && (m2 != SFmode || !TARGET_UPPER_REGS_SF)
2782 && !(TARGET_E500_DOUBLE && msize == 8))
2784 addr_mask |= RELOAD_REG_PRE_INCDEC;
2786 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2787 we don't allow PRE_MODIFY for some multi-register
2788 operations. */
2789 switch (m)
2791 default:
2792 addr_mask |= RELOAD_REG_PRE_MODIFY;
2793 break;
2795 case DImode:
2796 if (TARGET_POWERPC64)
2797 addr_mask |= RELOAD_REG_PRE_MODIFY;
2798 break;
2800 case DFmode:
2801 case DDmode:
2802 if (TARGET_DF_INSN)
2803 addr_mask |= RELOAD_REG_PRE_MODIFY;
2804 break;
2809 /* GPR and FPR registers can do REG+OFFSET addressing, except
2810 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2811 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2812 if ((addr_mask != 0) && !indexed_only_p
2813 && msize <= 8
2814 && (rc == RELOAD_REG_GPR
2815 || ((msize == 8 || m2 == SFmode)
2816 && (rc == RELOAD_REG_FPR
2817 || (rc == RELOAD_REG_VMX
2818 && TARGET_P9_DFORM_SCALAR)))))
2819 addr_mask |= RELOAD_REG_OFFSET;
2821 	  /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2822 	     instructions are enabled.  The offset for 128-bit VSX registers is
2823 	     only 12 bits.  While GPRs can handle the full offset range, VSX
2824 	     registers can only handle the restricted range.  */
2825 else if ((addr_mask != 0) && !indexed_only_p
2826 && msize == 16 && TARGET_P9_DFORM_VECTOR
2827 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2828 || (m2 == TImode && TARGET_VSX_TIMODE)))
2830 addr_mask |= RELOAD_REG_OFFSET;
2831 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2832 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2835 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2836 addressing on 128-bit types. */
2837 if (rc == RELOAD_REG_VMX && msize == 16
2838 && (addr_mask & RELOAD_REG_VALID) != 0)
2839 addr_mask |= RELOAD_REG_AND_M16;
2841 reg_addr[m].addr_mask[rc] = addr_mask;
2842 any_addr_mask |= addr_mask;
2845       reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2846     }
2847 }
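/* Editorial sketch (hypothetical helper, not from the original source):
   consumers test the precomputed masks instead of re-deriving the
   predicates, along the lines of:

     static inline bool
     mode_supports_dform_offset (machine_mode mode)
     {
       return (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
	       & RELOAD_REG_OFFSET) != 0;
     }
*/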
2850 /* Initialize the various global tables that are based on register size. */
2851 static void
2852 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2853 {
2854 ssize_t r, m, c;
2855 int align64;
2856 int align32;
2858 /* Precalculate REGNO_REG_CLASS. */
2859 rs6000_regno_regclass[0] = GENERAL_REGS;
2860 for (r = 1; r < 32; ++r)
2861 rs6000_regno_regclass[r] = BASE_REGS;
2863 for (r = 32; r < 64; ++r)
2864 rs6000_regno_regclass[r] = FLOAT_REGS;
2866 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2867 rs6000_regno_regclass[r] = NO_REGS;
2869 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2870 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2872 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2873 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2874 rs6000_regno_regclass[r] = CR_REGS;
2876 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2877 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2878 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2879 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2880 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2881 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2882 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2883 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2884 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2885 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2886 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2887 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2889 /* Precalculate register class to simpler reload register class. We don't
2890 need all of the register classes that are combinations of different
2891 classes, just the simple ones that have constraint letters. */
2892 for (c = 0; c < N_REG_CLASSES; c++)
2893 reg_class_to_reg_type[c] = NO_REG_TYPE;
2895 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2896 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2897 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2898 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2899 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2900 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2901 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2902 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2903 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2904 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2905 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2906 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2908 if (TARGET_VSX)
2910 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2911 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2913 else
2915 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2916 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2919   /* Precalculate the valid memory formats as well as the vector information;
2920      this must be set up before the rs6000_hard_regno_nregs_internal calls
2921      below.  */
2922 gcc_assert ((int)VECTOR_NONE == 0);
2923 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2924 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
2926 gcc_assert ((int)CODE_FOR_nothing == 0);
2927 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2929 gcc_assert ((int)NO_REGS == 0);
2930 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2932   /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
2933      controls whether the compiler assumes native alignment or must still assume 128-bit alignment.  */
2934 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2936 align64 = 64;
2937 align32 = 32;
2939 else
2941 align64 = 128;
2942 align32 = 128;
2945 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
2946 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
2947 if (TARGET_FLOAT128)
2949 rs6000_vector_mem[KFmode] = VECTOR_VSX;
2950 rs6000_vector_align[KFmode] = 128;
2952 if (FLOAT128_IEEE_P (TFmode))
2954 rs6000_vector_mem[TFmode] = VECTOR_VSX;
2955 rs6000_vector_align[TFmode] = 128;
2959 /* V2DF mode, VSX only. */
2960 if (TARGET_VSX)
2962 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2963 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2964 rs6000_vector_align[V2DFmode] = align64;
2967 /* V4SF mode, either VSX or Altivec. */
2968 if (TARGET_VSX)
2970 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2971 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2972 rs6000_vector_align[V4SFmode] = align32;
2974 else if (TARGET_ALTIVEC)
2976 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2977 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2978 rs6000_vector_align[V4SFmode] = align32;
2981 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2982 and stores. */
2983 if (TARGET_ALTIVEC)
2985 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2986 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2987 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2988 rs6000_vector_align[V4SImode] = align32;
2989 rs6000_vector_align[V8HImode] = align32;
2990 rs6000_vector_align[V16QImode] = align32;
2992 if (TARGET_VSX)
2994 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2995 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2996 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2998 else
3000 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3001 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3002 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3006 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3007 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3008 if (TARGET_VSX)
3010 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3011 rs6000_vector_unit[V2DImode]
3012 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3013 rs6000_vector_align[V2DImode] = align64;
3015 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3016 rs6000_vector_unit[V1TImode]
3017 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3018 rs6000_vector_align[V1TImode] = 128;
3021 /* DFmode, see if we want to use the VSX unit. Memory is handled
3022 differently, so don't set rs6000_vector_mem. */
3023 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
3025 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3026 rs6000_vector_align[DFmode] = 64;
3029 /* SFmode, see if we want to use the VSX unit. */
3030 if (TARGET_P8_VECTOR && TARGET_VSX_SCALAR_FLOAT)
3032 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3033 rs6000_vector_align[SFmode] = 32;
3036 /* Allow TImode in VSX register and set the VSX memory macros. */
3037 if (TARGET_VSX && TARGET_VSX_TIMODE)
3039 rs6000_vector_mem[TImode] = VECTOR_VSX;
3040 rs6000_vector_align[TImode] = align64;
3043 /* TODO add SPE and paired floating point vector support. */
3045 /* Register class constraints for the constraints that depend on compile
3046 switches. When the VSX code was added, different constraints were added
3047 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3048 of the VSX registers are used. The register classes for scalar floating
3049    point types are set based on whether we allow that type into the upper
3050 (Altivec) registers. GCC has register classes to target the Altivec
3051 registers for load/store operations, to select using a VSX memory
3052 operation instead of the traditional floating point operation. The
3053 constraints are:
3055 d - Register class to use with traditional DFmode instructions.
3056 f - Register class to use with traditional SFmode instructions.
3057 v - Altivec register.
3058 wa - Any VSX register.
3059 wc - Reserved to represent individual CR bits (used in LLVM).
3060 wd - Preferred register class for V2DFmode.
3061 wf - Preferred register class for V4SFmode.
3062 wg - Float register for power6x move insns.
3063 wh - FP register for direct move instructions.
3064 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3065 wj - FP or VSX register to hold 64-bit integers for direct moves.
3066 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3067 wl - Float register if we can do 32-bit signed int loads.
3068 wm - VSX register for ISA 2.07 direct move operations.
3069 wn - always NO_REGS.
3070 wr - GPR if 64-bit mode is permitted.
3071 ws - Register class to do ISA 2.06 DF operations.
3072 wt - VSX register for TImode in VSX registers.
3073 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3074 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3075 ww - Register class to do SF conversions in with VSX operations.
3076 wx - Float register if we can do 32-bit int stores.
3077 wy - Register class to do ISA 2.07 SF operations.
3078 wz - Float register if we can do 32-bit unsigned int loads. */
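/* Editorial example: these classes are what the machine description's
   register constraints resolve to.  An operand constrained as "ws" in
   rs6000.md, for instance, matches rs6000_constraints[RS6000_CONSTRAINT_ws],
   which the code below sets to VSX_REGS or FLOAT_REGS depending on
   -mupper-regs-df.  */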
3080 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3081 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3083 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3084 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3086 if (TARGET_VSX)
3088 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3089 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3090 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3092 if (TARGET_VSX_TIMODE)
3093 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3095 if (TARGET_UPPER_REGS_DF) /* DFmode */
3097 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
3098 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
3100 else
3101 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
3103 if (TARGET_UPPER_REGS_DF) /* DImode */
3104 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS;
3105 else
3106 rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS;
3109 /* Add conditional constraints based on various options, to allow us to
3110 collapse multiple insn patterns. */
3111 if (TARGET_ALTIVEC)
3112 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3114 if (TARGET_MFPGPR) /* DFmode */
3115 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3117 if (TARGET_LFIWAX)
3118 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3120 if (TARGET_DIRECT_MOVE)
3122 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3123 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3124 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3125 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3126 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3127 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3130 if (TARGET_POWERPC64)
3131 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3133 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF) /* SFmode */
3135 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3136 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3137 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3139 else if (TARGET_P8_VECTOR)
3141 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
3142 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3144 else if (TARGET_VSX)
3145 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3147 if (TARGET_STFIWX)
3148 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3150 if (TARGET_LFIWZX)
3151 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3153 if (TARGET_FLOAT128)
3155 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3156 if (FLOAT128_IEEE_P (TFmode))
3157 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3160 /* Support for new D-form instructions. */
3161 if (TARGET_P9_DFORM_SCALAR)
3162 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3164 /* Support for ISA 3.0 (power9) vectors. */
3165 if (TARGET_P9_VECTOR)
3166 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3168 /* Support for new direct moves (ISA 3.0 + 64bit). */
3169 if (TARGET_DIRECT_MOVE_128)
3170 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3172 /* Set up the reload helper and direct move functions. */
3173 if (TARGET_VSX || TARGET_ALTIVEC)
3175 if (TARGET_64BIT)
3177 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3178 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3179 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3180 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3181 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3182 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3183 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3184 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3185 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3186 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3187 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3188 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3189 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3190 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3191 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3192 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3193 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3194 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3195 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3196 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3198 if (FLOAT128_VECTOR_P (KFmode))
3200 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3201 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3204 if (FLOAT128_VECTOR_P (TFmode))
3206 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3207 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3210 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3211 available. */
3212 if (TARGET_NO_SDMODE_STACK)
3214 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3215 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3218 if (TARGET_VSX_TIMODE)
3220 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3221 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3224 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3226 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3227 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3228 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3229 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3230 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3231 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3232 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3233 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3234 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3236 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3237 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3238 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3239 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3240 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3241 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3242 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3243 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3244 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3246 if (FLOAT128_VECTOR_P (KFmode))
3248 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3249 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3252 if (FLOAT128_VECTOR_P (TFmode))
3254 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3255 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3259 else
3261 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3262 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3263 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3264 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3265 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3266 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3267 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3268 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3269 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3270 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3271 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3272 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3273 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3274 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3275 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3276 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3277 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3278 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3279 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3280 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3282 if (FLOAT128_VECTOR_P (KFmode))
3284 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3285 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3288 if (FLOAT128_IEEE_P (TFmode))
3290 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3291 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3294 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3295 available. */
3296 if (TARGET_NO_SDMODE_STACK)
3298 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3299 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3302 if (TARGET_VSX_TIMODE)
3304 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3305 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3308 if (TARGET_DIRECT_MOVE)
3310 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3311 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3312 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3316 if (TARGET_UPPER_REGS_DF)
3317 reg_addr[DFmode].scalar_in_vmx_p = true;
3319 if (TARGET_UPPER_REGS_DI)
3320 reg_addr[DImode].scalar_in_vmx_p = true;
3322 if (TARGET_UPPER_REGS_SF)
3323 reg_addr[SFmode].scalar_in_vmx_p = true;
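  /* Editorial note: scalar_in_vmx_p records that these scalar modes may live
     in the upper (Altivec) half of the VSX register file;
     rs6000_hard_regno_mode_ok and the -mdebug=reg output ("Upper=y") both
     key off this flag.  */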
3326 /* Setup the fusion operations. */
3327 if (TARGET_P8_FUSION)
3329 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3330 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3331 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3332 if (TARGET_64BIT)
3333 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3336 if (TARGET_P9_FUSION)
3338 struct fuse_insns {
3339 enum machine_mode mode; /* Mode of the fused type. */
3340 enum machine_mode pmode; /* Pointer mode. */
3341 enum rs6000_reload_reg_type rtype; /* Register type. */
3342 enum insn_code load; /* Load insn. */
3343 enum insn_code store; /* Store insn. */
3346 static const struct fuse_insns addis_insns[] = {
3347 { SFmode, DImode, RELOAD_REG_FPR,
3348 CODE_FOR_fusion_fpr_di_sf_load,
3349 CODE_FOR_fusion_fpr_di_sf_store },
3351 { SFmode, SImode, RELOAD_REG_FPR,
3352 CODE_FOR_fusion_fpr_si_sf_load,
3353 CODE_FOR_fusion_fpr_si_sf_store },
3355 { DFmode, DImode, RELOAD_REG_FPR,
3356 CODE_FOR_fusion_fpr_di_df_load,
3357 CODE_FOR_fusion_fpr_di_df_store },
3359 { DFmode, SImode, RELOAD_REG_FPR,
3360 CODE_FOR_fusion_fpr_si_df_load,
3361 CODE_FOR_fusion_fpr_si_df_store },
3363 { DImode, DImode, RELOAD_REG_FPR,
3364 CODE_FOR_fusion_fpr_di_di_load,
3365 CODE_FOR_fusion_fpr_di_di_store },
3367 { DImode, SImode, RELOAD_REG_FPR,
3368 CODE_FOR_fusion_fpr_si_di_load,
3369 CODE_FOR_fusion_fpr_si_di_store },
3371 { QImode, DImode, RELOAD_REG_GPR,
3372 CODE_FOR_fusion_gpr_di_qi_load,
3373 CODE_FOR_fusion_gpr_di_qi_store },
3375 { QImode, SImode, RELOAD_REG_GPR,
3376 CODE_FOR_fusion_gpr_si_qi_load,
3377 CODE_FOR_fusion_gpr_si_qi_store },
3379 { HImode, DImode, RELOAD_REG_GPR,
3380 CODE_FOR_fusion_gpr_di_hi_load,
3381 CODE_FOR_fusion_gpr_di_hi_store },
3383 { HImode, SImode, RELOAD_REG_GPR,
3384 CODE_FOR_fusion_gpr_si_hi_load,
3385 CODE_FOR_fusion_gpr_si_hi_store },
3387 { SImode, DImode, RELOAD_REG_GPR,
3388 CODE_FOR_fusion_gpr_di_si_load,
3389 CODE_FOR_fusion_gpr_di_si_store },
3391 { SImode, SImode, RELOAD_REG_GPR,
3392 CODE_FOR_fusion_gpr_si_si_load,
3393 CODE_FOR_fusion_gpr_si_si_store },
3395 { SFmode, DImode, RELOAD_REG_GPR,
3396 CODE_FOR_fusion_gpr_di_sf_load,
3397 CODE_FOR_fusion_gpr_di_sf_store },
3399 { SFmode, SImode, RELOAD_REG_GPR,
3400 CODE_FOR_fusion_gpr_si_sf_load,
3401 CODE_FOR_fusion_gpr_si_sf_store },
3403 { DImode, DImode, RELOAD_REG_GPR,
3404 CODE_FOR_fusion_gpr_di_di_load,
3405 CODE_FOR_fusion_gpr_di_di_store },
3407 { DFmode, DImode, RELOAD_REG_GPR,
3408 CODE_FOR_fusion_gpr_di_df_load,
3409 CODE_FOR_fusion_gpr_di_df_store },
3412 enum machine_mode cur_pmode = Pmode;
3413 size_t i;
3415 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3417 enum machine_mode xmode = addis_insns[i].mode;
3418 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3420 if (addis_insns[i].pmode != cur_pmode)
3421 continue;
3423 if (rtype == RELOAD_REG_FPR
3424 && (!TARGET_HARD_FLOAT || !TARGET_FPRS))
3425 continue;
3427 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3428 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
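/* Illustrative sketch (an editor's example, not part of the build): on a
   64-bit target Pmode is DImode, so only the DImode rows of addis_insns
   are installed and the SImode rows fail the pmode check above.  For
   instance the { SFmode, DImode, RELOAD_REG_FPR, ... } row results in:

     reg_addr[SFmode].fusion_addis_ld[RELOAD_REG_FPR]
       = CODE_FOR_fusion_fpr_di_sf_load;
     reg_addr[SFmode].fusion_addis_st[RELOAD_REG_FPR]
       = CODE_FOR_fusion_fpr_di_sf_store;  */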
3432 /* Note which types support fusing a TOC setup with a memory insn.  We only
3433 do fused TOCs for the medium/large code models. */
3434 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3435 && (TARGET_CMODEL != CMODEL_SMALL))
3437 reg_addr[QImode].fused_toc = true;
3438 reg_addr[HImode].fused_toc = true;
3439 reg_addr[SImode].fused_toc = true;
3440 reg_addr[DImode].fused_toc = true;
3441 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3443 if (TARGET_SINGLE_FLOAT)
3444 reg_addr[SFmode].fused_toc = true;
3445 if (TARGET_DOUBLE_FLOAT)
3446 reg_addr[DFmode].fused_toc = true;
3450 /* Precalculate HARD_REGNO_NREGS. */
3451 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3452 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3453 rs6000_hard_regno_nregs[m][r]
3454 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3456 /* Precalculate HARD_REGNO_MODE_OK. */
3457 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3458 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3459 if (rs6000_hard_regno_mode_ok (r, (machine_mode)m))
3460 rs6000_hard_regno_mode_ok_p[m][r] = true;
3462 /* Precalculate CLASS_MAX_NREGS sizes. */
3463 for (c = 0; c < LIM_REG_CLASSES; ++c)
3465 int reg_size;
3467 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3468 reg_size = UNITS_PER_VSX_WORD;
3470 else if (c == ALTIVEC_REGS)
3471 reg_size = UNITS_PER_ALTIVEC_WORD;
3473 else if (c == FLOAT_REGS)
3474 reg_size = UNITS_PER_FP_WORD;
3476 else
3477 reg_size = UNITS_PER_WORD;
3479 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3481 machine_mode m2 = (machine_mode)m;
3482 int reg_size2 = reg_size;
3484 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3485 in VSX. */
3486 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3487 reg_size2 = UNITS_PER_FP_WORD;
3489 rs6000_class_max_nregs[m][c]
3490 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3494 if (TARGET_E500_DOUBLE)
3495 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
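/* Worked example (illustrative): for FLOAT_REGS, reg_size is
   UNITS_PER_FP_WORD (8), so the 16-byte V2DFmode needs
   (16 + 8 - 1) / 8 = 2 registers; for a VSX class reg_size is
   UNITS_PER_VSX_WORD (16) and the same mode needs
   (16 + 16 - 1) / 16 = 1 register.  FLOAT128_2REG_P modes are forced
   back to the 8-byte size and therefore always take 2 registers.  */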
3497 /* Calculate the modes for which to automatically generate code using the
3498 reciprocal divide and square root instructions.  In the future, possibly
3499 automatically generate the instructions even if the user did not specify
3500 -mrecip.  On the older machines the double-precision reciprocal sqrt
3501 estimate is not accurate enough. */
3502 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3503 if (TARGET_FRES)
3504 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3505 if (TARGET_FRE)
3506 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3507 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3508 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3509 if (VECTOR_UNIT_VSX_P (V2DFmode))
3510 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3512 if (TARGET_FRSQRTES)
3513 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3514 if (TARGET_FRSQRTE)
3515 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3516 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3517 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3518 if (VECTOR_UNIT_VSX_P (V2DFmode))
3519 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3521 if (rs6000_recip_control)
3523 if (!flag_finite_math_only)
3524 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
3525 if (flag_trapping_math)
3526 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
3527 if (!flag_reciprocal_math)
3528 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
3529 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3531 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3532 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3533 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3535 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3536 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3537 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3539 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3540 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3541 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3543 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3544 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3545 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3547 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3548 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3549 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3551 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3552 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3553 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3555 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3556 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3557 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3560 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3561 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
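/* Example (illustrative): with -mcpu=power7 the fres/fre/frsqrtes/frsqrte
   instructions and the VSX vector units are all present, so SFmode, DFmode,
   V4SFmode and V2DFmode each end up with both HAVE_RE and HAVE_RSQRTE set
   here, and -mrecip plus -ffast-math below adds the matching AUTO_RE and
   AUTO_RSQRTE bits.  */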
3565 /* Update the addr mask bits in reg_addr to help the secondary reload and
3566 legitimate address support figure out the appropriate addressing to
3567 use. */
3568 rs6000_setup_reg_addr_masks ();
3570 if (global_init_p || TARGET_DEBUG_TARGET)
3572 if (TARGET_DEBUG_REG)
3573 rs6000_debug_reg_global ();
3575 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3576 fprintf (stderr,
3577 "SImode variable mult cost = %d\n"
3578 "SImode constant mult cost = %d\n"
3579 "SImode short constant mult cost = %d\n"
3580 "DImode multipliciation cost = %d\n"
3581 "SImode division cost = %d\n"
3582 "DImode division cost = %d\n"
3583 "Simple fp operation cost = %d\n"
3584 "DFmode multiplication cost = %d\n"
3585 "SFmode division cost = %d\n"
3586 "DFmode division cost = %d\n"
3587 "cache line size = %d\n"
3588 "l1 cache size = %d\n"
3589 "l2 cache size = %d\n"
3590 "simultaneous prefetches = %d\n"
3591 "\n",
3592 rs6000_cost->mulsi,
3593 rs6000_cost->mulsi_const,
3594 rs6000_cost->mulsi_const9,
3595 rs6000_cost->muldi,
3596 rs6000_cost->divsi,
3597 rs6000_cost->divdi,
3598 rs6000_cost->fp,
3599 rs6000_cost->dmul,
3600 rs6000_cost->sdiv,
3601 rs6000_cost->ddiv,
3602 rs6000_cost->cache_line_size,
3603 rs6000_cost->l1_cache_size,
3604 rs6000_cost->l2_cache_size,
3605 rs6000_cost->simultaneous_prefetches);
3609 #if TARGET_MACHO
3610 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3612 static void
3613 darwin_rs6000_override_options (void)
3615 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3616 off. */
3617 rs6000_altivec_abi = 1;
3618 TARGET_ALTIVEC_VRSAVE = 1;
3619 rs6000_current_abi = ABI_DARWIN;
3621 if (DEFAULT_ABI == ABI_DARWIN
3622 && TARGET_64BIT)
3623 darwin_one_byte_bool = 1;
3625 if (TARGET_64BIT && ! TARGET_POWERPC64)
3627 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3628 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3630 if (flag_mkernel)
3632 rs6000_default_long_calls = 1;
3633 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3636 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3637 Altivec. */
3638 if (!flag_mkernel && !flag_apple_kext
3639 && TARGET_64BIT
3640 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3641 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3643 /* Unless the user (not the configurer) has explicitly overridden
3644 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3645 G4 unless targeting the kernel. */
3646 if (!flag_mkernel
3647 && !flag_apple_kext
3648 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3649 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3650 && ! global_options_set.x_rs6000_cpu_index)
3652 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3655 #endif
3657 /* If not otherwise specified by a target, make 'long double' equivalent to
3658 'double'. */
3660 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3661 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3662 #endif
3664 /* Return the builtin mask reflecting the options in use that could affect
3665 which builtins are enabled.  In the past we used target_flags, but we've run
3666 out of bits, and some options like SPE and PAIRED are no longer in
3667 target_flags. */
3669 HOST_WIDE_INT
3670 rs6000_builtin_mask_calculate (void)
3672 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3673 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3674 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3675 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3676 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3677 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3678 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3679 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3680 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3681 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3682 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3683 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3684 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3685 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3686 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3687 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3688 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3689 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3690 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3691 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3692 | ((TARGET_FLOAT128) ? RS6000_BTM_FLOAT128 : 0));
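/* Example (illustrative): a 64-bit -mcpu=power8 compile would produce a
   mask along the lines of

     RS6000_BTM_ALTIVEC | RS6000_BTM_VSX | RS6000_BTM_FRE
       | RS6000_BTM_FRES | RS6000_BTM_FRSQRTE | RS6000_BTM_FRSQRTES
       | RS6000_BTM_POPCNTD | RS6000_BTM_P8_VECTOR | RS6000_BTM_64BIT
       | RS6000_BTM_CRYPTO | RS6000_BTM_HTM | RS6000_BTM_DFP
       | RS6000_BTM_HARD_FLOAT

   which is later compared against each builtin's own mask of required
   features.  */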
3695 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3696 to clobber the XER[CA] bit because clobbering that bit without telling
3697 the compiler worked just fine with versions of GCC before GCC 5, and
3698 breaking a lot of older code in ways that are hard to track down is
3699 not such a great idea. */
3701 static rtx_insn *
3702 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3703 vec<const char *> &/*constraints*/,
3704 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3706 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3707 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3708 return NULL;
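/* For instance (illustrative), even a plain

     asm ("addic %0,%1,1" : "=r" (x) : "r" (y));

   is handled as if it had listed the carry bit in its clobbers, since
   addic silently writes XER[CA].  */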
3711 /* Override command line options. Mostly we process the processor type and
3712 sometimes adjust other TARGET_ options. */
3714 static bool
3715 rs6000_option_override_internal (bool global_init_p)
3717 bool ret = true;
3718 bool have_cpu = false;
3720 /* The default cpu requested at configure time, if any. */
3721 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3723 HOST_WIDE_INT set_masks;
3724 int cpu_index;
3725 int tune_index;
3726 struct cl_target_option *main_target_opt
3727 = ((global_init_p || target_option_default_node == NULL)
3728 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3730 /* Print defaults. */
3731 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3732 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3734 /* Remember the explicit arguments. */
3735 if (global_init_p)
3736 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3738 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3739 library functions, so warn about it. The flag may be useful for
3740 performance studies from time to time though, so don't disable it
3741 entirely. */
3742 if (global_options_set.x_rs6000_alignment_flags
3743 && rs6000_alignment_flags == MASK_ALIGN_POWER
3744 && DEFAULT_ABI == ABI_DARWIN
3745 && TARGET_64BIT)
3746 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3747 " it is incompatible with the installed C and C++ libraries");
3749 /* Numerous experiments show that IRA-based loop pressure
3750 calculation works better for RTL loop invariant motion on targets
3751 with enough (>= 32) registers.  It is an expensive optimization,
3752 so it is enabled only when optimizing for peak performance. */
3753 if (optimize >= 3 && global_init_p
3754 && !global_options_set.x_flag_ira_loop_pressure)
3755 flag_ira_loop_pressure = 1;
3757 /* Set the pointer size. */
3758 if (TARGET_64BIT)
3760 rs6000_pmode = (int)DImode;
3761 rs6000_pointer_size = 64;
3763 else
3765 rs6000_pmode = (int)SImode;
3766 rs6000_pointer_size = 32;
3769 /* Some OSs don't support saving the high part of 64-bit registers on context
3770 switch. Other OSs don't support saving Altivec registers. On those OSs,
3771 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3772 if the user wants either, the user must explicitly specify them and we
3773 won't interfere with the user's specification. */
3775 set_masks = POWERPC_MASKS;
3776 #ifdef OS_MISSING_POWERPC64
3777 if (OS_MISSING_POWERPC64)
3778 set_masks &= ~OPTION_MASK_POWERPC64;
3779 #endif
3780 #ifdef OS_MISSING_ALTIVEC
3781 if (OS_MISSING_ALTIVEC)
3782 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3783 #endif
3785 /* Don't override by the processor default if given explicitly. */
3786 set_masks &= ~rs6000_isa_flags_explicit;
3788 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3789 the cpu in a target attribute or pragma, but did not specify a tuning
3790 option, use the cpu for the tuning option rather than the option specified
3791 with -mtune on the command line. Process a '--with-cpu' configuration
3792 request as an implicit --cpu. */
3793 if (rs6000_cpu_index >= 0)
3795 cpu_index = rs6000_cpu_index;
3796 have_cpu = true;
3798 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3800 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3801 have_cpu = true;
3803 else if (implicit_cpu)
3805 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3806 have_cpu = true;
3808 else
3810 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3811 const char *default_cpu = ((!TARGET_POWERPC64)
3812 ? "powerpc"
3813 : ((BYTES_BIG_ENDIAN)
3814 ? "powerpc64"
3815 : "powerpc64le"));
3817 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3818 have_cpu = false;
3821 gcc_assert (cpu_index >= 0);
3823 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3824 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3825 with those from the cpu, except for options that were explicitly set. If
3826 we don't have a cpu, do not override the target bits set in
3827 TARGET_DEFAULT. */
3828 if (have_cpu)
3830 rs6000_isa_flags &= ~set_masks;
3831 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3832 & set_masks);
3834 else
3836 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3837 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3838 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3839 to using rs6000_isa_flags, we need to do the initialization here.
3841 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3842 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3843 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
3844 : processor_target_table[cpu_index].target_enable);
3845 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3848 if (rs6000_tune_index >= 0)
3849 tune_index = rs6000_tune_index;
3850 else if (have_cpu)
3851 rs6000_tune_index = tune_index = cpu_index;
3852 else
3854 size_t i;
3855 enum processor_type tune_proc
3856 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3858 tune_index = -1;
3859 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3860 if (processor_target_table[i].processor == tune_proc)
3862 rs6000_tune_index = tune_index = i;
3863 break;
3867 gcc_assert (tune_index >= 0);
3868 rs6000_cpu = processor_target_table[tune_index].processor;
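/* Example (illustrative): plain -mcpu=power8 makes cpu_index and tune_index
   both refer to the power8 entry, whereas -mcpu=power8 -mtune=power9 keeps
   the power8 ISA flags but takes scheduling and cost data from the power9
   entry.  */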
3870 /* Pick defaults for SPE-related control flags. Do this early to make sure
3871 that the TARGET_ macros are representative ASAP. */
3873 int spe_capable_cpu =
3874 (rs6000_cpu == PROCESSOR_PPC8540
3875 || rs6000_cpu == PROCESSOR_PPC8548);
3877 if (!global_options_set.x_rs6000_spe_abi)
3878 rs6000_spe_abi = spe_capable_cpu;
3880 if (!global_options_set.x_rs6000_spe)
3881 rs6000_spe = spe_capable_cpu;
3883 if (!global_options_set.x_rs6000_float_gprs)
3884 rs6000_float_gprs =
3885 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
3886 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
3887 : 0);
3890 if (global_options_set.x_rs6000_spe_abi
3891 && rs6000_spe_abi
3892 && !TARGET_SPE_ABI)
3893 error ("not configured for SPE ABI");
3895 if (global_options_set.x_rs6000_spe
3896 && rs6000_spe
3897 && !TARGET_SPE)
3898 error ("not configured for SPE instruction set");
3900 if (main_target_opt != NULL
3901 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
3902 || (main_target_opt->x_rs6000_spe != rs6000_spe)
3903 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
3904 error ("target attribute or pragma changes SPE ABI");
3906 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3907 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3908 || rs6000_cpu == PROCESSOR_PPCE5500)
3910 if (TARGET_ALTIVEC)
3911 error ("AltiVec not supported in this target");
3912 if (TARGET_SPE)
3913 error ("SPE not supported in this target");
3915 if (rs6000_cpu == PROCESSOR_PPCE6500)
3917 if (TARGET_SPE)
3918 error ("SPE not supported in this target");
3921 /* Disable Cell microcode if we are optimizing for the Cell
3922 and not optimizing for size. */
3923 if (rs6000_gen_cell_microcode == -1)
3924 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
3925 && !optimize_size);
3927 /* If we are optimizing big endian systems for space and it's OK to
3928 use instructions that would be microcoded on the Cell, use the
3929 load/store multiple and string instructions. */
3930 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
3931 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
3932 | OPTION_MASK_STRING);
3934 /* Don't allow -mmultiple or -mstring on little endian systems
3935 unless the cpu is a 750, because the hardware doesn't support the
3936 instructions used in little endian mode, and they cause an alignment
3937 trap.  The 750 does not cause an alignment trap (except when the
3938 target is unaligned). */
3940 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
3942 if (TARGET_MULTIPLE)
3944 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3945 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3946 warning (0, "-mmultiple is not supported on little endian systems");
3949 if (TARGET_STRING)
3951 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3952 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3953 warning (0, "-mstring is not supported on little endian systems");
3957 /* If little-endian, default to -mstrict-align on older processors.
3958 Testing for htm matches power8 and later. */
3959 if (!BYTES_BIG_ENDIAN
3960 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3961 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3963 /* -maltivec={le,be} implies -maltivec. */
3964 if (rs6000_altivec_element_order != 0)
3965 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3967 /* Disallow -maltivec=le in big endian mode for now. This is not
3968 known to be useful for anyone. */
3969 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
3971 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
3972 rs6000_altivec_element_order = 0;
3975 /* Add some warnings for VSX. */
3976 if (TARGET_VSX)
3978 const char *msg = NULL;
3979 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3980 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3982 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3983 msg = N_("-mvsx requires hardware floating point");
3984 else
3986 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3987 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3990 else if (TARGET_PAIRED_FLOAT)
3991 msg = N_("-mvsx and -mpaired are incompatible");
3992 else if (TARGET_AVOID_XFORM > 0)
3993 msg = N_("-mvsx needs indexed addressing");
3994 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3995 & OPTION_MASK_ALTIVEC))
3997 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3998 msg = N_("-mvsx and -mno-altivec are incompatible");
3999 else
4000 msg = N_("-mno-altivec disables vsx");
4003 if (msg)
4005 warning (0, msg);
4006 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4007 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4011 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4012 the -mcpu setting to enable options that conflict. */
4013 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4014 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4015 | OPTION_MASK_ALTIVEC
4016 | OPTION_MASK_VSX)) != 0)
4017 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4018 | OPTION_MASK_DIRECT_MOVE)
4019 & ~rs6000_isa_flags_explicit);
4021 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4022 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4024 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4025 unless the user explicitly used -mno-<option> to disable the code. */
4026 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_DFORM_SCALAR
4027 || TARGET_P9_DFORM_VECTOR || TARGET_P9_DFORM_BOTH > 0 || TARGET_P9_MINMAX)
4028 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4029 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4030 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4031 else if (TARGET_VSX)
4032 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4033 else if (TARGET_POPCNTD)
4034 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
4035 else if (TARGET_DFP)
4036 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4037 else if (TARGET_CMPB)
4038 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
4039 else if (TARGET_FPRND)
4040 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
4041 else if (TARGET_POPCNTB)
4042 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
4043 else if (TARGET_ALTIVEC)
4044 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
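/* Example (illustrative): -mmodulo alone (TARGET_MODULO) pulls in
   ISA_3_0_MASKS_SERVER, which transitively includes the ISA 2.07 and
   ISA 2.06 server masks, so options such as -mpower8-vector and -mvsx
   come on automatically unless the user gave an explicit -mno-<option>.  */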
4046 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4048 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4049 error ("-mcrypto requires -maltivec");
4050 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4053 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4055 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4056 error ("-mdirect-move requires -mvsx");
4057 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4060 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4062 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4063 error ("-mpower8-vector requires -maltivec");
4064 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4067 if (TARGET_P8_VECTOR && !TARGET_VSX)
4069 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4070 error ("-mpower8-vector requires -mvsx");
4071 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4074 if (TARGET_VSX_TIMODE && !TARGET_VSX)
4076 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
4077 error ("-mvsx-timode requires -mvsx");
4078 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4081 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4083 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4084 error ("-mhard-dfp requires -mhard-float");
4085 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4088 /* Allow an explicit -mupper-regs to set -mupper-regs-df, -mupper-regs-di,
4089 and -mupper-regs-sf, depending on the cpu, unless the user explicitly also
4090 set the individual option. */
4091 if (TARGET_UPPER_REGS > 0)
4093 if (TARGET_VSX
4094 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4096 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DF;
4097 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4099 if (TARGET_VSX
4100 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4102 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DI;
4103 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4105 if (TARGET_P8_VECTOR
4106 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4108 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_SF;
4109 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4112 else if (TARGET_UPPER_REGS == 0)
4114 if (TARGET_VSX
4115 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4117 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4118 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4120 if (TARGET_VSX
4121 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4123 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4124 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4126 if (TARGET_P8_VECTOR
4127 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4129 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4130 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4134 if (TARGET_UPPER_REGS_DF && !TARGET_VSX)
4136 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4137 error ("-mupper-regs-df requires -mvsx");
4138 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4141 if (TARGET_UPPER_REGS_DI && !TARGET_VSX)
4143 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI)
4144 error ("-mupper-regs-di requires -mvsx");
4145 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4148 if (TARGET_UPPER_REGS_SF && !TARGET_P8_VECTOR)
4150 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4151 error ("-mupper-regs-sf requires -mpower8-vector");
4152 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4155 /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
4156 silently turn off quad memory mode. */
4157 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4159 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4160 warning (0, N_("-mquad-memory requires 64-bit mode"));
4162 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4163 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4165 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4166 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4169 /* Non-atomic quad memory load/store are disabled for little endian, since
4170 the words are reversed, but atomic operations can still be done by
4171 swapping the words. */
4172 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4174 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4175 warning (0, N_("-mquad-memory is not available in little endian mode"));
4177 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4180 /* Assume that if the user asked for normal quad memory instructions, they
4181 want the atomic versions as well, unless they explicitly told us not to use
4182 quad word atomic instructions. */
4183 if (TARGET_QUAD_MEMORY
4184 && !TARGET_QUAD_MEMORY_ATOMIC
4185 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4186 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
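/* Example (illustrative): "-m64 -mquad-memory" on a big-endian target thus
   behaves like -mquad-memory -mquad-memory-atomic, enabling both the lq/stq
   and the lqarx/stqcx. forms, while adding -mno-quad-memory-atomic keeps
   only the non-atomic instructions.  */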
4188 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4189 generating power8 instructions. */
4190 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4191 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4192 & OPTION_MASK_P8_FUSION);
4194 /* Setting additional fusion flags turns on base fusion. */
4195 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4197 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4199 if (TARGET_P8_FUSION_SIGN)
4200 error ("-mpower8-fusion-sign requires -mpower8-fusion");
4202 if (TARGET_TOC_FUSION)
4203 error ("-mtoc-fusion requires -mpower8-fusion");
4205 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4207 else
4208 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4211 /* Power9 fusion is a superset of power8 fusion. */
4212 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4214 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4216 /* We prefer to not mention undocumented options in
4217 error messages. However, if users have managed to select
4218 power9-fusion without selecting power8-fusion, they
4219 already know about undocumented flags. */
4220 error ("-mpower9-fusion requires -mpower8-fusion");
4221 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4223 else
4224 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4227 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4228 generating power9 instructions. */
4229 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4230 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4231 & OPTION_MASK_P9_FUSION);
4233 /* Power8 does not fuse sign-extended loads with the addis. If we are
4234 optimizing at high levels for speed, convert a sign-extended load into a
4235 zero-extending load plus an explicit sign extension. */
4236 if (TARGET_P8_FUSION
4237 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4238 && optimize_function_for_speed_p (cfun)
4239 && optimize >= 3)
4240 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4242 /* TOC fusion requires 64-bit and medium/large code model. */
4243 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4245 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4246 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4247 warning (0, N_("-mtoc-fusion requires 64-bit"));
4250 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4252 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4253 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4254 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4257 /* Turn on -mtoc-fusion by default with p8-fusion on 64-bit and a
4258 medium/large code model. */
4259 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4260 && (TARGET_CMODEL != CMODEL_SMALL)
4261 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4262 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4264 /* ISA 3.0 vector instructions include ISA 2.07. */
4265 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4267 /* We prefer to not mention undocumented options in
4268 error messages. However, if users have managed to select
4269 power9-vector without selecting power8-vector, they
4270 already know about undocumented flags. */
4271 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4272 error ("-mpower9-vector requires -mpower8-vector");
4273 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4276 /* -mpower9-dform turns on both -mpower9-dform-scalar and
4277 -mpower9-dform-vector. */
4278 if (TARGET_P9_DFORM_BOTH > 0)
4280 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4281 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
4283 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4284 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_SCALAR;
4286 else if (TARGET_P9_DFORM_BOTH == 0)
4288 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4289 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
4291 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4292 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4295 /* ISA 3.0 D-form instructions require p9-vector and upper-regs. */
4296 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR) && !TARGET_P9_VECTOR)
4298 /* We prefer to not mention undocumented options in
4299 error messages. However, if users have managed to select
4300 power9-dform without selecting power9-vector, they
4301 already know about undocumented flags. */
4302 if (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4303 error ("-mpower9-dform requires -mpower9-vector");
4304 rs6000_isa_flags &= ~(OPTION_MASK_P9_DFORM_SCALAR
4305 | OPTION_MASK_P9_DFORM_VECTOR);
4308 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_DF)
4310 /* We prefer to not mention undocumented options in
4311 error messages. However, if users have managed to select
4312 power9-dform without selecting upper-regs-df, they
4313 already know about undocumented flags. */
4314 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4315 error ("-mpower9-dform requires -mupper-regs-df");
4316 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4319 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_SF)
4321 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4322 error ("-mpower9-dform requires -mupper-regs-sf");
4323 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4326 /* Enable LRA by default. */
4327 if ((rs6000_isa_flags_explicit & OPTION_MASK_LRA) == 0)
4328 rs6000_isa_flags |= OPTION_MASK_LRA;
4330 /* There have been bugs with -mvsx-timode that don't show up with -mlra,
4331 but do show up with -mno-lra.  Given that -mlra will become the default once
4332 PR 69847 is fixed, turn off the options with problems by default if
4333 -mno-lra was used, and warn if the user explicitly asked for the option.
4335 Enable -mpower9-dform-vector by default if LRA and other power9 options.
4336 Enable -mvsx-timode by default if LRA and VSX. */
4337 if (!TARGET_LRA)
4339 if (TARGET_VSX_TIMODE)
4341 if ((rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) != 0)
4342 warning (0, "-mvsx-timode might need -mlra");
4344 else
4345 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4349 else
4351 if (TARGET_VSX && !TARGET_VSX_TIMODE
4352 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
4353 rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
4356 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4357 support.  If we only have ISA 2.06 support, and the user did not specify
4358 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4359 but we don't enable the full vectorization support.  */
4360 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4361 TARGET_ALLOW_MOVMISALIGN = 1;
4363 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4365 if (TARGET_ALLOW_MOVMISALIGN > 0
4366 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4367 error ("-mallow-movmisalign requires -mvsx");
4369 TARGET_ALLOW_MOVMISALIGN = 0;
4372 /* Determine when unaligned vector accesses are permitted, and when
4373 they are preferred over masked Altivec loads. Note that if
4374 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4375 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4376 not true. */
4377 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4379 if (!TARGET_VSX)
4381 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4382 error ("-mefficient-unaligned-vsx requires -mvsx");
4384 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4387 else if (!TARGET_ALLOW_MOVMISALIGN)
4389 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4390 error ("-mefficient-unaligned-vsx requires -mallow-movmisalign");
4392 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4396 /* __float128 requires VSX support. */
4397 if (TARGET_FLOAT128 && !TARGET_VSX)
4399 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128) != 0)
4400 error ("-mfloat128 requires VSX support");
4402 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128 | OPTION_MASK_FLOAT128_HW);
4405 /* If we have -mfloat128 and full ISA 3.0 support, enable -mfloat128-hardware
4406 by default. */
4407 if (TARGET_FLOAT128 && !TARGET_FLOAT128_HW
4408 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4409 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4411 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4412 if ((rs6000_isa_flags & OPTION_MASK_FLOAT128) != 0)
4413 rs6000_isa_flags_explicit |= OPTION_MASK_FLOAT128_HW;
4416 /* IEEE 128-bit floating point hardware instructions imply enabling
4417 __float128. */
4418 if (TARGET_FLOAT128_HW
4419 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4421 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4422 error ("-mfloat128-hardware requires full ISA 3.0 support");
4424 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4427 if (TARGET_FLOAT128_HW
4428 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128) == 0)
4429 rs6000_isa_flags |= OPTION_MASK_FLOAT128;
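/* Example (illustrative): -mcpu=power9 -mfloat128 satisfies
   ISA_3_0_MASKS_IEEE, so -mfloat128-hardware comes on automatically; the
   same -mfloat128 request on a power8 falls back to the software emulation
   routines instead.  */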
4431 /* Print the options after updating the defaults. */
4432 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4433 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4435 /* E500mc does "better" if we inline more aggressively. Respect the
4436 user's opinion, though. */
4437 if (rs6000_block_move_inline_limit == 0
4438 && (rs6000_cpu == PROCESSOR_PPCE500MC
4439 || rs6000_cpu == PROCESSOR_PPCE500MC64
4440 || rs6000_cpu == PROCESSOR_PPCE5500
4441 || rs6000_cpu == PROCESSOR_PPCE6500))
4442 rs6000_block_move_inline_limit = 128;
4444 /* store_one_arg depends on expand_block_move to handle at least the
4445 size of reg_parm_stack_space. */
4446 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4447 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4449 if (global_init_p)
4451 /* If the appropriate debug option is enabled, replace the target hooks
4452 with debug versions that call the real version and then print
4453 debugging information. */
4454 if (TARGET_DEBUG_COST)
4456 targetm.rtx_costs = rs6000_debug_rtx_costs;
4457 targetm.address_cost = rs6000_debug_address_cost;
4458 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4461 if (TARGET_DEBUG_ADDR)
4463 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4464 targetm.legitimize_address = rs6000_debug_legitimize_address;
4465 rs6000_secondary_reload_class_ptr
4466 = rs6000_debug_secondary_reload_class;
4467 rs6000_secondary_memory_needed_ptr
4468 = rs6000_debug_secondary_memory_needed;
4469 rs6000_cannot_change_mode_class_ptr
4470 = rs6000_debug_cannot_change_mode_class;
4471 rs6000_preferred_reload_class_ptr
4472 = rs6000_debug_preferred_reload_class;
4473 rs6000_legitimize_reload_address_ptr
4474 = rs6000_debug_legitimize_reload_address;
4475 rs6000_mode_dependent_address_ptr
4476 = rs6000_debug_mode_dependent_address;
4479 if (rs6000_veclibabi_name)
4481 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4482 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4483 else
4485 error ("unknown vectorization library ABI type (%s) for "
4486 "-mveclibabi= switch", rs6000_veclibabi_name);
4487 ret = false;
4492 if (!global_options_set.x_rs6000_long_double_type_size)
4494 if (main_target_opt != NULL
4495 && (main_target_opt->x_rs6000_long_double_type_size
4496 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4497 error ("target attribute or pragma changes long double size");
4498 else
4499 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4502 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4503 if (!global_options_set.x_rs6000_ieeequad)
4504 rs6000_ieeequad = 1;
4505 #endif
4507 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4508 target attribute or pragma which automatically enables both options,
4509 unless the altivec ABI was set. This is set by default for 64-bit, but
4510 not for 32-bit. */
4511 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4512 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4513 | OPTION_MASK_FLOAT128)
4514 & ~rs6000_isa_flags_explicit);
4516 /* Enable Altivec ABI for AIX -maltivec. */
4517 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4519 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4520 error ("target attribute or pragma changes AltiVec ABI");
4521 else
4522 rs6000_altivec_abi = 1;
4525 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4526 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4527 be explicitly overridden in either case. */
4528 if (TARGET_ELF)
4530 if (!global_options_set.x_rs6000_altivec_abi
4531 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4533 if (main_target_opt != NULL &&
4534 !main_target_opt->x_rs6000_altivec_abi)
4535 error ("target attribute or pragma changes AltiVec ABI");
4536 else
4537 rs6000_altivec_abi = 1;
4541 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4542 So far, the only darwin64 targets are also MACH-O. */
4543 if (TARGET_MACHO
4544 && DEFAULT_ABI == ABI_DARWIN
4545 && TARGET_64BIT)
4547 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4548 error ("target attribute or pragma changes darwin64 ABI");
4549 else
4551 rs6000_darwin64_abi = 1;
4552 /* Default to natural alignment, for better performance. */
4553 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4557 /* Place FP constants in the constant pool instead of TOC
4558 if section anchors enabled. */
4559 if (flag_section_anchors
4560 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4561 TARGET_NO_FP_IN_TOC = 1;
4563 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4564 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4566 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4567 SUBTARGET_OVERRIDE_OPTIONS;
4568 #endif
4569 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4570 SUBSUBTARGET_OVERRIDE_OPTIONS;
4571 #endif
4572 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4573 SUB3TARGET_OVERRIDE_OPTIONS;
4574 #endif
4576 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4577 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4579 /* For the E500 family of cores, reset the single/double FP flags to let us
4580 check that they remain constant across attributes or pragmas.  Also
4581 clear a possible request for string instructions, which are not supported
4582 and which we might have silently enabled above for -Os.
4584 For other families, clear ISEL in case it was set implicitly.
4587 switch (rs6000_cpu)
4589 case PROCESSOR_PPC8540:
4590 case PROCESSOR_PPC8548:
4591 case PROCESSOR_PPCE500MC:
4592 case PROCESSOR_PPCE500MC64:
4593 case PROCESSOR_PPCE5500:
4594 case PROCESSOR_PPCE6500:
4596 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
4597 rs6000_double_float = TARGET_E500_DOUBLE;
4599 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4601 break;
4603 default:
4605 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4606 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4608 break;
4611 if (main_target_opt)
4613 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4614 error ("target attribute or pragma changes single precision floating "
4615 "point");
4616 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4617 error ("target attribute or pragma changes double precision floating "
4618 "point");
4621 /* Detect invalid option combinations with E500. */
4622 CHECK_E500_OPTIONS;
4624 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4625 && rs6000_cpu != PROCESSOR_POWER5
4626 && rs6000_cpu != PROCESSOR_POWER6
4627 && rs6000_cpu != PROCESSOR_POWER7
4628 && rs6000_cpu != PROCESSOR_POWER8
4629 && rs6000_cpu != PROCESSOR_POWER9
4630 && rs6000_cpu != PROCESSOR_PPCA2
4631 && rs6000_cpu != PROCESSOR_CELL
4632 && rs6000_cpu != PROCESSOR_PPC476);
4633 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4634 || rs6000_cpu == PROCESSOR_POWER5
4635 || rs6000_cpu == PROCESSOR_POWER7
4636 || rs6000_cpu == PROCESSOR_POWER8);
4637 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4638 || rs6000_cpu == PROCESSOR_POWER5
4639 || rs6000_cpu == PROCESSOR_POWER6
4640 || rs6000_cpu == PROCESSOR_POWER7
4641 || rs6000_cpu == PROCESSOR_POWER8
4642 || rs6000_cpu == PROCESSOR_POWER9
4643 || rs6000_cpu == PROCESSOR_PPCE500MC
4644 || rs6000_cpu == PROCESSOR_PPCE500MC64
4645 || rs6000_cpu == PROCESSOR_PPCE5500
4646 || rs6000_cpu == PROCESSOR_PPCE6500);
4648 /* Allow debug switches to override the above settings. These are set to -1
4649 in rs6000.opt to indicate the user hasn't directly set the switch. */
4650 if (TARGET_ALWAYS_HINT >= 0)
4651 rs6000_always_hint = TARGET_ALWAYS_HINT;
4653 if (TARGET_SCHED_GROUPS >= 0)
4654 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4656 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4657 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4659 rs6000_sched_restricted_insns_priority
4660 = (rs6000_sched_groups ? 1 : 0);
4662 /* Handle -msched-costly-dep option. */
4663 rs6000_sched_costly_dep
4664 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4666 if (rs6000_sched_costly_dep_str)
4668 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4669 rs6000_sched_costly_dep = no_dep_costly;
4670 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4671 rs6000_sched_costly_dep = all_deps_costly;
4672 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4673 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4674 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4675 rs6000_sched_costly_dep = store_to_load_dep_costly;
4676 else
4677 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4678 atoi (rs6000_sched_costly_dep_str));
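/* Example (illustrative): -msched-costly-dep=true_store_to_load selects the
   enumerator directly, while a numeric argument such as
   -msched-costly-dep=5 goes through atoi and acts as a latency threshold at
   or above which a dependence is treated as costly.  */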
4681 /* Handle -minsert-sched-nops option. */
4682 rs6000_sched_insert_nops
4683 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4685 if (rs6000_sched_insert_nops_str)
4687 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4688 rs6000_sched_insert_nops = sched_finish_none;
4689 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4690 rs6000_sched_insert_nops = sched_finish_pad_groups;
4691 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4692 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4693 else
4694 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4695 atoi (rs6000_sched_insert_nops_str));
4698 if (global_init_p)
4700 #ifdef TARGET_REGNAMES
4701 /* If the user desires alternate register names, copy in the
4702 alternate names now. */
4703 if (TARGET_REGNAMES)
4704 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4705 #endif
4707 /* Set aix_struct_return last, after the ABI is determined.
4708 If -maix-struct-return or -msvr4-struct-return was explicitly
4709 used, don't override with the ABI default. */
4710 if (!global_options_set.x_aix_struct_return)
4711 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4713 #if 0
4714 /* IBM XL compiler defaults to unsigned bitfields. */
4715 if (TARGET_XL_COMPAT)
4716 flag_signed_bitfields = 0;
4717 #endif
4719 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4720 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4722 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4724 /* We can only guarantee the availability of DI pseudo-ops when
4725 assembling for 64-bit targets. */
4726 if (!TARGET_64BIT)
4728 targetm.asm_out.aligned_op.di = NULL;
4729 targetm.asm_out.unaligned_op.di = NULL;
4733 /* Set branch target alignment, if not optimizing for size. */
4734 if (!optimize_size)
4736 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
4737 aligned 8-byte to avoid misprediction by the branch predictor. */
4738 if (rs6000_cpu == PROCESSOR_TITAN
4739 || rs6000_cpu == PROCESSOR_CELL)
4741 if (align_functions <= 0)
4742 align_functions = 8;
4743 if (align_jumps <= 0)
4744 align_jumps = 8;
4745 if (align_loops <= 0)
4746 align_loops = 8;
4748 if (rs6000_align_branch_targets)
4750 if (align_functions <= 0)
4751 align_functions = 16;
4752 if (align_jumps <= 0)
4753 align_jumps = 16;
4754 if (align_loops <= 0)
4756 can_override_loop_align = 1;
4757 align_loops = 16;
4760 if (align_jumps_max_skip <= 0)
4761 align_jumps_max_skip = 15;
4762 if (align_loops_max_skip <= 0)
4763 align_loops_max_skip = 15;
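/* Example (illustrative): when tuning for power8, rs6000_align_branch_targets
   is true, so functions, jump targets and loops default to 16-byte alignment
   with up to 15 bytes of nop padding, whereas a Cell or Titan tune settles
   for 8-byte alignment.  */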
4766 /* Arrange to save and restore machine status around nested functions. */
4767 init_machine_status = rs6000_init_machine_status;
4769 /* We should always be splitting complex arguments, but we can't break
4770 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4771 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4772 targetm.calls.split_complex_arg = NULL;
4775 /* Initialize rs6000_cost with the appropriate target costs. */
4776 if (optimize_size)
4777 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4778 else
4779 switch (rs6000_cpu)
4781 case PROCESSOR_RS64A:
4782 rs6000_cost = &rs64a_cost;
4783 break;
4785 case PROCESSOR_MPCCORE:
4786 rs6000_cost = &mpccore_cost;
4787 break;
4789 case PROCESSOR_PPC403:
4790 rs6000_cost = &ppc403_cost;
4791 break;
4793 case PROCESSOR_PPC405:
4794 rs6000_cost = &ppc405_cost;
4795 break;
4797 case PROCESSOR_PPC440:
4798 rs6000_cost = &ppc440_cost;
4799 break;
4801 case PROCESSOR_PPC476:
4802 rs6000_cost = &ppc476_cost;
4803 break;
4805 case PROCESSOR_PPC601:
4806 rs6000_cost = &ppc601_cost;
4807 break;
4809 case PROCESSOR_PPC603:
4810 rs6000_cost = &ppc603_cost;
4811 break;
4813 case PROCESSOR_PPC604:
4814 rs6000_cost = &ppc604_cost;
4815 break;
4817 case PROCESSOR_PPC604e:
4818 rs6000_cost = &ppc604e_cost;
4819 break;
4821 case PROCESSOR_PPC620:
4822 rs6000_cost = &ppc620_cost;
4823 break;
4825 case PROCESSOR_PPC630:
4826 rs6000_cost = &ppc630_cost;
4827 break;
4829 case PROCESSOR_CELL:
4830 rs6000_cost = &ppccell_cost;
4831 break;
4833 case PROCESSOR_PPC750:
4834 case PROCESSOR_PPC7400:
4835 rs6000_cost = &ppc750_cost;
4836 break;
4838 case PROCESSOR_PPC7450:
4839 rs6000_cost = &ppc7450_cost;
4840 break;
4842 case PROCESSOR_PPC8540:
4843 case PROCESSOR_PPC8548:
4844 rs6000_cost = &ppc8540_cost;
4845 break;
4847 case PROCESSOR_PPCE300C2:
4848 case PROCESSOR_PPCE300C3:
4849 rs6000_cost = &ppce300c2c3_cost;
4850 break;
4852 case PROCESSOR_PPCE500MC:
4853 rs6000_cost = &ppce500mc_cost;
4854 break;
4856 case PROCESSOR_PPCE500MC64:
4857 rs6000_cost = &ppce500mc64_cost;
4858 break;
4860 case PROCESSOR_PPCE5500:
4861 rs6000_cost = &ppce5500_cost;
4862 break;
4864 case PROCESSOR_PPCE6500:
4865 rs6000_cost = &ppce6500_cost;
4866 break;
4868 case PROCESSOR_TITAN:
4869 rs6000_cost = &titan_cost;
4870 break;
4872 case PROCESSOR_POWER4:
4873 case PROCESSOR_POWER5:
4874 rs6000_cost = &power4_cost;
4875 break;
4877 case PROCESSOR_POWER6:
4878 rs6000_cost = &power6_cost;
4879 break;
4881 case PROCESSOR_POWER7:
4882 rs6000_cost = &power7_cost;
4883 break;
4885 case PROCESSOR_POWER8:
4886 rs6000_cost = &power8_cost;
4887 break;
4889 case PROCESSOR_POWER9:
4890 rs6000_cost = &power9_cost;
4891 break;
4893 case PROCESSOR_PPCA2:
4894 rs6000_cost = &ppca2_cost;
4895 break;
4897 default:
4898 gcc_unreachable ();
4901 if (global_init_p)
4903 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4904 rs6000_cost->simultaneous_prefetches,
4905 global_options.x_param_values,
4906 global_options_set.x_param_values);
4907 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4908 global_options.x_param_values,
4909 global_options_set.x_param_values);
4910 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4911 rs6000_cost->cache_line_size,
4912 global_options.x_param_values,
4913 global_options_set.x_param_values);
4914 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4915 global_options.x_param_values,
4916 global_options_set.x_param_values);
4918 /* Increase loop peeling limits based on performance analysis. */
4919 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4920 global_options.x_param_values,
4921 global_options_set.x_param_values);
4922 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4923 global_options.x_param_values,
4924 global_options_set.x_param_values);
4926 /* If using typedef char *va_list, signal that
4927 __builtin_va_start (&ap, 0) can be optimized to
4928 ap = __builtin_next_arg (0). */
4929 if (DEFAULT_ABI != ABI_V4)
4930 targetm.expand_builtin_va_start = NULL;
4933 /* Set up single/double float flags.
4934 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
4935 then set both flags. */
4936 if (TARGET_HARD_FLOAT && TARGET_FPRS
4937 && rs6000_single_float == 0 && rs6000_double_float == 0)
4938 rs6000_single_float = rs6000_double_float = 1;
4940 /* If not explicitly specified via option, decide whether to generate indexed
4941 load/store instructions. */
4942 if (TARGET_AVOID_XFORM == -1)
4943 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4944 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4945 need indexed accesses and the type used is the scalar type of the element
4946 being loaded or stored. */
4947 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
4948 && !TARGET_ALTIVEC);
4950 /* Set the -mrecip options. */
4951 if (rs6000_recip_name)
4953 char *p = ASTRDUP (rs6000_recip_name);
4954 char *q;
4955 unsigned int mask, i;
4956 bool invert;
4958 while ((q = strtok (p, ",")) != NULL)
4960 p = NULL;
4961 if (*q == '!')
4963 invert = true;
4964 q++;
4966 else
4967 invert = false;
4969 if (!strcmp (q, "default"))
4970 mask = ((TARGET_RECIP_PRECISION)
4971 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4972 else
4974 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4975 if (!strcmp (q, recip_options[i].string))
4977 mask = recip_options[i].mask;
4978 break;
4981 if (i == ARRAY_SIZE (recip_options))
4983 error ("unknown option for -mrecip=%s", q);
4984 invert = false;
4985 mask = 0;
4986 ret = false;
4990 if (invert)
4991 rs6000_recip_control &= ~mask;
4992 else
4993 rs6000_recip_control |= mask;
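/* Example (illustrative): -mrecip=rsqrtf,!divd is split by strtok into
   "rsqrtf" and "!divd".  The first token ORs the rsqrtf entry of
   recip_options (the SFmode and V4SFmode rsqrt bits) into
   rs6000_recip_control, and the leading '!' on the second clears the divd
   bits instead of setting them.  */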
4997 /* Set the builtin mask of the various options used that could affect which
4998 builtins were used. In the past we used target_flags, but we've run out
4999 of bits, and some options like SPE and PAIRED are no longer in
5000 target_flags. */
5001 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5002 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5003 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5004 rs6000_builtin_mask);
5006 /* Initialize all of the registers. */
5007 rs6000_init_hard_regno_mode_ok (global_init_p);
5009 /* Save the initial options in case the user does function specific options */
5010 if (global_init_p)
5011 target_option_default_node = target_option_current_node
5012 = build_target_option_node (&global_options);
5014 /* If not explicitly specified via option, decide whether to generate the
5015 extra blr's required to preserve the link stack on some cpus (eg, 476). */
5016 if (TARGET_LINK_STACK == -1)
5017 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5019 return ret;
5022 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5023 define the target cpu type. */
5025 static void
5026 rs6000_option_override (void)
5028 (void) rs6000_option_override_internal (true);
5030 /* Register machine-specific passes. This needs to be done at start-up.
5031 It's convenient to do it here (like i386 does). */
5032 opt_pass *pass_analyze_swaps = make_pass_analyze_swaps (g);
5034 struct register_pass_info analyze_swaps_info
5035 = { pass_analyze_swaps, "cse1", 1, PASS_POS_INSERT_BEFORE };
5037 register_pass (&analyze_swaps_info);
5041 /* Implement targetm.vectorize.builtin_mask_for_load. */
5042 static tree
5043 rs6000_builtin_mask_for_load (void)
5045 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5046 if ((TARGET_ALTIVEC && !TARGET_VSX)
5047 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5048 return altivec_builtin_mask_for_load;
5049 else
5050 return 0;
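/* Roughly: on targets where the builtin is returned above, the vectorizer
   realigns a misaligned load by loading the two aligned quadwords that
   straddle the address and merging them with vperm, using the permute
   control vector that lvsl builds from the low-order address bits.  */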
5053 /* Implement LOOP_ALIGN. */
5054 int
5055 rs6000_loop_align (rtx label)
5057 basic_block bb;
5058 int ninsns;
5060 /* Don't override loop alignment if -falign-loops was specified. */
5061 if (!can_override_loop_align)
5062 return align_loops_log;
5064 bb = BLOCK_FOR_INSN (label);
5065 ninsns = num_loop_insns (bb->loop_father);
5067 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
5068 if (ninsns > 4 && ninsns <= 8
5069 && (rs6000_cpu == PROCESSOR_POWER4
5070 || rs6000_cpu == PROCESSOR_POWER5
5071 || rs6000_cpu == PROCESSOR_POWER6
5072 || rs6000_cpu == PROCESSOR_POWER7
5073 || rs6000_cpu == PROCESSOR_POWER8
5074 || rs6000_cpu == PROCESSOR_POWER9))
5075 return 5;
5076 else
5077 return align_loops_log;
5080 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5081 static int
5082 rs6000_loop_align_max_skip (rtx_insn *label)
5084 return (1 << rs6000_loop_align (label)) - 1;
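/* E.g., a loop aligned to 2^5 == 32 bytes may skip at most
   (1 << 5) - 1 == 31 bytes of padding.  */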
5087 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5088 after applying N iterations. This routine does not determine
5089 how many iterations are required to reach the desired alignment. */
5091 static bool
5092 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5094 if (is_packed)
5095 return false;
5097 if (TARGET_32BIT)
5099 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5100 return true;
5102 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5103 return true;
5105 return false;
5107 else
5109 if (TARGET_MACHO)
5110 return false;
5112 /* Assume that all other types are naturally aligned. CHECKME! */
5113 return true;
5117 /* Return true if the vector misalignment factor is supported by the
5118 target. */
5119 static bool
5120 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5121 const_tree type,
5122 int misalignment,
5123 bool is_packed)
5125 if (TARGET_VSX)
5127 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5128 return true;
5130 /* Return false if the movmisalign pattern is not supported for this mode. */
5131 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5132 return false;
5134 if (misalignment == -1)
5136 /* Misalignment factor is unknown at compile time but we know
5137 it's word aligned. */
5138 if (rs6000_vector_alignment_reachable (type, is_packed))
5140 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5142 if (element_size == 64 || element_size == 32)
5143 return true;
5146 return false;
5149 /* VSX supports word-aligned vectors. */
5150 if (misalignment % 4 == 0)
5151 return true;
5153 return false;
5156 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5157 static int
5158 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5159 tree vectype, int misalign)
5161 unsigned elements;
5162 tree elem_type;
5164 switch (type_of_cost)
5166 case scalar_stmt:
5167 case scalar_load:
5168 case scalar_store:
5169 case vector_stmt:
5170 case vector_load:
5171 case vector_store:
5172 case vec_to_scalar:
5173 case scalar_to_vec:
5174 case cond_branch_not_taken:
5175 return 1;
5177 case vec_perm:
5178 if (TARGET_VSX)
5179 return 3;
5180 else
5181 return 1;
5183 case vec_promote_demote:
5184 if (TARGET_VSX)
5185 return 4;
5186 else
5187 return 1;
5189 case cond_branch_taken:
5190 return 3;
5192 case unaligned_load:
5193 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5194 return 1;
5196 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5198 elements = TYPE_VECTOR_SUBPARTS (vectype);
5199 if (elements == 2)
5200 /* Double word aligned. */
5201 return 2;
5203 if (elements == 4)
5205 switch (misalign)
5207 case 8:
5208 /* Double word aligned. */
5209 return 2;
5211 case -1:
5212 /* Unknown misalignment. */
5213 case 4:
5214 case 12:
5215 /* Word aligned. */
5216 return 22;
5218 default:
5219 gcc_unreachable ();
5224 if (TARGET_ALTIVEC)
5225 /* Misaligned loads are not supported. */
5226 gcc_unreachable ();
5228 return 2;
5230 case unaligned_store:
5231 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5232 return 1;
5234 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5236 elements = TYPE_VECTOR_SUBPARTS (vectype);
5237 if (elements == 2)
5238 /* Double word aligned. */
5239 return 2;
5241 if (elements == 4)
5243 switch (misalign)
5245 case 8:
5246 /* Double word aligned. */
5247 return 2;
5249 case -1:
5250 /* Unknown misalignment. */
5251 case 4:
5252 case 12:
5253 /* Word aligned. */
5254 return 23;
5256 default:
5257 gcc_unreachable ();
5262 if (TARGET_ALTIVEC)
5263 /* Misaligned stores are not supported. */
5264 gcc_unreachable ();
5266 return 2;
5268 case vec_construct:
5269 elements = TYPE_VECTOR_SUBPARTS (vectype);
5270 elem_type = TREE_TYPE (vectype);
5271 /* 32-bit floats loaded into registers are stored as double
5272 precision, so we need n/2 converts in addition to the usual
5273 n/2 merges to construct a vector of short floats from them. */
5274 if (SCALAR_FLOAT_TYPE_P (elem_type)
5275 && TYPE_PRECISION (elem_type) == 32)
5276 return elements + 1;
5277 else
5278 return elements / 2 + 1;
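      /* Worked example: a V4SF built from four 32-bit floats costs
	 4 + 1 == 5 here, while a V2DF costs 2 / 2 + 1 == 2.  */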
5280 default:
5281 gcc_unreachable ();
5285 /* Implement targetm.vectorize.preferred_simd_mode. */
5287 static machine_mode
5288 rs6000_preferred_simd_mode (machine_mode mode)
5290 if (TARGET_VSX)
5291 switch (mode)
5293 case DFmode:
5294 return V2DFmode;
5295 default:;
5297 if (TARGET_ALTIVEC || TARGET_VSX)
5298 switch (mode)
5300 case SFmode:
5301 return V4SFmode;
5302 case TImode:
5303 return V1TImode;
5304 case DImode:
5305 return V2DImode;
5306 case SImode:
5307 return V4SImode;
5308 case HImode:
5309 return V8HImode;
5310 case QImode:
5311 return V16QImode;
5312 default:;
5314 if (TARGET_SPE)
5315 switch (mode)
5317 case SFmode:
5318 return V2SFmode;
5319 case SImode:
5320 return V2SImode;
5321 default:;
5323 if (TARGET_PAIRED_FLOAT
5324 && mode == SFmode)
5325 return V2SFmode;
5326 return word_mode;
5329 typedef struct _rs6000_cost_data
5331 struct loop *loop_info;
5332 unsigned cost[3];
5333 } rs6000_cost_data;
5335 /* Test for likely overcommitment of vector hardware resources. If a
5336 loop iteration is relatively large, and too large a percentage of
5337 instructions in the loop are vectorized, the cost model may not
5338 adequately reflect delays from unavailable vector resources.
5339 Penalize the loop body cost for this case. */
5341 static void
5342 rs6000_density_test (rs6000_cost_data *data)
5344 const int DENSITY_PCT_THRESHOLD = 85;
5345 const int DENSITY_SIZE_THRESHOLD = 70;
5346 const int DENSITY_PENALTY = 10;
5347 struct loop *loop = data->loop_info;
5348 basic_block *bbs = get_loop_body (loop);
5349 int nbbs = loop->num_nodes;
5350 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5351 int i, density_pct;
5353 for (i = 0; i < nbbs; i++)
5355 basic_block bb = bbs[i];
5356 gimple_stmt_iterator gsi;
5358 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5360 gimple *stmt = gsi_stmt (gsi);
5361 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5363 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5364 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5365 not_vec_cost++;
5369 free (bbs);
5370 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5372 if (density_pct > DENSITY_PCT_THRESHOLD
5373 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5375 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5376 if (dump_enabled_p ())
5377 dump_printf_loc (MSG_NOTE, vect_location,
5378 "density %d%%, cost %d exceeds threshold, penalizing "
5379 "loop body cost by %d%%", density_pct,
5380 vec_cost + not_vec_cost, DENSITY_PENALTY);
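      /* Worked example: vec_cost == 90 and not_vec_cost == 10 gives a
	 density of 90% and a size of 100, exceeding both thresholds, so
	 the body cost becomes 90 * 110 / 100 == 99.  */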
5384 /* Implement targetm.vectorize.init_cost. */
5386 static void *
5387 rs6000_init_cost (struct loop *loop_info)
5389 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5390 data->loop_info = loop_info;
5391 data->cost[vect_prologue] = 0;
5392 data->cost[vect_body] = 0;
5393 data->cost[vect_epilogue] = 0;
5394 return data;
5397 /* Implement targetm.vectorize.add_stmt_cost. */
5399 static unsigned
5400 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5401 struct _stmt_vec_info *stmt_info, int misalign,
5402 enum vect_cost_model_location where)
5404 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5405 unsigned retval = 0;
5407 if (flag_vect_cost_model)
5409 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5410 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5411 misalign);
5412 /* Statements in an inner loop relative to the loop being
5413 vectorized are weighted more heavily. The value here is
5414 arbitrary and could potentially be improved with analysis. */
5415 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5416 count *= 50; /* FIXME. */
5418 retval = (unsigned) (count * stmt_cost);
5419 cost_data->cost[where] += retval;
5422 return retval;
5425 /* Implement targetm.vectorize.finish_cost. */
5427 static void
5428 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5429 unsigned *body_cost, unsigned *epilogue_cost)
5431 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5433 if (cost_data->loop_info)
5434 rs6000_density_test (cost_data);
5436 *prologue_cost = cost_data->cost[vect_prologue];
5437 *body_cost = cost_data->cost[vect_body];
5438 *epilogue_cost = cost_data->cost[vect_epilogue];
5441 /* Implement targetm.vectorize.destroy_cost_data. */
5443 static void
5444 rs6000_destroy_cost_data (void *data)
5446 free (data);
5449 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5450 library with vectorized intrinsics. */
5452 static tree
5453 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5454 tree type_in)
5456 char name[32];
5457 const char *suffix = NULL;
5458 tree fntype, new_fndecl, bdecl = NULL_TREE;
5459 int n_args = 1;
5460 const char *bname;
5461 machine_mode el_mode, in_mode;
5462 int n, in_n;
5464 /* Libmass is suitable for unsafe math only, as it does not correctly support
5465 parts of IEEE with the required precision such as denormals. Only support
5466 it if we have VSX to use the simd d2 or f4 functions.
5467 XXX: Add variable length support. */
5468 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5469 return NULL_TREE;
5471 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5472 n = TYPE_VECTOR_SUBPARTS (type_out);
5473 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5474 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5475 if (el_mode != in_mode
5476 || n != in_n)
5477 return NULL_TREE;
5479 switch (fn)
5481 CASE_CFN_ATAN2:
5482 CASE_CFN_HYPOT:
5483 CASE_CFN_POW:
5484 n_args = 2;
5485 /* fall through */
5487 CASE_CFN_ACOS:
5488 CASE_CFN_ACOSH:
5489 CASE_CFN_ASIN:
5490 CASE_CFN_ASINH:
5491 CASE_CFN_ATAN:
5492 CASE_CFN_ATANH:
5493 CASE_CFN_CBRT:
5494 CASE_CFN_COS:
5495 CASE_CFN_COSH:
5496 CASE_CFN_ERF:
5497 CASE_CFN_ERFC:
5498 CASE_CFN_EXP2:
5499 CASE_CFN_EXP:
5500 CASE_CFN_EXPM1:
5501 CASE_CFN_LGAMMA:
5502 CASE_CFN_LOG10:
5503 CASE_CFN_LOG1P:
5504 CASE_CFN_LOG2:
5505 CASE_CFN_LOG:
5506 CASE_CFN_SIN:
5507 CASE_CFN_SINH:
5508 CASE_CFN_SQRT:
5509 CASE_CFN_TAN:
5510 CASE_CFN_TANH:
5511 if (el_mode == DFmode && n == 2)
5513 bdecl = mathfn_built_in (double_type_node, fn);
5514 suffix = "d2"; /* pow -> powd2 */
5516 else if (el_mode == SFmode && n == 4)
5518 bdecl = mathfn_built_in (float_type_node, fn);
5519 suffix = "4"; /* powf -> powf4 */
5521 else
5522 return NULL_TREE;
5523 if (!bdecl)
5524 return NULL_TREE;
5525 break;
5527 default:
5528 return NULL_TREE;
5531 gcc_assert (suffix != NULL);
5532 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5533 if (!bname)
5534 return NULL_TREE;
5536 strcpy (name, bname + sizeof ("__builtin_") - 1);
5537 strcat (name, suffix);
5539 if (n_args == 1)
5540 fntype = build_function_type_list (type_out, type_in, NULL);
5541 else if (n_args == 2)
5542 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5543 else
5544 gcc_unreachable ();
5546 /* Build a function declaration for the vectorized function. */
5547 new_fndecl = build_decl (BUILTINS_LOCATION,
5548 FUNCTION_DECL, get_identifier (name), fntype);
5549 TREE_PUBLIC (new_fndecl) = 1;
5550 DECL_EXTERNAL (new_fndecl) = 1;
5551 DECL_IS_NOVOPS (new_fndecl) = 1;
5552 TREE_READONLY (new_fndecl) = 1;
5554 return new_fndecl;
5557 /* Returns a function decl for a vectorized version of the builtin function
5558 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5559 if it is not available. */
5561 static tree
5562 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5563 tree type_in)
5565 machine_mode in_mode, out_mode;
5566 int in_n, out_n;
5568 if (TARGET_DEBUG_BUILTIN)
5569 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5570 combined_fn_name (combined_fn (fn)),
5571 GET_MODE_NAME (TYPE_MODE (type_out)),
5572 GET_MODE_NAME (TYPE_MODE (type_in)));
5574 if (TREE_CODE (type_out) != VECTOR_TYPE
5575 || TREE_CODE (type_in) != VECTOR_TYPE
5576 || !TARGET_VECTORIZE_BUILTINS)
5577 return NULL_TREE;
5579 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5580 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5581 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5582 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5584 switch (fn)
5586 CASE_CFN_COPYSIGN:
5587 if (VECTOR_UNIT_VSX_P (V2DFmode)
5588 && out_mode == DFmode && out_n == 2
5589 && in_mode == DFmode && in_n == 2)
5590 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5591 if (VECTOR_UNIT_VSX_P (V4SFmode)
5592 && out_mode == SFmode && out_n == 4
5593 && in_mode == SFmode && in_n == 4)
5594 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5595 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5596 && out_mode == SFmode && out_n == 4
5597 && in_mode == SFmode && in_n == 4)
5598 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5599 break;
5600 CASE_CFN_CEIL:
5601 if (VECTOR_UNIT_VSX_P (V2DFmode)
5602 && out_mode == DFmode && out_n == 2
5603 && in_mode == DFmode && in_n == 2)
5604 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5605 if (VECTOR_UNIT_VSX_P (V4SFmode)
5606 && out_mode == SFmode && out_n == 4
5607 && in_mode == SFmode && in_n == 4)
5608 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5609 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5610 && out_mode == SFmode && out_n == 4
5611 && in_mode == SFmode && in_n == 4)
5612 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5613 break;
5614 CASE_CFN_FLOOR:
5615 if (VECTOR_UNIT_VSX_P (V2DFmode)
5616 && out_mode == DFmode && out_n == 2
5617 && in_mode == DFmode && in_n == 2)
5618 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5619 if (VECTOR_UNIT_VSX_P (V4SFmode)
5620 && out_mode == SFmode && out_n == 4
5621 && in_mode == SFmode && in_n == 4)
5622 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5623 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5624 && out_mode == SFmode && out_n == 4
5625 && in_mode == SFmode && in_n == 4)
5626 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5627 break;
5628 CASE_CFN_FMA:
5629 if (VECTOR_UNIT_VSX_P (V2DFmode)
5630 && out_mode == DFmode && out_n == 2
5631 && in_mode == DFmode && in_n == 2)
5632 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5633 if (VECTOR_UNIT_VSX_P (V4SFmode)
5634 && out_mode == SFmode && out_n == 4
5635 && in_mode == SFmode && in_n == 4)
5636 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5637 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5638 && out_mode == SFmode && out_n == 4
5639 && in_mode == SFmode && in_n == 4)
5640 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5641 break;
5642 CASE_CFN_TRUNC:
5643 if (VECTOR_UNIT_VSX_P (V2DFmode)
5644 && out_mode == DFmode && out_n == 2
5645 && in_mode == DFmode && in_n == 2)
5646 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5647 if (VECTOR_UNIT_VSX_P (V4SFmode)
5648 && out_mode == SFmode && out_n == 4
5649 && in_mode == SFmode && in_n == 4)
5650 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5651 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5652 && out_mode == SFmode && out_n == 4
5653 && in_mode == SFmode && in_n == 4)
5654 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5655 break;
5656 CASE_CFN_NEARBYINT:
5657 if (VECTOR_UNIT_VSX_P (V2DFmode)
5658 && flag_unsafe_math_optimizations
5659 && out_mode == DFmode && out_n == 2
5660 && in_mode == DFmode && in_n == 2)
5661 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5662 if (VECTOR_UNIT_VSX_P (V4SFmode)
5663 && flag_unsafe_math_optimizations
5664 && out_mode == SFmode && out_n == 4
5665 && in_mode == SFmode && in_n == 4)
5666 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5667 break;
5668 CASE_CFN_RINT:
5669 if (VECTOR_UNIT_VSX_P (V2DFmode)
5670 && !flag_trapping_math
5671 && out_mode == DFmode && out_n == 2
5672 && in_mode == DFmode && in_n == 2)
5673 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5674 if (VECTOR_UNIT_VSX_P (V4SFmode)
5675 && !flag_trapping_math
5676 && out_mode == SFmode && out_n == 4
5677 && in_mode == SFmode && in_n == 4)
5678 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5679 break;
5680 default:
5681 break;
5684 /* Generate calls to libmass if appropriate. */
5685 if (rs6000_veclib_handler)
5686 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5688 return NULL_TREE;
5691 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5693 static tree
5694 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5695 tree type_in)
5697 machine_mode in_mode, out_mode;
5698 int in_n, out_n;
5700 if (TARGET_DEBUG_BUILTIN)
5701 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5702 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5703 GET_MODE_NAME (TYPE_MODE (type_out)),
5704 GET_MODE_NAME (TYPE_MODE (type_in)));
5706 if (TREE_CODE (type_out) != VECTOR_TYPE
5707 || TREE_CODE (type_in) != VECTOR_TYPE
5708 || !TARGET_VECTORIZE_BUILTINS)
5709 return NULL_TREE;
5711 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5712 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5713 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5714 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5716 enum rs6000_builtins fn
5717 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5718 switch (fn)
5720 case RS6000_BUILTIN_RSQRTF:
5721 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5722 && out_mode == SFmode && out_n == 4
5723 && in_mode == SFmode && in_n == 4)
5724 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5725 break;
5726 case RS6000_BUILTIN_RSQRT:
5727 if (VECTOR_UNIT_VSX_P (V2DFmode)
5728 && out_mode == DFmode && out_n == 2
5729 && in_mode == DFmode && in_n == 2)
5730 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5731 break;
5732 case RS6000_BUILTIN_RECIPF:
5733 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5734 && out_mode == SFmode && out_n == 4
5735 && in_mode == SFmode && in_n == 4)
5736 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5737 break;
5738 case RS6000_BUILTIN_RECIP:
5739 if (VECTOR_UNIT_VSX_P (V2DFmode)
5740 && out_mode == DFmode && out_n == 2
5741 && in_mode == DFmode && in_n == 2)
5742 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5743 break;
5744 default:
5745 break;
5747 return NULL_TREE;
5750 /* Default CPU string for rs6000*_file_start functions. */
5751 static const char *rs6000_default_cpu;
5753 /* Do anything needed at the start of the asm file. */
5755 static void
5756 rs6000_file_start (void)
5758 char buffer[80];
5759 const char *start = buffer;
5760 FILE *file = asm_out_file;
5762 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5764 default_file_start ();
5766 if (flag_verbose_asm)
5768 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5770 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5772 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5773 start = "";
5776 if (global_options_set.x_rs6000_cpu_index)
5778 fprintf (file, "%s -mcpu=%s", start,
5779 processor_target_table[rs6000_cpu_index].name);
5780 start = "";
5783 if (global_options_set.x_rs6000_tune_index)
5785 fprintf (file, "%s -mtune=%s", start,
5786 processor_target_table[rs6000_tune_index].name);
5787 start = "";
5790 if (PPC405_ERRATUM77)
5792 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5793 start = "";
5796 #ifdef USING_ELFOS_H
5797 switch (rs6000_sdata)
5799 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5800 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5801 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5802 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5805 if (rs6000_sdata && g_switch_value)
5807 fprintf (file, "%s -G %d", start,
5808 g_switch_value);
5809 start = "";
5811 #endif
5813 if (*start == '\0')
5814 putc ('\n', file);
5817 #ifdef USING_ELFOS_H
5818 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5819 && !global_options_set.x_rs6000_cpu_index)
5821 fputs ("\t.machine ", asm_out_file);
5822 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5823 fputs ("power9\n", asm_out_file);
5824 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5825 fputs ("power8\n", asm_out_file);
5826 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5827 fputs ("power7\n", asm_out_file);
5828 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5829 fputs ("power6\n", asm_out_file);
5830 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5831 fputs ("power5\n", asm_out_file);
5832 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5833 fputs ("power4\n", asm_out_file);
5834 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5835 fputs ("ppc64\n", asm_out_file);
5836 else
5837 fputs ("ppc\n", asm_out_file);
5839 #endif
5841 if (DEFAULT_ABI == ABI_ELFv2)
5842 fprintf (file, "\t.abiversion 2\n");
5846 /* Return nonzero if this function is known to have a null epilogue. */
5848 int
5849 direct_return (void)
5851 if (reload_completed)
5853 rs6000_stack_t *info = rs6000_stack_info ();
5855 if (info->first_gp_reg_save == 32
5856 && info->first_fp_reg_save == 64
5857 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5858 && ! info->lr_save_p
5859 && ! info->cr_save_p
5860 && info->vrsave_size == 0
5861 && ! info->push_p)
5862 return 1;
5865 return 0;
5868 /* Return the number of instructions it takes to form a constant in an
5869 integer register. */
5871 static int
5872 num_insns_constant_wide (HOST_WIDE_INT value)
5874 /* signed constant loadable with addi */
5875 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5876 return 1;
5878 /* constant loadable with addis */
5879 else if ((value & 0xffff) == 0
5880 && (value >> 31 == -1 || value >> 31 == 0))
5881 return 1;
5883 else if (TARGET_POWERPC64)
5885 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5886 HOST_WIDE_INT high = value >> 31;
5888 if (high == 0 || high == -1)
5889 return 2;
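      /* Complete the extraction of the upper half: together with the
	 shift by 31 above, HIGH becomes value >> 32.  */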
5891 high >>= 1;
5893 if (low == 0)
5894 return num_insns_constant_wide (high) + 1;
5895 else if (high == 0)
5896 return num_insns_constant_wide (low) + 1;
5897 else
5898 return (num_insns_constant_wide (high)
5899 + num_insns_constant_wide (low) + 1);
5902 else
5903 return 2;
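/* For example, 0x1234 loads with a single addi; 0x12345678 needs
   lis + ori (2 insns); and an arbitrary 64-bit constant such as
   0x123456789abcdef0 costs 2 + 2 + 1 == 5 (lis/ori for each 32-bit half
   plus the shift that joins them).  */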
5906 int
5907 num_insns_constant (rtx op, machine_mode mode)
5909 HOST_WIDE_INT low, high;
5911 switch (GET_CODE (op))
5913 case CONST_INT:
5914 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5915 && rs6000_is_valid_and_mask (op, mode))
5916 return 2;
5917 else
5918 return num_insns_constant_wide (INTVAL (op));
5920 case CONST_WIDE_INT:
5922 int i;
5923 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5924 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5925 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5926 return ins;
5929 case CONST_DOUBLE:
5930 if (mode == SFmode || mode == SDmode)
5932 long l;
5934 if (DECIMAL_FLOAT_MODE_P (mode))
5935 REAL_VALUE_TO_TARGET_DECIMAL32
5936 (*CONST_DOUBLE_REAL_VALUE (op), l);
5937 else
5938 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5939 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5942 long l[2];
5943 if (DECIMAL_FLOAT_MODE_P (mode))
5944 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
5945 else
5946 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5947 high = l[WORDS_BIG_ENDIAN == 0];
5948 low = l[WORDS_BIG_ENDIAN != 0];
5950 if (TARGET_32BIT)
5951 return (num_insns_constant_wide (low)
5952 + num_insns_constant_wide (high));
5953 else
5955 if ((high == 0 && low >= 0)
5956 || (high == -1 && low < 0))
5957 return num_insns_constant_wide (low);
5959 else if (rs6000_is_valid_and_mask (op, mode))
5960 return 2;
5962 else if (low == 0)
5963 return num_insns_constant_wide (high) + 1;
5965 else
5966 return (num_insns_constant_wide (high)
5967 + num_insns_constant_wide (low) + 1);
5970 default:
5971 gcc_unreachable ();
5975 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5976 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5977 corresponding element of the vector, but for V4SFmode and V2SFmode,
5978 the corresponding "float" is interpreted as an SImode integer. */
5980 HOST_WIDE_INT
5981 const_vector_elt_as_int (rtx op, unsigned int elt)
5983 rtx tmp;
5985 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5986 gcc_assert (GET_MODE (op) != V2DImode
5987 && GET_MODE (op) != V2DFmode);
5989 tmp = CONST_VECTOR_ELT (op, elt);
5990 if (GET_MODE (op) == V4SFmode
5991 || GET_MODE (op) == V2SFmode)
5992 tmp = gen_lowpart (SImode, tmp);
5993 return INTVAL (tmp);
5996 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5997 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5998 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5999 all items are set to the same value and contain COPIES replicas of the
6000 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6001 operand and the others are set to the value of the operand's msb. */
6003 static bool
6004 vspltis_constant (rtx op, unsigned step, unsigned copies)
6006 machine_mode mode = GET_MODE (op);
6007 machine_mode inner = GET_MODE_INNER (mode);
6009 unsigned i;
6010 unsigned nunits;
6011 unsigned bitsize;
6012 unsigned mask;
6014 HOST_WIDE_INT val;
6015 HOST_WIDE_INT splat_val;
6016 HOST_WIDE_INT msb_val;
6018 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6019 return false;
6021 nunits = GET_MODE_NUNITS (mode);
6022 bitsize = GET_MODE_BITSIZE (inner);
6023 mask = GET_MODE_MASK (inner);
6025 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6026 splat_val = val;
6027 msb_val = val >= 0 ? 0 : -1;
6029 /* Construct the value to be splatted, if possible. If not, return 0. */
6030 for (i = 2; i <= copies; i *= 2)
6032 HOST_WIDE_INT small_val;
6033 bitsize /= 2;
6034 small_val = splat_val >> bitsize;
6035 mask >>= bitsize;
6036 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
6037 return false;
6038 splat_val = small_val;
6041 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6042 if (EASY_VECTOR_15 (splat_val))
6045 /* Also check if we can splat, and then add the result to itself. Do so if
6046 the value is positive, or if the splat instruction is using OP's mode;
6047 for splat_val < 0, the splat and the add should use the same mode. */
6048 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6049 && (splat_val >= 0 || (step == 1 && copies == 1)))
6052 /* Also check if we are loading up the most significant bit, which can be done by
6053 loading up -1 and shifting the value left by -1. */
6054 else if (EASY_VECTOR_MSB (splat_val, inner))
6057 else
6058 return false;
6060 /* Check if VAL is present in every STEP-th element, and the
6061 other elements are filled with its most significant bit. */
6062 for (i = 1; i < nunits; ++i)
6064 HOST_WIDE_INT desired_val;
6065 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6066 if ((i & (step - 1)) == 0)
6067 desired_val = val;
6068 else
6069 desired_val = msb_val;
6071 if (desired_val != const_vector_elt_as_int (op, elt))
6072 return false;
6075 return true;
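/* For example, the V4SImode constant { 0x50005, 0x50005, 0x50005, 0x50005 }
   is accepted with step == 1 and copies == 2, since "vspltish 5" writes 5
   into every halfword and thereby produces exactly that word pattern.  */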
6078 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6079 instruction, filling in the bottom elements with 0 or -1.
6081 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6082 for the number of zeroes to shift in, or negative for the number of 0xff
6083 bytes to shift in.
6085 OP is a CONST_VECTOR. */
6087 static int
6088 vspltis_shifted (rtx op)
6090 machine_mode mode = GET_MODE (op);
6091 machine_mode inner = GET_MODE_INNER (mode);
6093 unsigned i, j;
6094 unsigned nunits;
6095 unsigned mask;
6097 HOST_WIDE_INT val;
6099 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6100 return false;
6102 /* We need to create pseudo registers to do the shift, so don't recognize
6103 shift vector constants after reload. */
6104 if (!can_create_pseudo_p ())
6105 return false;
6107 nunits = GET_MODE_NUNITS (mode);
6108 mask = GET_MODE_MASK (inner);
6110 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6112 /* Check if the value can really be the operand of a vspltis[bhw]. */
6113 if (EASY_VECTOR_15 (val))
6116 /* Also check if we are loading up the most significant bit, which can be done
6117 by loading up -1 and shifting the value left by -1. */
6118 else if (EASY_VECTOR_MSB (val, inner))
6121 else
6122 return 0;
6124 /* Check if VAL is present in every STEP-th element until we find elements
6125 that are 0 or all 1 bits. */
6126 for (i = 1; i < nunits; ++i)
6128 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6129 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6131 /* If the value isn't the splat value, check for the remaining elements
6132 being 0/-1. */
6133 if (val != elt_val)
6135 if (elt_val == 0)
6137 for (j = i+1; j < nunits; ++j)
6139 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6140 if (const_vector_elt_as_int (op, elt2) != 0)
6141 return 0;
6144 return (nunits - i) * GET_MODE_SIZE (inner);
6147 else if ((elt_val & mask) == mask)
6149 for (j = i+1; j < nunits; ++j)
6151 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6152 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6153 return 0;
6156 return -((nunits - i) * GET_MODE_SIZE (inner));
6159 else
6160 return 0;
6164 /* If all elements are equal, we don't need to do VSLDOI. */
6165 return 0;
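/* For example, on a big-endian target the V4SImode constant { 5, 0, 0, 0 }
   returns 12 here: emit "vspltisw 5", then use VSLDOI to shift in 12 bytes
   of zeroes.  */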
6169 /* Return true if OP is of the given MODE and can be synthesized
6170 with a vspltisb, vspltish or vspltisw. */
6172 bool
6173 easy_altivec_constant (rtx op, machine_mode mode)
6175 unsigned step, copies;
6177 if (mode == VOIDmode)
6178 mode = GET_MODE (op);
6179 else if (mode != GET_MODE (op))
6180 return false;
6182 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6183 constants. */
6184 if (mode == V2DFmode)
6185 return zero_constant (op, mode);
6187 else if (mode == V2DImode)
6189 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6190 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6191 return false;
6193 if (zero_constant (op, mode))
6194 return true;
6196 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6197 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6198 return true;
6200 return false;
6203 /* V1TImode is a special container for TImode. Ignore for now. */
6204 else if (mode == V1TImode)
6205 return false;
6207 /* Start with a vspltisw. */
6208 step = GET_MODE_NUNITS (mode) / 4;
6209 copies = 1;
6211 if (vspltis_constant (op, step, copies))
6212 return true;
6214 /* Then try with a vspltish. */
6215 if (step == 1)
6216 copies <<= 1;
6217 else
6218 step >>= 1;
6220 if (vspltis_constant (op, step, copies))
6221 return true;
6223 /* And finally a vspltisb. */
6224 if (step == 1)
6225 copies <<= 1;
6226 else
6227 step >>= 1;
6229 if (vspltis_constant (op, step, copies))
6230 return true;
6232 if (vspltis_shifted (op) != 0)
6233 return true;
6235 return false;
6238 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6239 result is OP. Abort if it is not possible. */
6241 rtx
6242 gen_easy_altivec_constant (rtx op)
6244 machine_mode mode = GET_MODE (op);
6245 int nunits = GET_MODE_NUNITS (mode);
6246 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6247 unsigned step = nunits / 4;
6248 unsigned copies = 1;
6250 /* Start with a vspltisw. */
6251 if (vspltis_constant (op, step, copies))
6252 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6254 /* Then try with a vspltish. */
6255 if (step == 1)
6256 copies <<= 1;
6257 else
6258 step >>= 1;
6260 if (vspltis_constant (op, step, copies))
6261 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6263 /* And finally a vspltisb. */
6264 if (step == 1)
6265 copies <<= 1;
6266 else
6267 step >>= 1;
6269 if (vspltis_constant (op, step, copies))
6270 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6272 gcc_unreachable ();
6275 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6276 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6278 Return the number of instructions needed (1 or 2) in the location pointed
6279 to by NUM_INSNS_PTR.
6281 Return the constant that is being split via CONSTANT_PTR. */
6283 bool
6284 xxspltib_constant_p (rtx op,
6285 machine_mode mode,
6286 int *num_insns_ptr,
6287 int *constant_ptr)
6289 size_t nunits = GET_MODE_NUNITS (mode);
6290 size_t i;
6291 HOST_WIDE_INT value;
6292 rtx element;
6294 /* Set the returned values to out-of-bounds values. */
6295 *num_insns_ptr = -1;
6296 *constant_ptr = 256;
6298 if (!TARGET_P9_VECTOR)
6299 return false;
6301 if (mode == VOIDmode)
6302 mode = GET_MODE (op);
6304 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6305 return false;
6307 /* Handle (vec_duplicate <constant>). */
6308 if (GET_CODE (op) == VEC_DUPLICATE)
6310 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6311 && mode != V2DImode)
6312 return false;
6314 element = XEXP (op, 0);
6315 if (!CONST_INT_P (element))
6316 return false;
6318 value = INTVAL (element);
6319 if (!IN_RANGE (value, -128, 127))
6320 return false;
6323 /* Handle (const_vector [...]). */
6324 else if (GET_CODE (op) == CONST_VECTOR)
6326 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6327 && mode != V2DImode)
6328 return false;
6330 element = CONST_VECTOR_ELT (op, 0);
6331 if (!CONST_INT_P (element))
6332 return false;
6334 value = INTVAL (element);
6335 if (!IN_RANGE (value, -128, 127))
6336 return false;
6338 for (i = 1; i < nunits; i++)
6340 element = CONST_VECTOR_ELT (op, i);
6341 if (!CONST_INT_P (element))
6342 return false;
6344 if (value != INTVAL (element))
6345 return false;
6349 /* Handle integer constants being loaded into the upper part of the VSX
6350 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6351 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6352 else if (CONST_INT_P (op))
6354 if (!SCALAR_INT_MODE_P (mode))
6355 return false;
6357 value = INTVAL (op);
6358 if (!IN_RANGE (value, -128, 127))
6359 return false;
6361 if (!IN_RANGE (value, -1, 0))
6363 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6364 return false;
6366 if (EASY_VECTOR_15 (value))
6367 return false;
6371 else
6372 return false;
6374 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6375 sign extend. Special case 0/-1 to allow getting any VSX register instead
6376 of an Altivec register. */
6377 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6378 && EASY_VECTOR_15 (value))
6379 return false;
6381 /* Return # of instructions and the constant byte for XXSPLTIB. */
6382 if (mode == V16QImode)
6383 *num_insns_ptr = 1;
6385 else if (IN_RANGE (value, -1, 0))
6386 *num_insns_ptr = 1;
6388 else
6389 *num_insns_ptr = 2;
6391 *constant_ptr = (int) value;
6392 return true;
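/* For example, a V16QImode vector with every byte equal to 0x42 is a
   single xxspltib with immediate 66, while a V8HImode vector of -100s
   takes two instructions: xxspltib followed by a vupkhsb sign
   extension.  */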
6395 const char *
6396 output_vec_const_move (rtx *operands)
6398 int cst, cst2, shift;
6399 machine_mode mode;
6400 rtx dest, vec;
6402 dest = operands[0];
6403 vec = operands[1];
6404 mode = GET_MODE (dest);
6406 if (TARGET_VSX)
6408 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6409 int xxspltib_value = 256;
6410 int num_insns = -1;
6412 if (zero_constant (vec, mode))
6414 if (TARGET_P9_VECTOR)
6415 return "xxspltib %x0,0";
6417 else if (dest_vmx_p)
6418 return "vspltisw %0,0";
6420 else
6421 return "xxlxor %x0,%x0,%x0";
6424 if (all_ones_constant (vec, mode))
6426 if (TARGET_P9_VECTOR)
6427 return "xxspltib %x0,255";
6429 else if (dest_vmx_p)
6430 return "vspltisw %0,-1";
6432 else if (TARGET_P8_VECTOR)
6433 return "xxlorc %x0,%x0,%x0";
6435 else
6436 gcc_unreachable ();
6439 if (TARGET_P9_VECTOR
6440 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6442 if (num_insns == 1)
6444 operands[2] = GEN_INT (xxspltib_value & 0xff);
6445 return "xxspltib %x0,%2";
6448 return "#";
6452 if (TARGET_ALTIVEC)
6454 rtx splat_vec;
6456 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6457 if (zero_constant (vec, mode))
6458 return "vspltisw %0,0";
6460 if (all_ones_constant (vec, mode))
6461 return "vspltisw %0,-1";
6463 /* Do we need to construct a value using VSLDOI? */
6464 shift = vspltis_shifted (vec);
6465 if (shift != 0)
6466 return "#";
6468 splat_vec = gen_easy_altivec_constant (vec);
6469 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6470 operands[1] = XEXP (splat_vec, 0);
6471 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6472 return "#";
6474 switch (GET_MODE (splat_vec))
6476 case V4SImode:
6477 return "vspltisw %0,%1";
6479 case V8HImode:
6480 return "vspltish %0,%1";
6482 case V16QImode:
6483 return "vspltisb %0,%1";
6485 default:
6486 gcc_unreachable ();
6490 gcc_assert (TARGET_SPE);
6492 /* Vector constant 0 is handled as a splitter of V2SI, and in the
6493 patterns of V1DI, V4HI, and V2SF.
6495 FIXME: We should probably return # and add post reload
6496 splitters for these, but this way is so easy ;-). */
6497 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
6498 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
6499 operands[1] = CONST_VECTOR_ELT (vec, 0);
6500 operands[2] = CONST_VECTOR_ELT (vec, 1);
6501 if (cst == cst2)
6502 return "li %0,%1\n\tevmergelo %0,%0,%0";
6503 else if (WORDS_BIG_ENDIAN)
6504 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
6505 else
6506 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
6509 /* Initialize TARGET of vector PAIRED to VALS. */
6511 void
6512 paired_expand_vector_init (rtx target, rtx vals)
6514 machine_mode mode = GET_MODE (target);
6515 int n_elts = GET_MODE_NUNITS (mode);
6516 int n_var = 0;
6517 rtx x, new_rtx, tmp, constant_op, op1, op2;
6518 int i;
6520 for (i = 0; i < n_elts; ++i)
6522 x = XVECEXP (vals, 0, i);
6523 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6524 ++n_var;
6526 if (n_var == 0)
6528 /* Load from constant pool. */
6529 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6530 return;
6533 if (n_var == 2)
6535 /* The vector is initialized only with non-constants. */
6536 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6537 XVECEXP (vals, 0, 1));
6539 emit_move_insn (target, new_rtx);
6540 return;
6543 /* One field is non-constant and the other one is a constant. Load the
6544 constant from the constant pool and use the ps_merge instruction to
6545 construct the whole vector. */
6546 op1 = XVECEXP (vals, 0, 0);
6547 op2 = XVECEXP (vals, 0, 1);
6549 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6551 tmp = gen_reg_rtx (GET_MODE (constant_op));
6552 emit_move_insn (tmp, constant_op);
6554 if (CONSTANT_P (op1))
6555 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6556 else
6557 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6559 emit_move_insn (target, new_rtx);
6562 void
6563 paired_expand_vector_move (rtx operands[])
6565 rtx op0 = operands[0], op1 = operands[1];
6567 emit_move_insn (op0, op1);
6570 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6571 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6572 operands for the relation operation COND. This is a recursive
6573 function. */
6575 static void
6576 paired_emit_vector_compare (enum rtx_code rcode,
6577 rtx dest, rtx op0, rtx op1,
6578 rtx cc_op0, rtx cc_op1)
6580 rtx tmp = gen_reg_rtx (V2SFmode);
6581 rtx tmp1, max, min;
6583 gcc_assert (TARGET_PAIRED_FLOAT);
6584 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6586 switch (rcode)
6588 case LT:
6589 case LTU:
6590 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6591 return;
6592 case GE:
6593 case GEU:
6594 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6595 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6596 return;
6597 case LE:
6598 case LEU:
6599 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6600 return;
6601 case GT:
6602 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6603 return;
6604 case EQ:
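      /* EQ is synthesized as min (a,b) - max (a,b) >= 0, which holds
	 exactly when the two operands compare equal (ignoring NaNs).  */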
6605 tmp1 = gen_reg_rtx (V2SFmode);
6606 max = gen_reg_rtx (V2SFmode);
6607 min = gen_reg_rtx (V2SFmode);
6608 gen_reg_rtx (V2SFmode);
6610 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6611 emit_insn (gen_selv2sf4
6612 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6613 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6614 emit_insn (gen_selv2sf4
6615 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6616 emit_insn (gen_subv2sf3 (tmp1, min, max));
6617 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6618 return;
6619 case NE:
6620 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6621 return;
6622 case UNLE:
6623 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6624 return;
6625 case UNLT:
6626 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6627 return;
6628 case UNGE:
6629 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6630 return;
6631 case UNGT:
6632 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6633 return;
6634 default:
6635 gcc_unreachable ();
6638 return;
6641 /* Emit vector conditional expression.
6642 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6643 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6645 int
6646 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6647 rtx cond, rtx cc_op0, rtx cc_op1)
6649 enum rtx_code rcode = GET_CODE (cond);
6651 if (!TARGET_PAIRED_FLOAT)
6652 return 0;
6654 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6656 return 1;
6659 /* Initialize vector TARGET to VALS. */
6661 void
6662 rs6000_expand_vector_init (rtx target, rtx vals)
6664 machine_mode mode = GET_MODE (target);
6665 machine_mode inner_mode = GET_MODE_INNER (mode);
6666 int n_elts = GET_MODE_NUNITS (mode);
6667 int n_var = 0, one_var = -1;
6668 bool all_same = true, all_const_zero = true;
6669 rtx x, mem;
6670 int i;
6672 for (i = 0; i < n_elts; ++i)
6674 x = XVECEXP (vals, 0, i);
6675 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6676 ++n_var, one_var = i;
6677 else if (x != CONST0_RTX (inner_mode))
6678 all_const_zero = false;
6680 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6681 all_same = false;
6684 if (n_var == 0)
6686 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6687 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6688 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6690 /* Zero register. */
6691 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (mode, target, target)));
6692 return;
6694 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6696 /* Splat immediate. */
6697 emit_insn (gen_rtx_SET (target, const_vec));
6698 return;
6700 else
6702 /* Load from constant pool. */
6703 emit_move_insn (target, const_vec);
6704 return;
6708 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6709 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6711 rtx op0 = XVECEXP (vals, 0, 0);
6712 rtx op1 = XVECEXP (vals, 0, 1);
6713 if (all_same)
6715 if (!MEM_P (op0) && !REG_P (op0))
6716 op0 = force_reg (inner_mode, op0);
6717 if (mode == V2DFmode)
6718 emit_insn (gen_vsx_splat_v2df (target, op0));
6719 else
6720 emit_insn (gen_vsx_splat_v2di (target, op0));
6722 else
6724 op0 = force_reg (inner_mode, op0);
6725 op1 = force_reg (inner_mode, op1);
6726 if (mode == V2DFmode)
6727 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
6728 else
6729 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
6731 return;
6734 /* Word values on ISA 3.0 can use mtvsrws, lxvwsx, or vspltisw. V4SF is
6735 complicated since scalars are stored as doubles in the registers. */
6736 if (TARGET_P9_VECTOR && mode == V4SImode && all_same
6737 && VECTOR_MEM_VSX_P (mode))
6739 emit_insn (gen_vsx_splat_v4si (target, XVECEXP (vals, 0, 0)));
6740 return;
6743 /* With single-precision floating point on VSX, we know that internally single
6744 precision is actually represented as a double, so either make 2 V2DF
6745 vectors and convert these vectors to single precision, or do one
6746 conversion and splat the result to the other elements. */
6747 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
6749 if (all_same)
6751 rtx op0 = XVECEXP (vals, 0, 0);
6753 if (TARGET_P9_VECTOR)
6754 emit_insn (gen_vsx_splat_v4sf (target, op0));
6756 else
6758 rtx freg = gen_reg_rtx (V4SFmode);
6759 rtx sreg = force_reg (SFmode, op0);
6760 rtx cvt = (TARGET_XSCVDPSPN
6761 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6762 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6764 emit_insn (cvt);
6765 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6766 const0_rtx));
6769 else
6771 rtx dbl_even = gen_reg_rtx (V2DFmode);
6772 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6773 rtx flt_even = gen_reg_rtx (V4SFmode);
6774 rtx flt_odd = gen_reg_rtx (V4SFmode);
6775 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6776 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6777 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6778 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6780 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6781 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6782 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6783 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6784 rs6000_expand_extract_even (target, flt_even, flt_odd);
6786 return;
6789 /* Store value to stack temp. Load vector element. Splat. However, splat
6790 of 64-bit items is not supported on Altivec. */
6791 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6793 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6794 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6795 XVECEXP (vals, 0, 0));
6796 x = gen_rtx_UNSPEC (VOIDmode,
6797 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6798 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6799 gen_rtvec (2,
6800 gen_rtx_SET (target, mem),
6801 x)));
6802 x = gen_rtx_VEC_SELECT (inner_mode, target,
6803 gen_rtx_PARALLEL (VOIDmode,
6804 gen_rtvec (1, const0_rtx)));
6805 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6806 return;
6809 /* One field is non-constant. Load constant then overwrite
6810 varying field. */
6811 if (n_var == 1)
6813 rtx copy = copy_rtx (vals);
6815 /* Load constant part of vector, substitute neighboring value for
6816 varying element. */
6817 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6818 rs6000_expand_vector_init (target, copy);
6820 /* Insert variable. */
6821 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6822 return;
6825 /* Construct the vector in memory one field at a time
6826 and load the whole vector. */
6827 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6828 for (i = 0; i < n_elts; i++)
6829 emit_move_insn (adjust_address_nv (mem, inner_mode,
6830 i * GET_MODE_SIZE (inner_mode)),
6831 XVECEXP (vals, 0, i));
6832 emit_move_insn (target, mem);
6835 /* Set field ELT of TARGET to VAL. */
6837 void
6838 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6840 machine_mode mode = GET_MODE (target);
6841 machine_mode inner_mode = GET_MODE_INNER (mode);
6842 rtx reg = gen_reg_rtx (mode);
6843 rtx mask, mem, x;
6844 int width = GET_MODE_SIZE (inner_mode);
6845 int i;
6847 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6849 rtx (*set_func) (rtx, rtx, rtx, rtx)
6850 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
6851 emit_insn (set_func (target, target, val, GEN_INT (elt)));
6852 return;
6855 /* Simplify setting single element vectors like V1TImode. */
6856 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6858 emit_move_insn (target, gen_lowpart (mode, val));
6859 return;
6862 /* Load single variable value. */
6863 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6864 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6865 x = gen_rtx_UNSPEC (VOIDmode,
6866 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6867 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6868 gen_rtvec (2,
6869 gen_rtx_SET (reg, mem),
6870 x)));
6872 /* Linear sequence. */
6873 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6874 for (i = 0; i < 16; ++i)
6875 XVECEXP (mask, 0, i) = GEN_INT (i);
6877 /* Set permute mask to insert element into target. */
6878 for (i = 0; i < width; ++i)
6879 XVECEXP (mask, 0, elt*width + i)
6880 = GEN_INT (i + 0x10);
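  /* Worked example: for V4SImode and ELT == 1 the mask becomes
     { 0,1,2,3, 16,17,18,19, 8,9,10,11, 12,13,14,15 }: selector values
     below 16 pick bytes of TARGET, 16 and up pick bytes of REG, so only
     element 1 is replaced.  */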
6881 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6883 if (BYTES_BIG_ENDIAN)
6884 x = gen_rtx_UNSPEC (mode,
6885 gen_rtvec (3, target, reg,
6886 force_reg (V16QImode, x)),
6887 UNSPEC_VPERM);
6888 else
6890 if (TARGET_P9_VECTOR)
6891 x = gen_rtx_UNSPEC (mode,
6892 gen_rtvec (3, target, reg,
6893 force_reg (V16QImode, x)),
6894 UNSPEC_VPERMR);
6895 else
6897 /* Invert selector. We prefer to generate VNAND on P8 so
6898 that future fusion opportunities can kick in, but must
6899 generate VNOR elsewhere. */
6900 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6901 rtx iorx = (TARGET_P8_VECTOR
6902 ? gen_rtx_IOR (V16QImode, notx, notx)
6903 : gen_rtx_AND (V16QImode, notx, notx));
6904 rtx tmp = gen_reg_rtx (V16QImode);
6905 emit_insn (gen_rtx_SET (tmp, iorx));
6907 /* Permute with operands reversed and adjusted selector. */
6908 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6909 UNSPEC_VPERM);
6913 emit_insn (gen_rtx_SET (target, x));
6916 /* Extract field ELT from VEC into TARGET. */
6918 void
6919 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6921 machine_mode mode = GET_MODE (vec);
6922 machine_mode inner_mode = GET_MODE_INNER (mode);
6923 rtx mem;
6925 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6927 switch (mode)
6929 default:
6930 break;
6931 case V1TImode:
6932 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6933 emit_move_insn (target, gen_lowpart (TImode, vec));
6934 break;
6935 case V2DFmode:
6936 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6937 return;
6938 case V2DImode:
6939 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6940 return;
6941 case V4SFmode:
6942 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6943 return;
6944 case V16QImode:
6945 if (TARGET_DIRECT_MOVE_64BIT)
6947 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6948 return;
6950 else
6951 break;
6952 case V8HImode:
6953 if (TARGET_DIRECT_MOVE_64BIT)
6955 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6956 return;
6958 else
6959 break;
6960 case V4SImode:
6961 if (TARGET_DIRECT_MOVE_64BIT)
6963 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6964 return;
6966 break;
6969 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6970 && TARGET_DIRECT_MOVE_64BIT)
6972 if (GET_MODE (elt) != DImode)
6974 rtx tmp = gen_reg_rtx (DImode);
6975 convert_move (tmp, elt, 0);
6976 elt = tmp;
6979 switch (mode)
6981 case V2DFmode:
6982 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6983 return;
6985 case V2DImode:
6986 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6987 return;
6989 case V4SFmode:
6990 if (TARGET_UPPER_REGS_SF)
6992 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6993 return;
6995 break;
6997 case V4SImode:
6998 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6999 return;
7001 case V8HImode:
7002 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7003 return;
7005 case V16QImode:
7006 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7007 return;
7009 default:
7010 gcc_unreachable ();
7014 gcc_assert (CONST_INT_P (elt));
7016 /* Allocate mode-sized buffer. */
7017 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7019 emit_move_insn (mem, vec);
7021 /* Add offset to field within buffer matching vector element. */
7022 mem = adjust_address_nv (mem, inner_mode,
7023 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7025 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
7028 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7029 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7030 temporary (BASE_TMP) to fix up the address. Return the new memory address
7031 that is valid for reads or writes to a given register (SCALAR_REG). */
7033 static rtx
7034 rs6000_adjust_vec_address (rtx scalar_reg,
7035 rtx mem,
7036 rtx element,
7037 rtx base_tmp,
7038 machine_mode scalar_mode)
7040 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7041 rtx addr = XEXP (mem, 0);
7042 rtx element_offset;
7043 rtx new_addr;
7044 bool valid_addr_p;
7046 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7047 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7049 /* Calculate what we need to add to the address to get the element
7050 address. */
7051 if (CONST_INT_P (element))
7052 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7053 else
7055 int byte_shift = exact_log2 (scalar_size);
7056 gcc_assert (byte_shift >= 0);
7058 if (byte_shift == 0)
7059 element_offset = element;
7061 else
7063 if (TARGET_POWERPC64)
7064 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7065 else
7066 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7068 element_offset = base_tmp;
7072 /* Create the new address pointing to the element within the vector. If we
7073 are adding 0, we don't have to change the address. */
7074 if (element_offset == const0_rtx)
7075 new_addr = addr;
7077 /* A simple indirect address can be converted into a reg + offset
7078 address. */
7079 else if (REG_P (addr) || SUBREG_P (addr))
7080 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7082 /* Optimize D-FORM addresses with constant offset with a constant element, to
7083 include the element offset in the address directly. */
7084 else if (GET_CODE (addr) == PLUS)
7086 rtx op0 = XEXP (addr, 0);
7087 rtx op1 = XEXP (addr, 1);
7088 rtx insn;
7090 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7091 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7093 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7094 rtx offset_rtx = GEN_INT (offset);
7096 if (IN_RANGE (offset, -32768, 32767)
7097 && (scalar_size < 8 || (offset & 0x3) == 0))
7098 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7099 else
7101 emit_move_insn (base_tmp, offset_rtx);
7102 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7105 else
7107 if (REG_P (op1) || SUBREG_P (op1))
7109 insn = gen_add3_insn (base_tmp, op1, element_offset);
7110 gcc_assert (insn != NULL_RTX);
7111 emit_insn (insn);
7114 else if (REG_P (element_offset) || SUBREG_P (element_offset))
7116 insn = gen_add3_insn (base_tmp, element_offset, op1);
7117 gcc_assert (insn != NULL_RTX);
7118 emit_insn (insn);
7121 else
7123 emit_move_insn (base_tmp, op1);
7124 emit_insn (gen_add2_insn (base_tmp, element_offset));
7127 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7131 else
7133 emit_move_insn (base_tmp, addr);
7134 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7137 /* If we have a PLUS, we need to see whether the particular register class
7138 allows for D-FORM or X-FORM addressing. */
7139 if (GET_CODE (new_addr) == PLUS)
7141 rtx op1 = XEXP (new_addr, 1);
7142 addr_mask_type addr_mask;
7143 int scalar_regno;
7145 if (REG_P (scalar_reg))
7146 scalar_regno = REGNO (scalar_reg);
7147 else if (SUBREG_P (scalar_reg))
7148 scalar_regno = subreg_regno (scalar_reg);
7149 else
7150 gcc_unreachable ();
7152 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7153 if (INT_REGNO_P (scalar_regno))
7154 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7156 else if (FP_REGNO_P (scalar_regno))
7157 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7159 else if (ALTIVEC_REGNO_P (scalar_regno))
7160 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7162 else
7163 gcc_unreachable ();
7165 if (REG_P (op1) || SUBREG_P (op1))
7166 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7167 else
7168 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7171 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7172 valid_addr_p = true;
7174 else
7175 valid_addr_p = false;
7177 if (!valid_addr_p)
7179 emit_move_insn (base_tmp, new_addr);
7180 new_addr = base_tmp;
7183 return change_address (mem, scalar_mode, new_addr);
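/* For example, selecting word element 2 of a V4SImode vector addressed as
   (plus r3 16) gives ELEMENT_OFFSET == 8 and the combined D-form address
   (plus r3 24), assuming the register class of SCALAR_REG permits offset
   addressing; otherwise the sum is moved into BASE_TMP first.  */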
7186 /* Split a variable vec_extract operation into the component instructions. */
7188 void
7189 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7190 rtx tmp_altivec)
7192 machine_mode mode = GET_MODE (src);
7193 machine_mode scalar_mode = GET_MODE (dest);
7194 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7195 int byte_shift = exact_log2 (scalar_size);
7197 gcc_assert (byte_shift >= 0);
7199 /* If we are given a memory address, optimize to load just the element. We
7200 don't have to adjust the vector element number on little endian
7201 systems. */
7202 if (MEM_P (src))
7204 gcc_assert (REG_P (tmp_gpr));
7205 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7206 tmp_gpr, scalar_mode));
7207 return;
7210 else if (REG_P (src) || SUBREG_P (src))
7212 int bit_shift = byte_shift + 3;
7213 rtx element2;
7215 gcc_assert (REG_P (tmp_gpr) && REG_P (tmp_altivec));
7217 /* For little endian, adjust the element ordering. For V2DI/V2DF we can
7218 use an XOR; otherwise we need to subtract. The shift amount is chosen
7219 so that VSLO will shift the element into the upper position (adding 3
7220 converts a byte shift into a bit shift). */
7221 if (scalar_size == 8)
7223 if (!VECTOR_ELT_ORDER_BIG)
7225 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7226 element2 = tmp_gpr;
7228 else
7229 element2 = element;
7231 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7232 bit. */
7233 emit_insn (gen_rtx_SET (tmp_gpr,
7234 gen_rtx_AND (DImode,
7235 gen_rtx_ASHIFT (DImode,
7236 element2,
7237 GEN_INT (6)),
7238 GEN_INT (64))));
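/* With two 8-byte elements the element number is a single bit, so
   (element << 6) & 64 yields 0 or 64, which VSLO reads as a byte shift
   of 0 or 8. */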
7240 else
7242 if (!VECTOR_ELT_ORDER_BIG)
7244 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7246 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7247 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7248 element2 = tmp_gpr;
7250 else
7251 element2 = element;
7253 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
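/* For example, V4SImode element 2 gives 2 << 5 = 64; VSLO interprets
   bits 121:124 of that value as a byte shift of 8, i.e. 2 elements of
   4 bytes. */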
7256 /* Get the value into the lower byte of the Altivec register where VSLO
7257 expects it. */
7258 if (TARGET_P9_VECTOR)
7259 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7260 else if (can_create_pseudo_p ())
7261 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7262 else
7264 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7265 emit_move_insn (tmp_di, tmp_gpr);
7266 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7269 /* Do the VSLO to get the value into the final location. */
7270 switch (mode)
7272 case V2DFmode:
7273 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7274 return;
7276 case V2DImode:
7277 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7278 return;
7280 case V4SFmode:
7282 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7283 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7284 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7285 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7286 tmp_altivec));
7288 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7289 return;
7292 case V4SImode:
7293 case V8HImode:
7294 case V16QImode:
7296 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7297 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7298 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7299 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7300 tmp_altivec));
7301 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7302 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7303 GEN_INT (64 - (8 * scalar_size))));
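/* For example, a V16QImode element lands in the high byte after the
   VSLO, so it is shifted right 64 - 8 = 56 bits to reach the low byte,
   sign-extended. */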
7304 return;
7307 default:
7308 gcc_unreachable ();
7311 return;
7313 else
7314 gcc_unreachable ();
7317 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
7319 bool
7320 invalid_e500_subreg (rtx op, machine_mode mode)
7322 if (TARGET_E500_DOUBLE)
7324 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
7325 subreg:TI and reg:TF. Decimal float modes are like integer
7326 modes (only low part of each register used) for this
7327 purpose. */
7328 if (GET_CODE (op) == SUBREG
7329 && (mode == SImode || mode == DImode || mode == TImode
7330 || mode == DDmode || mode == TDmode || mode == PTImode)
7331 && REG_P (SUBREG_REG (op))
7332 && (GET_MODE (SUBREG_REG (op)) == DFmode
7333 || GET_MODE (SUBREG_REG (op)) == TFmode
7334 || GET_MODE (SUBREG_REG (op)) == IFmode
7335 || GET_MODE (SUBREG_REG (op)) == KFmode))
7336 return true;
7338 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
7339 reg:TI. */
7340 if (GET_CODE (op) == SUBREG
7341 && (mode == DFmode || mode == TFmode || mode == IFmode
7342 || mode == KFmode)
7343 && REG_P (SUBREG_REG (op))
7344 && (GET_MODE (SUBREG_REG (op)) == DImode
7345 || GET_MODE (SUBREG_REG (op)) == TImode
7346 || GET_MODE (SUBREG_REG (op)) == PTImode
7347 || GET_MODE (SUBREG_REG (op)) == DDmode
7348 || GET_MODE (SUBREG_REG (op)) == TDmode))
7349 return true;
7352 if (TARGET_SPE
7353 && GET_CODE (op) == SUBREG
7354 && mode == SImode
7355 && REG_P (SUBREG_REG (op))
7356 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
7357 return true;
7359 return false;
7362 /* Return the alignment of TYPE. The existing alignment is ALIGN.
7363 HOW selects whether the alignment is ABI-mandated, optional,
7364 or both. */
7366 unsigned int
7367 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7369 if (how != align_opt)
7371 if (TREE_CODE (type) == VECTOR_TYPE)
7373 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
7374 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
7376 if (align < 64)
7377 align = 64;
7379 else if (align < 128)
7380 align = 128;
7382 else if (TARGET_E500_DOUBLE
7383 && TREE_CODE (type) == REAL_TYPE
7384 && TYPE_MODE (type) == DFmode)
7386 if (align < 64)
7387 align = 64;
7391 if (how != align_abi)
7393 if (TREE_CODE (type) == ARRAY_TYPE
7394 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7396 if (align < BITS_PER_WORD)
7397 align = BITS_PER_WORD;
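/* I.e. char arrays get at least word alignment, which presumably lets
   block moves such as memcpy use full-word accesses. */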
7401 return align;
7404 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7406 bool
7407 rs6000_special_adjust_field_align_p (tree field, unsigned int computed)
7409 if (TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7411 if (computed != 128)
7413 static bool warned;
7414 if (!warned && warn_psabi)
7416 warned = true;
7417 inform (input_location,
7418 "the layout of aggregates containing vectors with"
7419 " %d-byte alignment has changed in GCC 5",
7420 computed / BITS_PER_UNIT);
7423 /* In current GCC there is no special case. */
7424 return false;
7427 return false;
7430 /* AIX increases natural record alignment to doubleword if the first
7431 field is an FP double while the FP fields remain word aligned. */
7433 unsigned int
7434 rs6000_special_round_type_align (tree type, unsigned int computed,
7435 unsigned int specified)
7437 unsigned int align = MAX (computed, specified);
7438 tree field = TYPE_FIELDS (type);
7440 /* Skip all non-field decls. */
7441 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7442 field = DECL_CHAIN (field);
7444 if (field != NULL && field != type)
7446 type = TREE_TYPE (field);
7447 while (TREE_CODE (type) == ARRAY_TYPE)
7448 type = TREE_TYPE (type);
7450 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7451 align = MAX (align, 64);
7454 return align;
7457 /* Darwin increases record alignment to the natural alignment of
7458 the first field. */
7460 unsigned int
7461 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7462 unsigned int specified)
7464 unsigned int align = MAX (computed, specified);
7466 if (TYPE_PACKED (type))
7467 return align;
7469 /* Find the first field, looking down into aggregates. */
7470 do {
7471 tree field = TYPE_FIELDS (type);
7472 /* Skip all non-field decls. */
7473 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7474 field = DECL_CHAIN (field);
7475 if (! field)
7476 break;
7477 /* A packed field does not contribute any extra alignment. */
7478 if (DECL_PACKED (field))
7479 return align;
7480 type = TREE_TYPE (field);
7481 while (TREE_CODE (type) == ARRAY_TYPE)
7482 type = TREE_TYPE (type);
7483 } while (AGGREGATE_TYPE_P (type));
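/* For example, for struct { struct { double d; } inner; } the scan
   descends to the double, so the outer record gets 64-bit alignment. */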
7485 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7486 align = MAX (align, TYPE_ALIGN (type));
7488 return align;
7491 /* Return 1 for an operand in small memory on V.4/eabi. */
7493 int
7494 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7495 machine_mode mode ATTRIBUTE_UNUSED)
7497 #if TARGET_ELF
7498 rtx sym_ref;
7500 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7501 return 0;
7503 if (DEFAULT_ABI != ABI_V4)
7504 return 0;
7506 /* Vector and float memory instructions have a limited offset on the
7507 SPE, so using a vector or float variable directly as an operand is
7508 not useful. */
7509 if (TARGET_SPE
7510 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
7511 return 0;
7513 if (GET_CODE (op) == SYMBOL_REF)
7514 sym_ref = op;
7516 else if (GET_CODE (op) != CONST
7517 || GET_CODE (XEXP (op, 0)) != PLUS
7518 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7519 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7520 return 0;
7522 else
7524 rtx sum = XEXP (op, 0);
7525 HOST_WIDE_INT summand;
7527 /* We have to be careful here, because it is the referenced address
7528 that must be 32k from _SDA_BASE_, not just the symbol. */
7529 summand = INTVAL (XEXP (sum, 1));
7530 if (summand < 0 || summand > g_switch_value)
7531 return 0;
7533 sym_ref = XEXP (sum, 0);
7536 return SYMBOL_REF_SMALL_P (sym_ref);
7537 #else
7538 return 0;
7539 #endif
7542 /* Return true if either operand is a general purpose register. */
7544 bool
7545 gpr_or_gpr_p (rtx op0, rtx op1)
7547 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7548 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7551 /* Return true if this is a direct move operation between GPR registers and
7552 floating point/VSX registers. */
7554 bool
7555 direct_move_p (rtx op0, rtx op1)
7557 int regno0, regno1;
7559 if (!REG_P (op0) || !REG_P (op1))
7560 return false;
7562 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7563 return false;
7565 regno0 = REGNO (op0);
7566 regno1 = REGNO (op1);
7567 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7568 return false;
7570 if (INT_REGNO_P (regno0))
7571 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7573 else if (INT_REGNO_P (regno1))
7575 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7576 return true;
7578 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7579 return true;
7582 return false;
7585 /* Return true if the OFFSET is valid for the quad address instructions that
7586 use d-form (register + offset) addressing. */
7588 static inline bool
7589 quad_address_offset_p (HOST_WIDE_INT offset)
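/* I.e. a multiple of 16 within the signed 16-bit range, matching the
   DQ instruction format in which the low four bits are implied zero. */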
7591 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7594 /* Return true if ADDR is an acceptable address for a quad memory
7595 operation of mode MODE (either LQ/STQ for general purpose registers, or
7596 LXV/STXV for vector registers under ISA 3.0). STRICT selects whether
7597 strict register checks are applied when validating the base register. */
7600 bool
7601 quad_address_p (rtx addr, machine_mode mode, bool strict)
7603 rtx op0, op1;
7605 if (GET_MODE_SIZE (mode) != 16)
7606 return false;
7608 if (legitimate_indirect_address_p (addr, strict))
7609 return true;
7611 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
7612 return false;
7614 if (GET_CODE (addr) != PLUS)
7615 return false;
7617 op0 = XEXP (addr, 0);
7618 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7619 return false;
7621 op1 = XEXP (addr, 1);
7622 if (!CONST_INT_P (op1))
7623 return false;
7625 return quad_address_offset_p (INTVAL (op1));
7628 /* Return true if this is a load or store quad operation. This function does
7629 not handle the atomic quad memory instructions. */
7631 bool
7632 quad_load_store_p (rtx op0, rtx op1)
7634 bool ret;
7636 if (!TARGET_QUAD_MEMORY)
7637 ret = false;
7639 else if (REG_P (op0) && MEM_P (op1))
7640 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7641 && quad_memory_operand (op1, GET_MODE (op1))
7642 && !reg_overlap_mentioned_p (op0, op1));
7644 else if (MEM_P (op0) && REG_P (op1))
7645 ret = (quad_memory_operand (op0, GET_MODE (op0))
7646 && quad_int_reg_operand (op1, GET_MODE (op1)));
7648 else
7649 ret = false;
7651 if (TARGET_DEBUG_ADDR)
7653 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7654 ret ? "true" : "false");
7655 debug_rtx (gen_rtx_SET (op0, op1));
7658 return ret;
7661 /* Given an address, return a constant offset term if one exists. */
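/* For example, (pre_modify (reg) (plus (reg) (const_int 16))) yields
   (const_int 16), while a plain (reg) yields NULL_RTX. */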
7663 static rtx
7664 address_offset (rtx op)
7666 if (GET_CODE (op) == PRE_INC
7667 || GET_CODE (op) == PRE_DEC)
7668 op = XEXP (op, 0);
7669 else if (GET_CODE (op) == PRE_MODIFY
7670 || GET_CODE (op) == LO_SUM)
7671 op = XEXP (op, 1);
7673 if (GET_CODE (op) == CONST)
7674 op = XEXP (op, 0);
7676 if (GET_CODE (op) == PLUS)
7677 op = XEXP (op, 1);
7679 if (CONST_INT_P (op))
7680 return op;
7682 return NULL_RTX;
7685 /* Return true if the MEM operand is a memory operand suitable for use
7686 with a (full width, possibly multiple) gpr load/store. On
7687 powerpc64 this means the offset must be divisible by 4.
7688 Implements 'Y' constraint.
7690 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7691 a constraint function we know the operand has satisfied a suitable
7692 memory predicate. Also accept some odd rtl generated by reload
7693 (see rs6000_legitimize_reload_address for various forms). It is
7694 important that reload rtl be accepted by appropriate constraints
7695 but not by the operand predicate.
7697 Offsetting a lo_sum should not be allowed, except where we know by
7698 alignment that a 32k boundary is not crossed, but see the ???
7699 comment in rs6000_legitimize_reload_address. Note that by
7700 "offsetting" here we mean a further offset to access parts of the
7701 MEM. It's fine to have a lo_sum where the inner address is offset
7702 from a sym, since the same sym+offset will appear in the high part
7703 of the address calculation. */
7705 bool
7706 mem_operand_gpr (rtx op, machine_mode mode)
7708 unsigned HOST_WIDE_INT offset;
7709 int extra;
7710 rtx addr = XEXP (op, 0);
7712 op = address_offset (addr);
7713 if (op == NULL_RTX)
7714 return true;
7716 offset = INTVAL (op);
7717 if (TARGET_POWERPC64 && (offset & 3) != 0)
7718 return false;
7720 if (mode_supports_vsx_dform_quad (mode)
7721 && !quad_address_offset_p (offset))
7722 return false;
7724 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7725 if (extra < 0)
7726 extra = 0;
7728 if (GET_CODE (addr) == LO_SUM)
7729 /* For lo_sum addresses, we must allow any offset except one that
7730 causes a wrap, so test only the low 16 bits. */
7731 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
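/* EXTRA reserves room for the trailing words of a multi-word access;
   e.g. TImode on powerpc64 has extra = 8, so the offset must stay below
   32760 for offset+8 to remain a valid 16-bit displacement. */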
7733 return offset + 0x8000 < 0x10000u - extra;
7736 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7738 static bool
7739 reg_offset_addressing_ok_p (machine_mode mode)
7741 switch (mode)
7743 case V16QImode:
7744 case V8HImode:
7745 case V4SFmode:
7746 case V4SImode:
7747 case V2DFmode:
7748 case V2DImode:
7749 case V1TImode:
7750 case TImode:
7751 case TFmode:
7752 case KFmode:
7753 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7754 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7755 a vector mode, if we want to use the VSX registers to move it around,
7756 we need to restrict ourselves to reg+reg addressing. Similarly for
7757 IEEE 128-bit floating point that is passed in a single vector
7758 register. */
7759 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7760 return mode_supports_vsx_dform_quad (mode);
7761 break;
7763 case V4HImode:
7764 case V2SImode:
7765 case V1DImode:
7766 case V2SFmode:
7767 /* Paired vector modes. Only reg+reg addressing is valid. */
7768 if (TARGET_PAIRED_FLOAT)
7769 return false;
7770 break;
7772 case SDmode:
7773 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7774 addressing for the LFIWZX and STFIWX instructions. */
7775 if (TARGET_NO_SDMODE_STACK)
7776 return false;
7777 break;
7779 default:
7780 break;
7783 return true;
7786 static bool
7787 virtual_stack_registers_memory_p (rtx op)
7789 int regnum;
7791 if (GET_CODE (op) == REG)
7792 regnum = REGNO (op);
7794 else if (GET_CODE (op) == PLUS
7795 && GET_CODE (XEXP (op, 0)) == REG
7796 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7797 regnum = REGNO (XEXP (op, 0));
7799 else
7800 return false;
7802 return (regnum >= FIRST_VIRTUAL_REGISTER
7803 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7806 /* Return true if a MODE sized memory accesses to OP plus OFFSET
7807 is known to not straddle a 32k boundary. This function is used
7808 to determine whether -mcmodel=medium code can use TOC pointer
7809 relative addressing for OP. This means the alignment of the TOC
7810 pointer must also be taken into account, and unfortunately that is
7811 only 8 bytes. */
7813 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7814 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7815 #endif
7817 static bool
7818 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7819 machine_mode mode)
7821 tree decl;
7822 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7824 if (GET_CODE (op) != SYMBOL_REF)
7825 return false;
7827 /* ISA 3.0 vector d-form addressing is restricted; don't allow
7828 SYMBOL_REF. */
7829 if (mode_supports_vsx_dform_quad (mode))
7830 return false;
7832 dsize = GET_MODE_SIZE (mode);
7833 decl = SYMBOL_REF_DECL (op);
7834 if (!decl)
7836 if (dsize == 0)
7837 return false;
7839 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7840 replacing memory addresses with an anchor plus offset. We
7841 could find the decl by rummaging around in the block->objects
7842 VEC for the given offset but that seems like too much work. */
7843 dalign = BITS_PER_UNIT;
7844 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7845 && SYMBOL_REF_ANCHOR_P (op)
7846 && SYMBOL_REF_BLOCK (op) != NULL)
7848 struct object_block *block = SYMBOL_REF_BLOCK (op);
7850 dalign = block->alignment;
7851 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7853 else if (CONSTANT_POOL_ADDRESS_P (op))
7855 /* It would be nice to have get_pool_align ()... */
7856 machine_mode cmode = get_pool_mode (op);
7858 dalign = GET_MODE_ALIGNMENT (cmode);
7861 else if (DECL_P (decl))
7863 dalign = DECL_ALIGN (decl);
7865 if (dsize == 0)
7867 /* Allow BLKmode when the entire object is known to not
7868 cross a 32k boundary. */
7869 if (!DECL_SIZE_UNIT (decl))
7870 return false;
7872 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7873 return false;
7875 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7876 if (dsize > 32768)
7877 return false;
7879 dalign /= BITS_PER_UNIT;
7880 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7881 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7882 return dalign >= dsize;
7885 else
7886 gcc_unreachable ();
7888 /* Find how many bits of the alignment we know for this access. */
7889 dalign /= BITS_PER_UNIT;
7890 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7891 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7892 mask = dalign - 1;
7893 lsb = offset & -offset;
7894 mask &= lsb - 1;
7895 dalign = mask + 1;
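/* For example, offset 40 with an 8-byte-aligned TOC gives lsb = 8 and
   mask = 7, so dalign = 8 and a 16-byte access fails the test below. */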
7897 return dalign >= dsize;
7900 static bool
7901 constant_pool_expr_p (rtx op)
7903 rtx base, offset;
7905 split_const (op, &base, &offset);
7906 return (GET_CODE (base) == SYMBOL_REF
7907 && CONSTANT_POOL_ADDRESS_P (base)
7908 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7911 static const_rtx tocrel_base, tocrel_offset;
7913 /* Return true if OP is a toc pointer relative address (the output
7914 of create_TOC_reference). If STRICT, do not match high part or
7915 non-split -mcmodel=large/medium toc pointer relative addresses. */
7917 bool
7918 toc_relative_expr_p (const_rtx op, bool strict)
7920 if (!TARGET_TOC)
7921 return false;
7923 if (TARGET_CMODEL != CMODEL_SMALL)
7925 /* Only match the low part. */
7926 if (GET_CODE (op) == LO_SUM
7927 && REG_P (XEXP (op, 0))
7928 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
7929 op = XEXP (op, 1);
7930 else if (strict)
7931 return false;
7934 tocrel_base = op;
7935 tocrel_offset = const0_rtx;
7936 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7938 tocrel_base = XEXP (op, 0);
7939 tocrel_offset = XEXP (op, 1);
7942 return (GET_CODE (tocrel_base) == UNSPEC
7943 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
7946 /* Return true if X is a constant pool address, and also for cmodel=medium
7947 if X is a toc-relative address known to be offsettable within MODE. */
7949 bool
7950 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7951 bool strict)
7953 return (toc_relative_expr_p (x, strict)
7954 && (TARGET_CMODEL != CMODEL_MEDIUM
7955 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7956 || mode == QImode
7957 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7958 INTVAL (tocrel_offset), mode)));
7961 static bool
7962 legitimate_small_data_p (machine_mode mode, rtx x)
7964 return (DEFAULT_ABI == ABI_V4
7965 && !flag_pic && !TARGET_TOC
7966 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7967 && small_data_operand (x, mode));
7970 /* SPE offset addressing is limited to 5 bits' worth of double words. */
7971 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
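/* I.e. offsets 0, 8, 16, ..., 248: a 5-bit count of double words scaled
   by 8. */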
7973 bool
7974 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7975 bool strict, bool worst_case)
7977 unsigned HOST_WIDE_INT offset;
7978 unsigned int extra;
7980 if (GET_CODE (x) != PLUS)
7981 return false;
7982 if (!REG_P (XEXP (x, 0)))
7983 return false;
7984 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7985 return false;
7986 if (mode_supports_vsx_dform_quad (mode))
7987 return quad_address_p (x, mode, strict);
7988 if (!reg_offset_addressing_ok_p (mode))
7989 return virtual_stack_registers_memory_p (x);
7990 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7991 return true;
7992 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
7993 return false;
7995 offset = INTVAL (XEXP (x, 1));
7996 extra = 0;
7997 switch (mode)
7999 case V4HImode:
8000 case V2SImode:
8001 case V1DImode:
8002 case V2SFmode:
8003 /* SPE vector modes. */
8004 return SPE_CONST_OFFSET_OK (offset);
8006 case DFmode:
8007 case DDmode:
8008 case DImode:
8009 /* On e500v2, we may have:
8011 (subreg:DF (mem:DI (plus (reg) (const_int))) 0),
8013 which gets addressed with evldd instructions. */
8014 if (TARGET_E500_DOUBLE)
8015 return SPE_CONST_OFFSET_OK (offset);
8017 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8018 addressing. */
8019 if (VECTOR_MEM_VSX_P (mode))
8020 return false;
8022 if (!worst_case)
8023 break;
8024 if (!TARGET_POWERPC64)
8025 extra = 4;
8026 else if (offset & 3)
8027 return false;
8028 break;
8030 case TFmode:
8031 case IFmode:
8032 case KFmode:
8033 if (TARGET_E500_DOUBLE)
8034 return (SPE_CONST_OFFSET_OK (offset)
8035 && SPE_CONST_OFFSET_OK (offset + 8));
8036 /* fall through */
8038 case TDmode:
8039 case TImode:
8040 case PTImode:
8041 extra = 8;
8042 if (!worst_case)
8043 break;
8044 if (!TARGET_POWERPC64)
8045 extra = 12;
8046 else if (offset & 3)
8047 return false;
8048 break;
8050 default:
8051 break;
8054 offset += 0x8000;
8055 return offset < 0x10000 - extra;
8058 bool
8059 legitimate_indexed_address_p (rtx x, int strict)
8061 rtx op0, op1;
8063 if (GET_CODE (x) != PLUS)
8064 return false;
8066 op0 = XEXP (x, 0);
8067 op1 = XEXP (x, 1);
8069 /* Recognize the rtl generated by reload which we know will later be
8070 replaced with proper base and index regs. */
8071 if (!strict
8072 && reload_in_progress
8073 && (REG_P (op0) || GET_CODE (op0) == PLUS)
8074 && REG_P (op1))
8075 return true;
8077 return (REG_P (op0) && REG_P (op1)
8078 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8079 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8080 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8081 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8084 bool
8085 avoiding_indexed_address_p (machine_mode mode)
8087 /* Avoid indexed addressing for modes that have non-indexed
8088 load/store instruction forms. */
8089 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8092 bool
8093 legitimate_indirect_address_p (rtx x, int strict)
8095 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8098 bool
8099 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8101 if (!TARGET_MACHO || !flag_pic
8102 || mode != SImode || GET_CODE (x) != MEM)
8103 return false;
8104 x = XEXP (x, 0);
8106 if (GET_CODE (x) != LO_SUM)
8107 return false;
8108 if (GET_CODE (XEXP (x, 0)) != REG)
8109 return false;
8110 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8111 return false;
8112 x = XEXP (x, 1);
8114 return CONSTANT_P (x);
8117 static bool
8118 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8120 if (GET_CODE (x) != LO_SUM)
8121 return false;
8122 if (GET_CODE (XEXP (x, 0)) != REG)
8123 return false;
8124 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8125 return false;
8126 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8127 if (mode_supports_vsx_dform_quad (mode))
8128 return false;
8129 /* Restrict addressing for DI because of our SUBREG hackery. */
8130 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
8131 return false;
8132 x = XEXP (x, 1);
8134 if (TARGET_ELF || TARGET_MACHO)
8136 bool large_toc_ok;
8138 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8139 return false;
8140 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, which usually calls
8141 push_reload from the reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8142 recognizes some LO_SUM addresses as valid although this
8143 function says the opposite. In most cases LRA can generate
8144 correct code for address reloads through various transformations,
8145 but it cannot manage some LO_SUM cases. So we need code here,
8146 analogous to that in rs6000_legitimize_reload_address for
8147 LO_SUM, saying that some addresses are still valid. */
8148 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8149 && small_toc_ref (x, VOIDmode));
8150 if (TARGET_TOC && ! large_toc_ok)
8151 return false;
8152 if (GET_MODE_NUNITS (mode) != 1)
8153 return false;
8154 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8155 && !(/* ??? Assume floating point reg based on mode? */
8156 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
8157 && (mode == DFmode || mode == DDmode)))
8158 return false;
8160 return CONSTANT_P (x) || large_toc_ok;
8163 return false;
8167 /* Try machine-dependent ways of modifying an illegitimate address
8168 to be legitimate. If we find one, return the new, valid address.
8169 This is used from only one place: `memory_address' in explow.c.
8171 OLDX is the address as it was before break_out_memory_refs was
8172 called. In some cases it is useful to look at this to decide what
8173 needs to be done.
8175 It is always safe for this function to do nothing. It exists to
8176 recognize opportunities to optimize the output.
8178 On RS/6000, first check for the sum of a register with a constant
8179 integer that is out of range. If so, generate code to add the
8180 constant with the low-order 16 bits masked to the register and force
8181 this result into another register (this can be done with `cau').
8182 Then generate an address of REG+(CONST&0xffff), allowing for the
8183 possibility of bit 16 being a one.
8185 Then check for the sum of a register and something not constant, try to
8186 load the other things into a register and return the sum. */
8188 static rtx
8189 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8190 machine_mode mode)
8192 unsigned int extra;
8194 if (!reg_offset_addressing_ok_p (mode)
8195 || mode_supports_vsx_dform_quad (mode))
8197 if (virtual_stack_registers_memory_p (x))
8198 return x;
8200 /* In theory we should not be seeing addresses of the form reg+0,
8201 but just in case it is generated, optimize it away. */
8202 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8203 return force_reg (Pmode, XEXP (x, 0));
8205 /* For TImode with load/store quad, restrict addresses to just a single
8206 pointer, so it works with both GPRs and VSX registers. */
8207 /* Make sure both operands are registers. */
8208 else if (GET_CODE (x) == PLUS
8209 && (mode != TImode || !TARGET_QUAD_MEMORY))
8210 return gen_rtx_PLUS (Pmode,
8211 force_reg (Pmode, XEXP (x, 0)),
8212 force_reg (Pmode, XEXP (x, 1)));
8213 else
8214 return force_reg (Pmode, x);
8216 if (GET_CODE (x) == SYMBOL_REF)
8218 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8219 if (model != 0)
8220 return rs6000_legitimize_tls_address (x, model);
8223 extra = 0;
8224 switch (mode)
8226 case TFmode:
8227 case TDmode:
8228 case TImode:
8229 case PTImode:
8230 case IFmode:
8231 case KFmode:
8232 /* As in legitimate_offset_address_p we do not assume
8233 worst-case. The mode here is just a hint as to the registers
8234 used. A TImode is usually in gprs, but may actually be in
8235 fprs. Leave worst-case scenario for reload to handle via
8236 insn constraints. PTImode is only GPRs. */
8237 extra = 8;
8238 break;
8239 default:
8240 break;
8243 if (GET_CODE (x) == PLUS
8244 && GET_CODE (XEXP (x, 0)) == REG
8245 && GET_CODE (XEXP (x, 1)) == CONST_INT
8246 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8247 >= 0x10000 - extra)
8248 && !(SPE_VECTOR_MODE (mode)
8249 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
8251 HOST_WIDE_INT high_int, low_int;
8252 rtx sum;
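/* For example, x = (plus r3 70000) splits into low_int = 4464 and
   high_int = 65536; the high part becomes a separate add (typically a
   single addis) and 4464 remains as the D-form offset. */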
8253 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8254 if (low_int >= 0x8000 - extra)
8255 low_int = 0;
8256 high_int = INTVAL (XEXP (x, 1)) - low_int;
8257 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8258 GEN_INT (high_int)), 0);
8259 return plus_constant (Pmode, sum, low_int);
8261 else if (GET_CODE (x) == PLUS
8262 && GET_CODE (XEXP (x, 0)) == REG
8263 && GET_CODE (XEXP (x, 1)) != CONST_INT
8264 && GET_MODE_NUNITS (mode) == 1
8265 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8266 || (/* ??? Assume floating point reg based on mode? */
8267 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
8268 && (mode == DFmode || mode == DDmode)))
8269 && !avoiding_indexed_address_p (mode))
8271 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8272 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8274 else if (SPE_VECTOR_MODE (mode)
8275 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
8277 if (mode == DImode)
8278 return x;
8279 /* We accept [reg + reg] and [reg + OFFSET]. */
8281 if (GET_CODE (x) == PLUS)
8283 rtx op1 = XEXP (x, 0);
8284 rtx op2 = XEXP (x, 1);
8285 rtx y;
8287 op1 = force_reg (Pmode, op1);
8289 if (GET_CODE (op2) != REG
8290 && (GET_CODE (op2) != CONST_INT
8291 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
8292 || (GET_MODE_SIZE (mode) > 8
8293 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
8294 op2 = force_reg (Pmode, op2);
8296 /* We can't always do [reg + reg] for these, because [reg +
8297 reg + offset] is not a legitimate addressing mode. */
8298 y = gen_rtx_PLUS (Pmode, op1, op2);
8300 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8301 return force_reg (Pmode, y);
8302 else
8303 return y;
8306 return force_reg (Pmode, x);
8308 else if ((TARGET_ELF
8309 #if TARGET_MACHO
8310 || !MACHO_DYNAMIC_NO_PIC_P
8311 #endif
8313 && TARGET_32BIT
8314 && TARGET_NO_TOC
8315 && ! flag_pic
8316 && GET_CODE (x) != CONST_INT
8317 && GET_CODE (x) != CONST_WIDE_INT
8318 && GET_CODE (x) != CONST_DOUBLE
8319 && CONSTANT_P (x)
8320 && GET_MODE_NUNITS (mode) == 1
8321 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8322 || (/* ??? Assume floating point reg based on mode? */
8323 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
8324 && (mode == DFmode || mode == DDmode))))
8326 rtx reg = gen_reg_rtx (Pmode);
8327 if (TARGET_ELF)
8328 emit_insn (gen_elf_high (reg, x));
8329 else
8330 emit_insn (gen_macho_high (reg, x));
8331 return gen_rtx_LO_SUM (Pmode, reg, x);
8333 else if (TARGET_TOC
8334 && GET_CODE (x) == SYMBOL_REF
8335 && constant_pool_expr_p (x)
8336 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8337 return create_TOC_reference (x, NULL_RTX);
8338 else
8339 return x;
8342 /* Debug version of rs6000_legitimize_address. */
8343 static rtx
8344 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8346 rtx ret;
8347 rtx_insn *insns;
8349 start_sequence ();
8350 ret = rs6000_legitimize_address (x, oldx, mode);
8351 insns = get_insns ();
8352 end_sequence ();
8354 if (ret != x)
8356 fprintf (stderr,
8357 "\nrs6000_legitimize_address: mode %s, old code %s, "
8358 "new code %s, modified\n",
8359 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8360 GET_RTX_NAME (GET_CODE (ret)));
8362 fprintf (stderr, "Original address:\n");
8363 debug_rtx (x);
8365 fprintf (stderr, "oldx:\n");
8366 debug_rtx (oldx);
8368 fprintf (stderr, "New address:\n");
8369 debug_rtx (ret);
8371 if (insns)
8373 fprintf (stderr, "Insns added:\n");
8374 debug_rtx_list (insns, 20);
8377 else
8379 fprintf (stderr,
8380 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8381 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8383 debug_rtx (x);
8386 if (insns)
8387 emit_insn (insns);
8389 return ret;
8392 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8393 We need to emit DTP-relative relocations. */
8395 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8396 static void
8397 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8399 switch (size)
8401 case 4:
8402 fputs ("\t.long\t", file);
8403 break;
8404 case 8:
8405 fputs (DOUBLE_INT_ASM_OP, file);
8406 break;
8407 default:
8408 gcc_unreachable ();
8410 output_addr_const (file, x);
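/* For SIZE 4 on ELF this emits e.g. ".long foo@dtprel+0x8000"; the
   0x8000 bias follows the PowerPC TLS ABI convention for DTP-relative
   offsets, letting a signed 16-bit field span 64K. */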
8411 if (TARGET_ELF)
8412 fputs ("@dtprel+0x8000", file);
8413 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8415 switch (SYMBOL_REF_TLS_MODEL (x))
8417 case 0:
8418 break;
8419 case TLS_MODEL_LOCAL_EXEC:
8420 fputs ("@le", file);
8421 break;
8422 case TLS_MODEL_INITIAL_EXEC:
8423 fputs ("@ie", file);
8424 break;
8425 case TLS_MODEL_GLOBAL_DYNAMIC:
8426 case TLS_MODEL_LOCAL_DYNAMIC:
8427 fputs ("@m", file);
8428 break;
8429 default:
8430 gcc_unreachable ();
8435 /* Return true if X is a symbol that refers to real (rather than emulated)
8436 TLS. */
8438 static bool
8439 rs6000_real_tls_symbol_ref_p (rtx x)
8441 return (GET_CODE (x) == SYMBOL_REF
8442 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8445 /* In the name of slightly smaller debug output, and to cater to
8446 general assembler lossage, recognize various UNSPEC sequences
8447 and turn them back into a direct symbol reference. */
8449 static rtx
8450 rs6000_delegitimize_address (rtx orig_x)
8452 rtx x, y, offset;
8454 orig_x = delegitimize_mem_from_attrs (orig_x);
8455 x = orig_x;
8456 if (MEM_P (x))
8457 x = XEXP (x, 0);
8459 y = x;
8460 if (TARGET_CMODEL != CMODEL_SMALL
8461 && GET_CODE (y) == LO_SUM)
8462 y = XEXP (y, 1);
8464 offset = NULL_RTX;
8465 if (GET_CODE (y) == PLUS
8466 && GET_MODE (y) == Pmode
8467 && CONST_INT_P (XEXP (y, 1)))
8469 offset = XEXP (y, 1);
8470 y = XEXP (y, 0);
8473 if (GET_CODE (y) == UNSPEC
8474 && XINT (y, 1) == UNSPEC_TOCREL)
8476 y = XVECEXP (y, 0, 0);
8478 #ifdef HAVE_AS_TLS
8479 /* Do not associate thread-local symbols with the original
8480 constant pool symbol. */
8481 if (TARGET_XCOFF
8482 && GET_CODE (y) == SYMBOL_REF
8483 && CONSTANT_POOL_ADDRESS_P (y)
8484 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8485 return orig_x;
8486 #endif
8488 if (offset != NULL_RTX)
8489 y = gen_rtx_PLUS (Pmode, y, offset);
8490 if (!MEM_P (orig_x))
8491 return y;
8492 else
8493 return replace_equiv_address_nv (orig_x, y);
8496 if (TARGET_MACHO
8497 && GET_CODE (orig_x) == LO_SUM
8498 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8500 y = XEXP (XEXP (orig_x, 1), 0);
8501 if (GET_CODE (y) == UNSPEC
8502 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8503 return XVECEXP (y, 0, 0);
8506 return orig_x;
8509 /* Return true if X shouldn't be emitted into the debug info.
8510 The linker doesn't like .toc section references from
8511 .debug_* sections, so reject .toc section symbols. */
8513 static bool
8514 rs6000_const_not_ok_for_debug_p (rtx x)
8516 if (GET_CODE (x) == SYMBOL_REF
8517 && CONSTANT_POOL_ADDRESS_P (x))
8519 rtx c = get_pool_constant (x);
8520 machine_mode cmode = get_pool_mode (x);
8521 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8522 return true;
8525 return false;
8528 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8530 static GTY(()) rtx rs6000_tls_symbol;
8531 static rtx
8532 rs6000_tls_get_addr (void)
8534 if (!rs6000_tls_symbol)
8535 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8537 return rs6000_tls_symbol;
8540 /* Construct the SYMBOL_REF for TLS GOT references. */
8542 static GTY(()) rtx rs6000_got_symbol;
8543 static rtx
8544 rs6000_got_sym (void)
8546 if (!rs6000_got_symbol)
8548 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8549 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8550 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8553 return rs6000_got_symbol;
8556 /* AIX Thread-Local Address support. */
8558 static rtx
8559 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8561 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8562 const char *name;
8563 char *tlsname;
8565 name = XSTR (addr, 0);
8566 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
8567 or will be in the TLS private data section. */
8568 if (name[strlen (name) - 1] != ']'
8569 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8570 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8572 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8573 strcpy (tlsname, name);
8574 strcat (tlsname,
8575 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8576 tlsaddr = copy_rtx (addr);
8577 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8579 else
8580 tlsaddr = addr;
8582 /* Place addr into TOC constant pool. */
8583 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8585 /* Output the TOC entry and create the MEM referencing the value. */
8586 if (constant_pool_expr_p (XEXP (sym, 0))
8587 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8589 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8590 mem = gen_const_mem (Pmode, tocref);
8591 set_mem_alias_set (mem, get_TOC_alias_set ());
8593 else
8594 return sym;
8596 /* Use global-dynamic for local-dynamic. */
8597 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8598 || model == TLS_MODEL_LOCAL_DYNAMIC)
8600 /* Create new TOC reference for @m symbol. */
8601 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8602 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8603 strcpy (tlsname, "*LCM");
8604 strcat (tlsname, name + 3);
8605 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8606 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8607 tocref = create_TOC_reference (modaddr, NULL_RTX);
8608 rtx modmem = gen_const_mem (Pmode, tocref);
8609 set_mem_alias_set (modmem, get_TOC_alias_set ());
8611 rtx modreg = gen_reg_rtx (Pmode);
8612 emit_insn (gen_rtx_SET (modreg, modmem));
8614 tmpreg = gen_reg_rtx (Pmode);
8615 emit_insn (gen_rtx_SET (tmpreg, mem));
8617 dest = gen_reg_rtx (Pmode);
8618 if (TARGET_32BIT)
8619 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8620 else
8621 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8622 return dest;
8624 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8625 else if (TARGET_32BIT)
8627 tlsreg = gen_reg_rtx (SImode);
8628 emit_insn (gen_tls_get_tpointer (tlsreg));
8630 else
8631 tlsreg = gen_rtx_REG (DImode, 13);
8633 /* Load the TOC value into temporary register. */
8634 tmpreg = gen_reg_rtx (Pmode);
8635 emit_insn (gen_rtx_SET (tmpreg, mem));
8636 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8637 gen_rtx_MINUS (Pmode, addr, tlsreg));
8639 /* Add TOC symbol value to TLS pointer. */
8640 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8642 return dest;
8645 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8646 this (thread-local) address. */
8648 static rtx
8649 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8651 rtx dest, insn;
8653 if (TARGET_XCOFF)
8654 return rs6000_legitimize_tls_address_aix (addr, model);
8656 dest = gen_reg_rtx (Pmode);
8657 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8659 rtx tlsreg;
8661 if (TARGET_64BIT)
8663 tlsreg = gen_rtx_REG (Pmode, 13);
8664 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8666 else
8668 tlsreg = gen_rtx_REG (Pmode, 2);
8669 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8671 emit_insn (insn);
8673 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8675 rtx tlsreg, tmp;
8677 tmp = gen_reg_rtx (Pmode);
8678 if (TARGET_64BIT)
8680 tlsreg = gen_rtx_REG (Pmode, 13);
8681 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8683 else
8685 tlsreg = gen_rtx_REG (Pmode, 2);
8686 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8688 emit_insn (insn);
8689 if (TARGET_64BIT)
8690 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8691 else
8692 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8693 emit_insn (insn);
8695 else
8697 rtx r3, got, tga, tmp1, tmp2, call_insn;
8699 /* We currently use relocations like @got@tlsgd for tls, which
8700 means the linker will handle allocation of tls entries, placing
8701 them in the .got section. So use a pointer to the .got section,
8702 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8703 or to secondary GOT sections used by 32-bit -fPIC. */
8704 if (TARGET_64BIT)
8705 got = gen_rtx_REG (Pmode, 2);
8706 else
8708 if (flag_pic == 1)
8709 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8710 else
8712 rtx gsym = rs6000_got_sym ();
8713 got = gen_reg_rtx (Pmode);
8714 if (flag_pic == 0)
8715 rs6000_emit_move (got, gsym, Pmode);
8716 else
8718 rtx mem, lab, last;
8720 tmp1 = gen_reg_rtx (Pmode);
8721 tmp2 = gen_reg_rtx (Pmode);
8722 mem = gen_const_mem (Pmode, tmp1);
8723 lab = gen_label_rtx ();
8724 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8725 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8726 if (TARGET_LINK_STACK)
8727 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8728 emit_move_insn (tmp2, mem);
8729 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8730 set_unique_reg_note (last, REG_EQUAL, gsym);
8735 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8737 tga = rs6000_tls_get_addr ();
8738 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8739 1, const0_rtx, Pmode);
8741 r3 = gen_rtx_REG (Pmode, 3);
8742 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8744 if (TARGET_64BIT)
8745 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
8746 else
8747 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
8749 else if (DEFAULT_ABI == ABI_V4)
8750 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
8751 else
8752 gcc_unreachable ();
8753 call_insn = last_call_insn ();
8754 PATTERN (call_insn) = insn;
8755 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8756 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8757 pic_offset_table_rtx);
8759 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8761 tga = rs6000_tls_get_addr ();
8762 tmp1 = gen_reg_rtx (Pmode);
8763 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8764 1, const0_rtx, Pmode);
8766 r3 = gen_rtx_REG (Pmode, 3);
8767 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8769 if (TARGET_64BIT)
8770 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
8771 else
8772 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
8774 else if (DEFAULT_ABI == ABI_V4)
8775 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
8776 else
8777 gcc_unreachable ();
8778 call_insn = last_call_insn ();
8779 PATTERN (call_insn) = insn;
8780 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8781 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8782 pic_offset_table_rtx);
8784 if (rs6000_tls_size == 16)
8786 if (TARGET_64BIT)
8787 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8788 else
8789 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8791 else if (rs6000_tls_size == 32)
8793 tmp2 = gen_reg_rtx (Pmode);
8794 if (TARGET_64BIT)
8795 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8796 else
8797 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8798 emit_insn (insn);
8799 if (TARGET_64BIT)
8800 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8801 else
8802 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8804 else
8806 tmp2 = gen_reg_rtx (Pmode);
8807 if (TARGET_64BIT)
8808 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8809 else
8810 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8811 emit_insn (insn);
8812 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8814 emit_insn (insn);
8816 else
8818 /* IE, or 64-bit offset LE. */
8819 tmp2 = gen_reg_rtx (Pmode);
8820 if (TARGET_64BIT)
8821 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8822 else
8823 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8824 emit_insn (insn);
8825 if (TARGET_64BIT)
8826 insn = gen_tls_tls_64 (dest, tmp2, addr);
8827 else
8828 insn = gen_tls_tls_32 (dest, tmp2, addr);
8829 emit_insn (insn);
8833 return dest;
8836 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8838 static bool
8839 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8841 if (GET_CODE (x) == HIGH
8842 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8843 return true;
8845 /* A TLS symbol in the TOC cannot contain a sum. */
8846 if (GET_CODE (x) == CONST
8847 && GET_CODE (XEXP (x, 0)) == PLUS
8848 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8849 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8850 return true;
8852 /* Do not place an ELF TLS symbol in the constant pool. */
8853 return TARGET_ELF && tls_referenced_p (x);
8856 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8857 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8858 can be addressed relative to the toc pointer. */
8860 static bool
8861 use_toc_relative_ref (rtx sym, machine_mode mode)
8863 return ((constant_pool_expr_p (sym)
8864 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8865 get_pool_mode (sym)))
8866 || (TARGET_CMODEL == CMODEL_MEDIUM
8867 && SYMBOL_REF_LOCAL_P (sym)
8868 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8871 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8872 replace the input X, or the original X if no replacement is called for.
8873 The output parameter *WIN is 1 if the calling macro should goto WIN,
8874 0 if it should not.
8876 For RS/6000, we wish to handle large displacements off a base
8877 register by splitting the addend across an addi/addis and the mem insn.
8878 This cuts the number of extra insns needed from 3 to 1.
8880 On Darwin, we use this to generate code for floating point constants.
8881 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8882 The Darwin code is inside #if TARGET_MACHO because only then are the
8883 machopic_* functions defined. */
8884 static rtx
8885 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8886 int opnum, int type,
8887 int ind_levels ATTRIBUTE_UNUSED, int *win)
8889 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8890 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
8892 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8893 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8894 if (reg_offset_p
8895 && opnum == 1
8896 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8897 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8898 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8899 && TARGET_P9_VECTOR)
8900 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8901 && TARGET_P9_VECTOR)))
8902 reg_offset_p = false;
8904 /* We must recognize output that we have already generated ourselves. */
8905 if (GET_CODE (x) == PLUS
8906 && GET_CODE (XEXP (x, 0)) == PLUS
8907 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8908 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8909 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8911 if (TARGET_DEBUG_ADDR)
8913 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8914 debug_rtx (x);
8916 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8917 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8918 opnum, (enum reload_type) type);
8919 *win = 1;
8920 return x;
8923 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8924 if (GET_CODE (x) == LO_SUM
8925 && GET_CODE (XEXP (x, 0)) == HIGH)
8927 if (TARGET_DEBUG_ADDR)
8929 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8930 debug_rtx (x);
8932 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8933 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8934 opnum, (enum reload_type) type);
8935 *win = 1;
8936 return x;
8939 #if TARGET_MACHO
8940 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8941 && GET_CODE (x) == LO_SUM
8942 && GET_CODE (XEXP (x, 0)) == PLUS
8943 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8944 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8945 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8946 && machopic_operand_p (XEXP (x, 1)))
8948 /* Result of previous invocation of this function on Darwin
8949 floating point constant. */
8950 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8951 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8952 opnum, (enum reload_type) type);
8953 *win = 1;
8954 return x;
8956 #endif
8958 if (TARGET_CMODEL != CMODEL_SMALL
8959 && reg_offset_p
8960 && !quad_offset_p
8961 && small_toc_ref (x, VOIDmode))
8963 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8964 x = gen_rtx_LO_SUM (Pmode, hi, x);
8965 if (TARGET_DEBUG_ADDR)
8967 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8968 debug_rtx (x);
8970 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8971 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8972 opnum, (enum reload_type) type);
8973 *win = 1;
8974 return x;
8977 if (GET_CODE (x) == PLUS
8978 && REG_P (XEXP (x, 0))
8979 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
8980 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8981 && CONST_INT_P (XEXP (x, 1))
8982 && reg_offset_p
8983 && !SPE_VECTOR_MODE (mode)
8984 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
8985 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8987 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
8988 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
8989 HOST_WIDE_INT high
8990 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8992 /* Check for 32-bit overflow or quad addresses with one of the
8993 four least significant bits set. */
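/* For example, val = 0x7fffffff yields low = -1 and high = -0x80000000,
   so high + low != val and the address is rejected. */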
8994 if (high + low != val
8995 || (quad_offset_p && (low & 0xf)))
8997 *win = 0;
8998 return x;
9001 /* Reload the high part into a base reg; leave the low part
9002 in the mem directly. */
9004 x = gen_rtx_PLUS (GET_MODE (x),
9005 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9006 GEN_INT (high)),
9007 GEN_INT (low));
9009 if (TARGET_DEBUG_ADDR)
9011 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9012 debug_rtx (x);
9014 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9015 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9016 opnum, (enum reload_type) type);
9017 *win = 1;
9018 return x;
9021 if (GET_CODE (x) == SYMBOL_REF
9022 && reg_offset_p
9023 && !quad_offset_p
9024 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9025 && !SPE_VECTOR_MODE (mode)
9026 #if TARGET_MACHO
9027 && DEFAULT_ABI == ABI_DARWIN
9028 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9029 && machopic_symbol_defined_p (x)
9030 #else
9031 && DEFAULT_ABI == ABI_V4
9032 && !flag_pic
9033 #endif
9034 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9035 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9036 without fprs.
9037 ??? Assume floating point reg based on mode? This assumption is
9038 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9039 where reload ends up doing a DFmode load of a constant from
9040 mem using two gprs. Unfortunately, at this point reload
9041 hasn't yet selected regs so poking around in reload data
9042 won't help and even if we could figure out the regs reliably,
9043 we'd still want to allow this transformation when the mem is
9044 naturally aligned. Since we say the address is good here, we
9045 can't disable offsets from LO_SUMs in mem_operand_gpr.
9046 FIXME: Allow offset from lo_sum for other modes too, when
9047 mem is sufficiently aligned.
9049 Also disallow this if the type can go in VMX/Altivec registers, since
9050 those registers do not have d-form (reg+offset) address modes. */
9051 && !reg_addr[mode].scalar_in_vmx_p
9052 && mode != TFmode
9053 && mode != TDmode
9054 && mode != IFmode
9055 && mode != KFmode
9056 && (mode != TImode || !TARGET_VSX_TIMODE)
9057 && mode != PTImode
9058 && (mode != DImode || TARGET_POWERPC64)
9059 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9060 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
9062 #if TARGET_MACHO
9063 if (flag_pic)
9065 rtx offset = machopic_gen_offset (x);
9066 x = gen_rtx_LO_SUM (GET_MODE (x),
9067 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9068 gen_rtx_HIGH (Pmode, offset)), offset);
9070 else
9071 #endif
9072 x = gen_rtx_LO_SUM (GET_MODE (x),
9073 gen_rtx_HIGH (Pmode, x), x);
9075 if (TARGET_DEBUG_ADDR)
9077 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9078 debug_rtx (x);
9080 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9081 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9082 opnum, (enum reload_type) type);
9083 *win = 1;
9084 return x;
9087 /* Reload an offset address wrapped by an AND that represents the
9088 masking of the lower bits. Strip the outer AND and let reload
9089 convert the offset address into an indirect address. For VSX,
9090 force reload to create the address with an AND in a separate
9091 register, because we can't guarantee an altivec register will
9092 be used. */
9093 if (VECTOR_MEM_ALTIVEC_P (mode)
9094 && GET_CODE (x) == AND
9095 && GET_CODE (XEXP (x, 0)) == PLUS
9096 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9097 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9098 && GET_CODE (XEXP (x, 1)) == CONST_INT
9099 && INTVAL (XEXP (x, 1)) == -16)
9101 x = XEXP (x, 0);
9102 *win = 1;
9103 return x;
9106 if (TARGET_TOC
9107 && reg_offset_p
9108 && !quad_offset_p
9109 && GET_CODE (x) == SYMBOL_REF
9110 && use_toc_relative_ref (x, mode))
9112 x = create_TOC_reference (x, NULL_RTX);
9113 if (TARGET_CMODEL != CMODEL_SMALL)
9115 if (TARGET_DEBUG_ADDR)
9117 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9118 debug_rtx (x);
9120 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9121 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9122 opnum, (enum reload_type) type);
9124 *win = 1;
9125 return x;
9127 *win = 0;
9128 return x;
9131 /* Debug version of rs6000_legitimize_reload_address. */
9132 static rtx
9133 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9134 int opnum, int type,
9135 int ind_levels, int *win)
9137 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9138 ind_levels, win);
9139 fprintf (stderr,
9140 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9141 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9142 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9143 debug_rtx (x);
9145 if (x == ret)
9146 fprintf (stderr, "Same address returned\n");
9147 else if (!ret)
9148 fprintf (stderr, "NULL returned\n");
9149 else
9151 fprintf (stderr, "New address:\n");
9152 debug_rtx (ret);
9155 return ret;
9158 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9159 that is a valid memory address for an instruction.
9160 The MODE argument is the machine mode for the MEM expression
9161 that wants to use this address.
9163 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9164 refers to a constant pool entry of an address (or the sum of it
9165 plus a constant), a short (16-bit signed) constant plus a register,
9166 the sum of two registers, or a register indirect, possibly with an
9167 auto-increment. For DFmode, DDmode and DImode with a constant plus
9168 register, we must ensure that both words are addressable, or that
9169 PowerPC64 is used with the offset word-aligned.
9171 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9172 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9173 because adjacent memory cells are accessed by adding word-sized offsets
9174 during assembly output. */
9175 static bool
9176 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9178 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9179 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9181 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9182 if (VECTOR_MEM_ALTIVEC_P (mode)
9183 && GET_CODE (x) == AND
9184 && GET_CODE (XEXP (x, 1)) == CONST_INT
9185 && INTVAL (XEXP (x, 1)) == -16)
9186 x = XEXP (x, 0);
9188 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9189 return 0;
9190 if (legitimate_indirect_address_p (x, reg_ok_strict))
9191 return 1;
9192 if (TARGET_UPDATE
9193 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9194 && mode_supports_pre_incdec_p (mode)
9195 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9196 return 1;
9197 /* Handle restricted vector d-form offsets in ISA 3.0. */
9198 if (quad_offset_p)
9200 if (quad_address_p (x, mode, reg_ok_strict))
9201 return 1;
9203 else if (virtual_stack_registers_memory_p (x))
9204 return 1;
9206 else if (reg_offset_p)
9208 if (legitimate_small_data_p (mode, x))
9209 return 1;
9210 if (legitimate_constant_pool_address_p (x, mode,
9211 reg_ok_strict || lra_in_progress))
9212 return 1;
9213 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9214 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9215 return 1;
9218 /* For TImode, if we have load/store quad and TImode in VSX registers, only
9219 allow register indirect addresses. This will allow the values to go in
9220 either GPRs or VSX registers without reloading. The vector types would
9221 tend to go into VSX registers, so we allow REG+REG, while TImode seems
9222 somewhat split, in that some uses are GPR based, and some VSX based. */
9223 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
9224 return 0;
9225 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9226 if (! reg_ok_strict
9227 && reg_offset_p
9228 && GET_CODE (x) == PLUS
9229 && GET_CODE (XEXP (x, 0)) == REG
9230 && (XEXP (x, 0) == virtual_stack_vars_rtx
9231 || XEXP (x, 0) == arg_pointer_rtx)
9232 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9233 return 1;
9234 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9235 return 1;
9236 if (!FLOAT128_2REG_P (mode)
9237 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
9238 || TARGET_POWERPC64
9239 || (mode != DFmode && mode != DDmode)
9240 || (TARGET_E500_DOUBLE && mode != DDmode))
9241 && (TARGET_POWERPC64 || mode != DImode)
9242 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9243 && mode != PTImode
9244 && !avoiding_indexed_address_p (mode)
9245 && legitimate_indexed_address_p (x, reg_ok_strict))
9246 return 1;
9247 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9248 && mode_supports_pre_modify_p (mode)
9249 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9250 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9251 reg_ok_strict, false)
9252 || (!avoiding_indexed_address_p (mode)
9253 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9254 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9255 return 1;
9256 if (reg_offset_p && !quad_offset_p
9257 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9258 return 1;
9259 return 0;
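/* A minimal sketch (not from the GCC sources) of the D-form displacement
   rule described above: "constant plus register" is only legitimate when
   the constant fits a signed 16-bit immediate.  The biased unsigned
   comparison is the same idiom this file uses throughout.  */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
d_form_disp_ok (int64_t d)
{
  /* d in [-0x8000, 0x7fff]  iff  d + 0x8000 in [0, 0xffff].  */
  return (uint64_t) (d + 0x8000) < 0x10000;
}
#endif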
9262 /* Debug version of rs6000_legitimate_address_p. */
9263 static bool
9264 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9265 bool reg_ok_strict)
9267 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9268 fprintf (stderr,
9269 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9270 "strict = %d, reload = %s, code = %s\n",
9271 ret ? "true" : "false",
9272 GET_MODE_NAME (mode),
9273 reg_ok_strict,
9274 (reload_completed
9275 ? "after"
9276 : (reload_in_progress ? "progress" : "before")),
9277 GET_RTX_NAME (GET_CODE (x)));
9278 debug_rtx (x);
9280 return ret;
9283 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9285 static bool
9286 rs6000_mode_dependent_address_p (const_rtx addr,
9287 addr_space_t as ATTRIBUTE_UNUSED)
9289 return rs6000_mode_dependent_address_ptr (addr);
9292 /* Go to LABEL if ADDR (a legitimate address expression)
9293 has an effect that depends on the machine mode it is used for.
9295 On the RS/6000 this is true of all integral offsets (since AltiVec
9296 and VSX modes don't allow them) or is a pre-increment or decrement.
9298 ??? Except that due to conceptual problems in offsettable_address_p
9299 we can't really report the problems of integral offsets. So leave
9300 this assuming that the adjustable offset must be valid for the
9301 sub-words of a TFmode operand, which is what we had before. */
9303 static bool
9304 rs6000_mode_dependent_address (const_rtx addr)
9306 switch (GET_CODE (addr))
9308 case PLUS:
9309 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9310 is considered a legitimate address before reload, so there
9311 are no offset restrictions in that case. Note that this
9312 condition is safe in strict mode because any address involving
9313 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9314 been rejected as illegitimate. */
9315 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9316 && XEXP (addr, 0) != arg_pointer_rtx
9317 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9319 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9320 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9322 break;
9324 case LO_SUM:
9325 /* Anything in the constant pool is sufficiently aligned that
9326 all bytes have the same high part address. */
9327 return !legitimate_constant_pool_address_p (addr, QImode, false);
9329 /* Auto-increment cases are now treated generically in recog.c. */
9330 case PRE_MODIFY:
9331 return TARGET_UPDATE;
9333 /* AND is only allowed in Altivec loads. */
9334 case AND:
9335 return true;
9337 default:
9338 break;
9341 return false;
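/* A worked example (illustrative sketch, not GCC code) of the PLUS test
   above: an offset VAL is mode dependent when VAL itself encodes as a
   16-bit displacement but the displacement of the last word of a
   worst-case 16-byte (TFmode) access would not.  That last word starts
   at VAL + 8 with 64-bit words and VAL + 12 with 32-bit words; e.g. on a
   32-bit target VAL == 32760 trips the test since 32760 + 12 > 32767.  */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
offset_is_mode_dependent (uint64_t val, bool powerpc64)
{
  return val + 0x8000 >= 0x10000 - (powerpc64 ? 8 : 12);
}
#endif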
9344 /* Debug version of rs6000_mode_dependent_address. */
9345 static bool
9346 rs6000_debug_mode_dependent_address (const_rtx addr)
9348 bool ret = rs6000_mode_dependent_address (addr);
9350 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9351 ret ? "true" : "false");
9352 debug_rtx (addr);
9354 return ret;
9357 /* Implement FIND_BASE_TERM. */
9359 rtx
9360 rs6000_find_base_term (rtx op)
9362 rtx base;
9364 base = op;
9365 if (GET_CODE (base) == CONST)
9366 base = XEXP (base, 0);
9367 if (GET_CODE (base) == PLUS)
9368 base = XEXP (base, 0);
9369 if (GET_CODE (base) == UNSPEC)
9370 switch (XINT (base, 1))
9372 case UNSPEC_TOCREL:
9373 case UNSPEC_MACHOPIC_OFFSET:
9374 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9375 for aliasing purposes. */
9376 return XVECEXP (base, 0, 0);
9379 return op;
9382 /* More elaborate version of recog's offsettable_memref_p predicate
9383 that works around the ??? note of rs6000_mode_dependent_address.
9384 In particular it accepts
9386 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9388 in 32-bit mode, which the recog predicate rejects. */
9390 static bool
9391 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9393 bool worst_case;
9395 if (!MEM_P (op))
9396 return false;
9398 /* First mimic offsettable_memref_p. */
9399 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9400 return true;
9402 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9403 the latter predicate knows nothing about the mode of the memory
9404 reference and, therefore, assumes that it is the largest supported
9405 mode (TFmode). As a consequence, legitimate offsettable memory
9406 references are rejected. rs6000_legitimate_offset_address_p contains
9407 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9408 at least with a little bit of help here given that we know the
9409 actual registers used. */
9410 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9411 || GET_MODE_SIZE (reg_mode) == 4);
9412 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9413 true, worst_case);
9416 /* Determine the reassociation width to be used in reassociate_bb.
9417 This takes into account how many parallel operations we
9418 can actually do of a given type, and also the latency.
9420 int add/sub 6/cycle
9421 mul 2/cycle
9422 vect add/sub/mul 2/cycle
9423 fp add/sub/mul 2/cycle
9424 dfp 1/cycle
9427 static int
9428 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9429 machine_mode mode)
9431 switch (rs6000_cpu)
9433 case PROCESSOR_POWER8:
9434 case PROCESSOR_POWER9:
9435 if (DECIMAL_FLOAT_MODE_P (mode))
9436 return 1;
9437 if (VECTOR_MODE_P (mode))
9438 return 4;
9439 if (INTEGRAL_MODE_P (mode))
9440 return opc == MULT_EXPR ? 4 : 6;
9441 if (FLOAT_MODE_P (mode))
9442 return 4;
9443 break;
9444 default:
9445 break;
9447 return 1;
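/* What the width above buys (illustrative only, not GCC code): with a
   reassociation width greater than 1, the tree reassociation pass can
   rebalance a serial reduction into independent subchains that a
   superscalar unit can overlap.  For eight doubles, the first form has
   a dependence chain of seven adds, the second a chain of three.  */
#if 0
double
serial_sum (const double *a)
{
  return ((((((a[0] + a[1]) + a[2]) + a[3]) + a[4]) + a[5]) + a[6]) + a[7];
}

double
balanced_sum (const double *a)
{
  return ((a[0] + a[1]) + (a[2] + a[3])) + ((a[4] + a[5]) + (a[6] + a[7]));
}
#endif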
9450 /* Change register usage conditional on target flags. */
9451 static void
9452 rs6000_conditional_register_usage (void)
9454 int i;
9456 if (TARGET_DEBUG_TARGET)
9457 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9459 /* Set MQ register fixed (already call_used) so that it will not be
9460 allocated. */
9461 fixed_regs[64] = 1;
9463 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9464 if (TARGET_64BIT)
9465 fixed_regs[13] = call_used_regs[13]
9466 = call_really_used_regs[13] = 1;
9468 /* Conditionally disable FPRs. */
9469 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
9470 for (i = 32; i < 64; i++)
9471 fixed_regs[i] = call_used_regs[i]
9472 = call_really_used_regs[i] = 1;
9474 /* The TOC register is not killed across calls in a way that is
9475 visible to the compiler. */
9476 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9477 call_really_used_regs[2] = 0;
9479 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9480 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9482 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9483 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9484 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9485 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9487 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9488 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9489 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9490 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9492 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9493 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9494 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9496 if (TARGET_SPE)
9498 global_regs[SPEFSCR_REGNO] = 1;
9499 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
9500 registers in prologues and epilogues. We no longer use r14
9501 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
9502 pool for link-compatibility with older versions of GCC. Once
9503 "old" code has died out, we can return r14 to the allocation
9504 pool. */
9505 fixed_regs[14]
9506 = call_used_regs[14]
9507 = call_really_used_regs[14] = 1;
9510 if (!TARGET_ALTIVEC && !TARGET_VSX)
9512 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9513 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9514 call_really_used_regs[VRSAVE_REGNO] = 1;
9517 if (TARGET_ALTIVEC || TARGET_VSX)
9518 global_regs[VSCR_REGNO] = 1;
9520 if (TARGET_ALTIVEC_ABI)
9522 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9523 call_used_regs[i] = call_really_used_regs[i] = 1;
9525 /* AIX reserves VR20:31 in non-extended ABI mode. */
9526 if (TARGET_XCOFF)
9527 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9528 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9533 /* Output insns to set DEST equal to the constant SOURCE as a series of
9534 lis, ori and shl instructions and return TRUE. */
9536 bool
9537 rs6000_emit_set_const (rtx dest, rtx source)
9539 machine_mode mode = GET_MODE (dest);
9540 rtx temp, set;
9541 rtx_insn *insn;
9542 HOST_WIDE_INT c;
9544 gcc_checking_assert (CONST_INT_P (source));
9545 c = INTVAL (source);
9546 switch (mode)
9548 case QImode:
9549 case HImode:
9550 emit_insn (gen_rtx_SET (dest, source));
9551 return true;
9553 case SImode:
9554 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9556 emit_insn (gen_rtx_SET (copy_rtx (temp),
9557 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9558 emit_insn (gen_rtx_SET (dest,
9559 gen_rtx_IOR (SImode, copy_rtx (temp),
9560 GEN_INT (c & 0xffff))));
9561 break;
9563 case DImode:
9564 if (!TARGET_POWERPC64)
9566 rtx hi, lo;
9568 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9569 DImode);
9570 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9571 DImode);
9572 emit_move_insn (hi, GEN_INT (c >> 32));
9573 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9574 emit_move_insn (lo, GEN_INT (c));
9576 else
9577 rs6000_emit_set_long_const (dest, c);
9578 break;
9580 default:
9581 gcc_unreachable ();
9584 insn = get_last_insn ();
9585 set = single_set (insn);
9586 if (! CONSTANT_P (SET_SRC (set)))
9587 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9589 return true;
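/* A minimal sketch (not GCC code) of the SImode split above: lis
   materializes the high 16 bits, ori merges the low 16 bits, so any
   32-bit constant needs at most two instructions.  */
#if 0
#include <stdint.h>

static uint32_t
build_si_const (uint32_t c)
{
  uint32_t hi = c & ~(uint32_t) 0xffff;  /* lis rT, (c >> 16)        */
  return hi | (c & 0xffff);              /* ori rD, rT, (c & 0xffff) */
}
#endif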
9592 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9593 Output insns to set DEST equal to the constant C as a series of
9594 lis, ori and shl instructions. */
9596 static void
9597 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9599 rtx temp;
9600 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9602 ud1 = c & 0xffff;
9603 c = c >> 16;
9604 ud2 = c & 0xffff;
9605 c = c >> 16;
9606 ud3 = c & 0xffff;
9607 c = c >> 16;
9608 ud4 = c & 0xffff;
9610 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9611 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9612 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9614 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9615 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9617 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9619 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9620 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9621 if (ud1 != 0)
9622 emit_move_insn (dest,
9623 gen_rtx_IOR (DImode, copy_rtx (temp),
9624 GEN_INT (ud1)));
9626 else if (ud3 == 0 && ud4 == 0)
9628 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9630 gcc_assert (ud2 & 0x8000);
9631 emit_move_insn (copy_rtx (temp),
9632 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9633 if (ud1 != 0)
9634 emit_move_insn (copy_rtx (temp),
9635 gen_rtx_IOR (DImode, copy_rtx (temp),
9636 GEN_INT (ud1)));
9637 emit_move_insn (dest,
9638 gen_rtx_ZERO_EXTEND (DImode,
9639 gen_lowpart (SImode,
9640 copy_rtx (temp))));
9642 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9643 || (ud4 == 0 && ! (ud3 & 0x8000)))
9645 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9647 emit_move_insn (copy_rtx (temp),
9648 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9649 if (ud2 != 0)
9650 emit_move_insn (copy_rtx (temp),
9651 gen_rtx_IOR (DImode, copy_rtx (temp),
9652 GEN_INT (ud2)));
9653 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9654 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9655 GEN_INT (16)));
9656 if (ud1 != 0)
9657 emit_move_insn (dest,
9658 gen_rtx_IOR (DImode, copy_rtx (temp),
9659 GEN_INT (ud1)));
9661 else
9663 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9665 emit_move_insn (copy_rtx (temp),
9666 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9667 if (ud3 != 0)
9668 emit_move_insn (copy_rtx (temp),
9669 gen_rtx_IOR (DImode, copy_rtx (temp),
9670 GEN_INT (ud3)));
9672 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9673 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9674 GEN_INT (32)));
9675 if (ud2 != 0)
9676 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9677 gen_rtx_IOR (DImode, copy_rtx (temp),
9678 GEN_INT (ud2 << 16)));
9679 if (ud1 != 0)
9680 emit_move_insn (dest,
9681 gen_rtx_IOR (DImode, copy_rtx (temp),
9682 GEN_INT (ud1)));
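/* A hedged sketch (not GCC code) of the decomposition used above.  The
   constant is cut into four 16-bit chunks, ud1 (low) through ud4 (high),
   and "(x ^ S) - S" with sign bit S models the sign extension that
   lis/addis apply to their immediate.  The worst case above is five
   instructions: lis, ori, sldi 32, oris, ori.  */
#if 0
#include <stdint.h>

static void
split_di_const (uint64_t c, uint64_t ud[4])
{
  for (int i = 0; i < 4; i++)
    ud[i] = (c >> (16 * i)) & 0xffff;   /* ud[0] == ud1 ... ud[3] == ud4 */
}

static int64_t
sext16 (uint64_t chunk)
{
  /* Sign-extend the low 16 bits; e.g. 0xffff -> -1.  */
  return ((int64_t) (chunk & 0xffff) ^ 0x8000) - 0x8000;
}
#endif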
9686 /* Helper for the following. Get rid of [r+r] memory refs
9687 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9689 static void
9690 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9692 if (reload_in_progress)
9693 return;
9695 if (GET_CODE (operands[0]) == MEM
9696 && GET_CODE (XEXP (operands[0], 0)) != REG
9697 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9698 GET_MODE (operands[0]), false))
9699 operands[0]
9700 = replace_equiv_address (operands[0],
9701 copy_addr_to_reg (XEXP (operands[0], 0)));
9703 if (GET_CODE (operands[1]) == MEM
9704 && GET_CODE (XEXP (operands[1], 0)) != REG
9705 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9706 GET_MODE (operands[1]), false))
9707 operands[1]
9708 = replace_equiv_address (operands[1],
9709 copy_addr_to_reg (XEXP (operands[1], 0)));
9712 /* Generate a vector of constants to permute MODE for a little-endian
9713 storage operation by swapping the two halves of a vector. */
9714 static rtvec
9715 rs6000_const_vec (machine_mode mode)
9717 int i, subparts;
9718 rtvec v;
9720 switch (mode)
9722 case V1TImode:
9723 subparts = 1;
9724 break;
9725 case V2DFmode:
9726 case V2DImode:
9727 subparts = 2;
9728 break;
9729 case V4SFmode:
9730 case V4SImode:
9731 subparts = 4;
9732 break;
9733 case V8HImode:
9734 subparts = 8;
9735 break;
9736 case V16QImode:
9737 subparts = 16;
9738 break;
9739 default:
9740 gcc_unreachable();
9743 v = rtvec_alloc (subparts);
9745 for (i = 0; i < subparts / 2; ++i)
9746 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9747 for (i = subparts / 2; i < subparts; ++i)
9748 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9750 return v;
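/* Worked example (not GCC code): for V4SImode the two loops above build
   the selector { 2, 3, 0, 1 }, i.e. the two 64-bit halves of the vector
   swap places while the order within each half is preserved.  */
#if 0
static void
halfswap_selector (int subparts, int sel[])
{
  for (int i = 0; i < subparts; i++)
    sel[i] = (i + subparts / 2) % subparts;  /* V16QI: 8..15, 0..7 */
}
#endif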
9753 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
9754 for a VSX load or store operation. */
9755 rtx
9756 rs6000_gen_le_vsx_permute (rtx source, machine_mode mode)
9758 /* Use ROTATE instead of VEC_SELECT on IEEE 128-bit floating point, and
9759 128-bit integers if they are allowed in VSX registers. */
9760 if (FLOAT128_VECTOR_P (mode) || mode == TImode)
9761 return gen_rtx_ROTATE (mode, source, GEN_INT (64));
9762 else
9764 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9765 return gen_rtx_VEC_SELECT (mode, source, par);
9769 /* Emit a little-endian load from vector memory location SOURCE to VSX
9770 register DEST in mode MODE. The load is done with two permuting
9771 insns that represent an lxvd2x and xxpermdi.
9772 void
9773 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9775 rtx tmp, permute_mem, permute_reg;
9777 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9778 V1TImode). */
9779 if (mode == TImode || mode == V1TImode)
9781 mode = V2DImode;
9782 dest = gen_lowpart (V2DImode, dest);
9783 source = adjust_address (source, V2DImode, 0);
9786 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9787 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
9788 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
9789 emit_insn (gen_rtx_SET (tmp, permute_mem));
9790 emit_insn (gen_rtx_SET (dest, permute_reg));
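/* Why two permutes (a hedged explanation, not GCC code): on little-endian
   targets lxvd2x loads the two 64-bit halves doubleword-swapped, so the
   load is modelled as swap(mem) into TMP followed by swap(TMP) into DEST.
   Keeping both swaps explicit in the RTL lets a later pass cancel
   adjacent swap pairs, because the half-swap composed with itself is the
   identity.  */
#if 0
#include <assert.h>

static int
halfswap (int i, int n)     /* index map of one doubleword swap */
{
  return (i + n / 2) % n;
}

static void
check_swap_is_involution (void)
{
  for (int i = 0; i < 4; i++)
    assert (halfswap (halfswap (i, 4), 4) == i);
}
#endif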
9793 /* Emit a little-endian store to vector memory location DEST from VSX
9794 register SOURCE in mode MODE. The store is done with two permuting
9795 insns that represent an xxpermdi and an stxvd2x.
9796 void
9797 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9799 rtx tmp, permute_src, permute_tmp;
9801 /* This should never be called during or after reload, because it does
9802 not re-permute the source register. It is intended only for use
9803 during expand. */
9804 gcc_assert (!reload_in_progress && !lra_in_progress && !reload_completed);
9806 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9807 V1TImode). */
9808 if (mode == TImode || mode == V1TImode)
9810 mode = V2DImode;
9811 dest = adjust_address (dest, V2DImode, 0);
9812 source = gen_lowpart (V2DImode, source);
9815 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9816 permute_src = rs6000_gen_le_vsx_permute (source, mode);
9817 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
9818 emit_insn (gen_rtx_SET (tmp, permute_src));
9819 emit_insn (gen_rtx_SET (dest, permute_tmp));
9822 /* Emit a sequence representing a little-endian VSX load or store,
9823 moving data from SOURCE to DEST in mode MODE. This is done
9824 separately from rs6000_emit_move to ensure it is called only
9825 during expand. LE VSX loads and stores introduced later are
9826 handled with a split. The expand-time RTL generation allows
9827 us to optimize away redundant pairs of register-permutes. */
9828 void
9829 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9831 gcc_assert (!BYTES_BIG_ENDIAN
9832 && VECTOR_MEM_VSX_P (mode)
9833 && !TARGET_P9_VECTOR
9834 && !gpr_or_gpr_p (dest, source)
9835 && (MEM_P (source) ^ MEM_P (dest)));
9837 if (MEM_P (source))
9839 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9840 rs6000_emit_le_vsx_load (dest, source, mode);
9842 else
9844 if (!REG_P (source))
9845 source = force_reg (mode, source);
9846 rs6000_emit_le_vsx_store (dest, source, mode);
9850 /* Emit a move from SOURCE to DEST in mode MODE. */
9851 void
9852 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9854 rtx operands[2];
9855 operands[0] = dest;
9856 operands[1] = source;
9858 if (TARGET_DEBUG_ADDR)
9860 fprintf (stderr,
9861 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
9862 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9863 GET_MODE_NAME (mode),
9864 reload_in_progress,
9865 reload_completed,
9866 can_create_pseudo_p ());
9867 debug_rtx (dest);
9868 fprintf (stderr, "source:\n");
9869 debug_rtx (source);
9872 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
9873 if (CONST_WIDE_INT_P (operands[1])
9874 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9876 /* This should be fixed with the introduction of CONST_WIDE_INT. */
9877 gcc_unreachable ();
9880 /* Check if GCC is setting up a block move that will end up using FP
9881 registers as temporaries. We must make sure this is acceptable. */
9882 if (GET_CODE (operands[0]) == MEM
9883 && GET_CODE (operands[1]) == MEM
9884 && mode == DImode
9885 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
9886 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
9887 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
9888 ? 32 : MEM_ALIGN (operands[0])))
9889 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
9890 ? 32
9891 : MEM_ALIGN (operands[1]))))
9892 && ! MEM_VOLATILE_P (operands [0])
9893 && ! MEM_VOLATILE_P (operands [1]))
9895 emit_move_insn (adjust_address (operands[0], SImode, 0),
9896 adjust_address (operands[1], SImode, 0));
9897 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9898 adjust_address (copy_rtx (operands[1]), SImode, 4));
9899 return;
9902 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9903 && !gpc_reg_operand (operands[1], mode))
9904 operands[1] = force_reg (mode, operands[1]);
9906 /* Recognize the case where operand[1] is a reference to thread-local
9907 data and load its address to a register. */
9908 if (tls_referenced_p (operands[1]))
9910 enum tls_model model;
9911 rtx tmp = operands[1];
9912 rtx addend = NULL;
9914 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9916 addend = XEXP (XEXP (tmp, 0), 1);
9917 tmp = XEXP (XEXP (tmp, 0), 0);
9920 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
9921 model = SYMBOL_REF_TLS_MODEL (tmp);
9922 gcc_assert (model != 0);
9924 tmp = rs6000_legitimize_tls_address (tmp, model);
9925 if (addend)
9927 tmp = gen_rtx_PLUS (mode, tmp, addend);
9928 tmp = force_operand (tmp, operands[0]);
9930 operands[1] = tmp;
9933 /* Handle the case where reload calls us with an invalid address. */
9934 if (reload_in_progress && mode == Pmode
9935 && (! general_operand (operands[1], mode)
9936 || ! nonimmediate_operand (operands[0], mode)))
9937 goto emit_set;
9939 /* 128-bit constant floating-point values on Darwin should really be loaded
9940 as two parts. However, this premature splitting is a problem when DFmode
9941 values can go into Altivec registers. */
9942 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
9943 && GET_CODE (operands[1]) == CONST_DOUBLE)
9945 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9946 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9947 DFmode);
9948 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9949 GET_MODE_SIZE (DFmode)),
9950 simplify_gen_subreg (DFmode, operands[1], mode,
9951 GET_MODE_SIZE (DFmode)),
9952 DFmode);
9953 return;
9956 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
9957 cfun->machine->sdmode_stack_slot =
9958 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
9961 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9962 p1:SD) if p1 is not of floating point class and p0 is spilled,
9963 since we have no analogous movsd_store for this case.  */
9964 if (lra_in_progress && mode == DDmode
9965 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9966 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9967 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
9968 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9970 enum reg_class cl;
9971 int regno = REGNO (SUBREG_REG (operands[1]));
9973 if (regno >= FIRST_PSEUDO_REGISTER)
9975 cl = reg_preferred_class (regno);
9976 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9978 if (regno >= 0 && ! FP_REGNO_P (regno))
9980 mode = SDmode;
9981 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9982 operands[1] = SUBREG_REG (operands[1]);
9985 if (lra_in_progress
9986 && mode == SDmode
9987 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9988 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9989 && (REG_P (operands[1])
9990 || (GET_CODE (operands[1]) == SUBREG
9991 && REG_P (SUBREG_REG (operands[1])))))
9993 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
9994 ? SUBREG_REG (operands[1]) : operands[1]);
9995 enum reg_class cl;
9997 if (regno >= FIRST_PSEUDO_REGISTER)
9999 cl = reg_preferred_class (regno);
10000 gcc_assert (cl != NO_REGS);
10001 regno = ira_class_hard_regs[cl][0];
10003 if (FP_REGNO_P (regno))
10005 if (GET_MODE (operands[0]) != DDmode)
10006 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10007 emit_insn (gen_movsd_store (operands[0], operands[1]));
10009 else if (INT_REGNO_P (regno))
10010 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10011 else
10012 gcc_unreachable();
10013 return;
10015 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10016 p1:DD)) if p0 is not of floating point class and p1 is spilled,
10017 since we have no analogous movsd_load for this case.  */
10018 if (lra_in_progress && mode == DDmode
10019 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10020 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10021 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10022 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10024 enum reg_class cl;
10025 int regno = REGNO (SUBREG_REG (operands[0]));
10027 if (regno >= FIRST_PSEUDO_REGISTER)
10029 cl = reg_preferred_class (regno);
10030 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10032 if (regno >= 0 && ! FP_REGNO_P (regno))
10034 mode = SDmode;
10035 operands[0] = SUBREG_REG (operands[0]);
10036 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10039 if (lra_in_progress
10040 && mode == SDmode
10041 && (REG_P (operands[0])
10042 || (GET_CODE (operands[0]) == SUBREG
10043 && REG_P (SUBREG_REG (operands[0]))))
10044 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10045 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10047 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10048 ? SUBREG_REG (operands[0]) : operands[0]);
10049 enum reg_class cl;
10051 if (regno >= FIRST_PSEUDO_REGISTER)
10053 cl = reg_preferred_class (regno);
10054 gcc_assert (cl != NO_REGS);
10055 regno = ira_class_hard_regs[cl][0];
10057 if (FP_REGNO_P (regno))
10059 if (GET_MODE (operands[1]) != DDmode)
10060 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10061 emit_insn (gen_movsd_load (operands[0], operands[1]));
10063 else if (INT_REGNO_P (regno))
10064 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10065 else
10066 gcc_unreachable();
10067 return;
10070 if (reload_in_progress
10071 && mode == SDmode
10072 && cfun->machine->sdmode_stack_slot != NULL_RTX
10073 && MEM_P (operands[0])
10074 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
10075 && REG_P (operands[1]))
10077 if (FP_REGNO_P (REGNO (operands[1])))
10079 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
10080 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
10081 emit_insn (gen_movsd_store (mem, operands[1]));
10083 else if (INT_REGNO_P (REGNO (operands[1])))
10085 rtx mem = operands[0];
10086 if (BYTES_BIG_ENDIAN)
10087 mem = adjust_address_nv (mem, mode, 4);
10088 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
10089 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
10091 else
10092 gcc_unreachable();
10093 return;
10095 if (reload_in_progress
10096 && mode == SDmode
10097 && REG_P (operands[0])
10098 && MEM_P (operands[1])
10099 && cfun->machine->sdmode_stack_slot != NULL_RTX
10100 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
10102 if (FP_REGNO_P (REGNO (operands[0])))
10104 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
10105 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
10106 emit_insn (gen_movsd_load (operands[0], mem));
10108 else if (INT_REGNO_P (REGNO (operands[0])))
10110 rtx mem = operands[1];
10111 if (BYTES_BIG_ENDIAN)
10112 mem = adjust_address_nv (mem, mode, 4);
10113 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
10114 emit_insn (gen_movsd_hardfloat (operands[0], mem));
10116 else
10117 gcc_unreachable();
10118 return;
10121 /* FIXME: In the long term, this switch statement should go away
10122 and be replaced by a sequence of tests based on things like
10123 mode == Pmode. */
10124 switch (mode)
10126 case HImode:
10127 case QImode:
10128 if (CONSTANT_P (operands[1])
10129 && GET_CODE (operands[1]) != CONST_INT)
10130 operands[1] = force_const_mem (mode, operands[1]);
10131 break;
10133 case TFmode:
10134 case TDmode:
10135 case IFmode:
10136 case KFmode:
10137 if (FLOAT128_2REG_P (mode))
10138 rs6000_eliminate_indexed_memrefs (operands);
10139 /* fall through */
10141 case DFmode:
10142 case DDmode:
10143 case SFmode:
10144 case SDmode:
10145 if (CONSTANT_P (operands[1])
10146 && ! easy_fp_constant (operands[1], mode))
10147 operands[1] = force_const_mem (mode, operands[1]);
10148 break;
10150 case V16QImode:
10151 case V8HImode:
10152 case V4SFmode:
10153 case V4SImode:
10154 case V4HImode:
10155 case V2SFmode:
10156 case V2SImode:
10157 case V1DImode:
10158 case V2DFmode:
10159 case V2DImode:
10160 case V1TImode:
10161 if (CONSTANT_P (operands[1])
10162 && !easy_vector_constant (operands[1], mode))
10163 operands[1] = force_const_mem (mode, operands[1]);
10164 break;
10166 case SImode:
10167 case DImode:
10168 /* Use the default pattern for addresses of ELF small data.  */
10169 if (TARGET_ELF
10170 && mode == Pmode
10171 && DEFAULT_ABI == ABI_V4
10172 && (GET_CODE (operands[1]) == SYMBOL_REF
10173 || GET_CODE (operands[1]) == CONST)
10174 && small_data_operand (operands[1], mode))
10176 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10177 return;
10180 if (DEFAULT_ABI == ABI_V4
10181 && mode == Pmode && mode == SImode
10182 && flag_pic == 1 && got_operand (operands[1], mode))
10184 emit_insn (gen_movsi_got (operands[0], operands[1]));
10185 return;
10188 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10189 && TARGET_NO_TOC
10190 && ! flag_pic
10191 && mode == Pmode
10192 && CONSTANT_P (operands[1])
10193 && GET_CODE (operands[1]) != HIGH
10194 && GET_CODE (operands[1]) != CONST_INT)
10196 rtx target = (!can_create_pseudo_p ()
10197 ? operands[0]
10198 : gen_reg_rtx (mode));
10200 /* If this is a function address on -mcall-aixdesc,
10201 convert it to the address of the descriptor. */
10202 if (DEFAULT_ABI == ABI_AIX
10203 && GET_CODE (operands[1]) == SYMBOL_REF
10204 && XSTR (operands[1], 0)[0] == '.')
10206 const char *name = XSTR (operands[1], 0);
10207 rtx new_ref;
10208 while (*name == '.')
10209 name++;
10210 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10211 CONSTANT_POOL_ADDRESS_P (new_ref)
10212 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10213 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10214 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10215 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10216 operands[1] = new_ref;
10219 if (DEFAULT_ABI == ABI_DARWIN)
10221 #if TARGET_MACHO
10222 if (MACHO_DYNAMIC_NO_PIC_P)
10224 /* Take care of any required data indirection. */
10225 operands[1] = rs6000_machopic_legitimize_pic_address (
10226 operands[1], mode, operands[0]);
10227 if (operands[0] != operands[1])
10228 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10229 return;
10231 #endif
10232 emit_insn (gen_macho_high (target, operands[1]));
10233 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10234 return;
10237 emit_insn (gen_elf_high (target, operands[1]));
10238 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10239 return;
10242 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10243 and we have put it in the TOC, we just need to make a TOC-relative
10244 reference to it. */
10245 if (TARGET_TOC
10246 && GET_CODE (operands[1]) == SYMBOL_REF
10247 && use_toc_relative_ref (operands[1], mode))
10248 operands[1] = create_TOC_reference (operands[1], operands[0]);
10249 else if (mode == Pmode
10250 && CONSTANT_P (operands[1])
10251 && GET_CODE (operands[1]) != HIGH
10252 && ((GET_CODE (operands[1]) != CONST_INT
10253 && ! easy_fp_constant (operands[1], mode))
10254 || (GET_CODE (operands[1]) == CONST_INT
10255 && (num_insns_constant (operands[1], mode)
10256 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10257 || (GET_CODE (operands[0]) == REG
10258 && FP_REGNO_P (REGNO (operands[0]))))
10259 && !toc_relative_expr_p (operands[1], false)
10260 && (TARGET_CMODEL == CMODEL_SMALL
10261 || can_create_pseudo_p ()
10262 || (REG_P (operands[0])
10263 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10266 #if TARGET_MACHO
10267 /* Darwin uses a special PIC legitimizer. */
10268 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10270 operands[1] =
10271 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10272 operands[0]);
10273 if (operands[0] != operands[1])
10274 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10275 return;
10277 #endif
10279 /* If we are to limit the number of things we put in the TOC and
10280 this is a symbol plus a constant we can add in one insn,
10281 just put the symbol in the TOC and add the constant. Don't do
10282 this if reload is in progress. */
10283 if (GET_CODE (operands[1]) == CONST
10284 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
10285 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10286 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10287 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10288 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10289 && ! side_effects_p (operands[0]))
10291 rtx sym =
10292 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10293 rtx other = XEXP (XEXP (operands[1], 0), 1);
10295 sym = force_reg (mode, sym);
10296 emit_insn (gen_add3_insn (operands[0], sym, other));
10297 return;
10300 operands[1] = force_const_mem (mode, operands[1]);
10302 if (TARGET_TOC
10303 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10304 && constant_pool_expr_p (XEXP (operands[1], 0))
10305 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
10306 get_pool_constant (XEXP (operands[1], 0)),
10307 get_pool_mode (XEXP (operands[1], 0))))
10309 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10310 operands[0]);
10311 operands[1] = gen_const_mem (mode, tocref);
10312 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10315 break;
10317 case TImode:
10318 if (!VECTOR_MEM_VSX_P (TImode))
10319 rs6000_eliminate_indexed_memrefs (operands);
10320 break;
10322 case PTImode:
10323 rs6000_eliminate_indexed_memrefs (operands);
10324 break;
10326 default:
10327 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10330 /* Above, we may have called force_const_mem which may have returned
10331 an invalid address. If we can, fix this up; otherwise, reload will
10332 have to deal with it. */
10333 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
10334 operands[1] = validize_mem (operands[1]);
10336 emit_set:
10337 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10340 /* Return true if a structure, union or array containing FIELD should be
10341 accessed using `BLKmode'.
10343 For the SPE, simd types are V2SI, and gcc can be tempted to put the
10344 entire thing in a DI and use subregs to access the internals.
10345 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
10346 back-end. Because a single GPR can hold a V2SI, but not a DI, the
10347 best thing to do is set structs to BLKmode and avoid Severe Tire
10348 Damage.
10350 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
10351 fit into one GPR, whereas DI still needs two.  */
10353 static bool
10354 rs6000_member_type_forces_blk (const_tree field, machine_mode mode)
10356 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
10357 || (TARGET_E500_DOUBLE && mode == DFmode));
10360 /* Nonzero if we can use a floating-point register to pass this arg. */
10361 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10362 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10363 && (CUM)->fregno <= FP_ARG_MAX_REG \
10364 && TARGET_HARD_FLOAT && TARGET_FPRS)
10366 /* Nonzero if we can use an AltiVec register to pass this arg. */
10367 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10368 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10369 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10370 && TARGET_ALTIVEC_ABI \
10371 && (NAMED))
10373 /* Walk down the type tree of TYPE counting consecutive base elements.
10374 If *MODEP is VOIDmode, then set it to the first valid floating point
10375 or vector type. If a non-floating point or vector type is found, or
10376 if a floating point or vector type that doesn't match a non-VOIDmode
10377 *MODEP is found, then return -1, otherwise return the count in the
10378 sub-tree. */
10380 static int
10381 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10383 machine_mode mode;
10384 HOST_WIDE_INT size;
10386 switch (TREE_CODE (type))
10388 case REAL_TYPE:
10389 mode = TYPE_MODE (type);
10390 if (!SCALAR_FLOAT_MODE_P (mode))
10391 return -1;
10393 if (*modep == VOIDmode)
10394 *modep = mode;
10396 if (*modep == mode)
10397 return 1;
10399 break;
10401 case COMPLEX_TYPE:
10402 mode = TYPE_MODE (TREE_TYPE (type));
10403 if (!SCALAR_FLOAT_MODE_P (mode))
10404 return -1;
10406 if (*modep == VOIDmode)
10407 *modep = mode;
10409 if (*modep == mode)
10410 return 2;
10412 break;
10414 case VECTOR_TYPE:
10415 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10416 return -1;
10418 /* Use V4SImode as representative of all 128-bit vector types. */
10419 size = int_size_in_bytes (type);
10420 switch (size)
10422 case 16:
10423 mode = V4SImode;
10424 break;
10425 default:
10426 return -1;
10429 if (*modep == VOIDmode)
10430 *modep = mode;
10432 /* Vector modes are considered to be opaque: two vectors are
10433 equivalent for the purposes of being homogeneous aggregates
10434 if they are the same size. */
10435 if (*modep == mode)
10436 return 1;
10438 break;
10440 case ARRAY_TYPE:
10442 int count;
10443 tree index = TYPE_DOMAIN (type);
10445 /* Can't handle incomplete types nor sizes that are not
10446 fixed. */
10447 if (!COMPLETE_TYPE_P (type)
10448 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10449 return -1;
10451 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10452 if (count == -1
10453 || !index
10454 || !TYPE_MAX_VALUE (index)
10455 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10456 || !TYPE_MIN_VALUE (index)
10457 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10458 || count < 0)
10459 return -1;
10461 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10462 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10464 /* There must be no padding. */
10465 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10466 return -1;
10468 return count;
10471 case RECORD_TYPE:
10473 int count = 0;
10474 int sub_count;
10475 tree field;
10477 /* Can't handle incomplete types nor sizes that are not
10478 fixed. */
10479 if (!COMPLETE_TYPE_P (type)
10480 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10481 return -1;
10483 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10485 if (TREE_CODE (field) != FIELD_DECL)
10486 continue;
10488 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10489 if (sub_count < 0)
10490 return -1;
10491 count += sub_count;
10494 /* There must be no padding. */
10495 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10496 return -1;
10498 return count;
10501 case UNION_TYPE:
10502 case QUAL_UNION_TYPE:
10504 /* These aren't very interesting except in a degenerate case. */
10505 int count = 0;
10506 int sub_count;
10507 tree field;
10509 /* Can't handle incomplete types nor sizes that are not
10510 fixed. */
10511 if (!COMPLETE_TYPE_P (type)
10512 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10513 return -1;
10515 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10517 if (TREE_CODE (field) != FIELD_DECL)
10518 continue;
10520 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10521 if (sub_count < 0)
10522 return -1;
10523 count = count > sub_count ? count : sub_count;
10526 /* There must be no padding. */
10527 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10528 return -1;
10530 return count;
10533 default:
10534 break;
10537 return -1;
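/* Examples (illustrative only, not GCC code) of what the walk above
   accepts and rejects:  */
#if 0
struct hfa_2d { double a, b; };                 /* count 2, mode DFmode */
struct hfa_3f { float f[3]; };                  /* count 3, mode SFmode */
struct hfa_3d { _Complex double z; double d; }; /* count 3, mode DFmode */
struct no_hfa { double a; float b; };           /* mixed modes: -1      */
#endif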
10540 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10541 float or vector aggregate that shall be passed in FP/vector registers
10542 according to the ELFv2 ABI, return the homogeneous element mode in
10543 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10545 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10547 static bool
10548 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10549 machine_mode *elt_mode,
10550 int *n_elts)
10552 /* Note that we do not accept complex types at the top level as
10553 homogeneous aggregates; these types are handled via the
10554 targetm.calls.split_complex_arg mechanism. Complex types
10555 can be elements of homogeneous aggregates, however. */
10556 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
10558 machine_mode field_mode = VOIDmode;
10559 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10561 if (field_count > 0)
10563 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
10564 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
10566 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10567 up to AGGR_ARG_NUM_REG registers. */
10568 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
10570 if (elt_mode)
10571 *elt_mode = field_mode;
10572 if (n_elts)
10573 *n_elts = field_count;
10574 return true;
10579 if (elt_mode)
10580 *elt_mode = mode;
10581 if (n_elts)
10582 *n_elts = 1;
10583 return false;
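/* A worked example (sketch; assumes AGGR_ARG_NUM_REG is 8, as on ELFv2):
   one DFmode element needs one register, so a homogeneous aggregate of
   eight doubles still qualifies while nine fall back to the ordinary
   aggregate rules.  */
#if 0
struct ok_hfa  { double d[8]; };  /* passed in eight consecutive FPRs */
struct too_big { double d[9]; };  /* not treated as homogeneous       */
#endif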
10586 /* Return a nonzero value to say to return the function value in
10587 memory, just as large structures are always returned. TYPE will be
10588 the data type of the value, and FNTYPE will be the type of the
10589 function doing the returning, or @code{NULL} for libcalls.
10591 The AIX ABI for the RS/6000 specifies that all structures are
10592 returned in memory. The Darwin ABI does the same.
10594 For the Darwin 64 Bit ABI, a function result can be returned in
10595 registers or in memory, depending on the size of the return data
10596 type. If it is returned in registers, the value occupies the same
10597 registers as it would if it were the first and only function
10598 argument. Otherwise, the function places its result in memory at
10599 the location pointed to by GPR3.
10601 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10602 but a draft put them in memory, and GCC used to implement the draft
10603 instead of the final standard. Therefore, aix_struct_return
10604 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10605 compatibility can change DRAFT_V4_STRUCT_RET to override the
10606 default, and -m switches get the final word. See
10607 rs6000_option_override_internal for more details.
10609 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10610 long double support is enabled. These values are returned in memory.
10612 int_size_in_bytes returns -1 for variable size objects, which go in
10613 memory always. The cast to unsigned makes -1 > 8. */
10615 static bool
10616 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10618 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10619 if (TARGET_MACHO
10620 && rs6000_darwin64_abi
10621 && TREE_CODE (type) == RECORD_TYPE
10622 && int_size_in_bytes (type) > 0)
10624 CUMULATIVE_ARGS valcum;
10625 rtx valret;
10627 valcum.words = 0;
10628 valcum.fregno = FP_ARG_MIN_REG;
10629 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10630 /* Do a trial code generation as if this were going to be passed
10631 as an argument; if any part goes in memory, we return NULL. */
10632 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10633 if (valret)
10634 return false;
10635 /* Otherwise fall through to more conventional ABI rules. */
10638 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers.  */
10639 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10640 NULL, NULL))
10641 return false;
10643 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
10644 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10645 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10646 return false;
10648 if (AGGREGATE_TYPE_P (type)
10649 && (aix_struct_return
10650 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10651 return true;
10653 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10654 modes only exist for GCC vector types if -maltivec. */
10655 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10656 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10657 return false;
10659 /* Return synthetic vectors in memory. */
10660 if (TREE_CODE (type) == VECTOR_TYPE
10661 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10663 static bool warned_for_return_big_vectors = false;
10664 if (!warned_for_return_big_vectors)
10666 warning (0, "GCC vector returned by reference: "
10667 "non-standard ABI extension with no compatibility guarantee");
10668 warned_for_return_big_vectors = true;
10670 return true;
10673 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10674 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10675 return true;
10677 return false;
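/* Illustrative cases (not GCC code) for the rules above, assuming a
   64-bit ELFv2 target:  */
#if 0
struct in_regs { long a, b; };       /* 16 bytes: returned in GPRs    */
struct in_mem  { long a, b, c; };    /* 24 bytes: returned in memory  */
struct in_fprs { double x, y, z; };  /* homogeneous: returned in FPRs */
#endif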
10680 /* Specify whether values returned in registers should be at the most
10681 significant end of a register. We want aggregates returned by
10682 value to match the way aggregates are passed to functions. */
10684 static bool
10685 rs6000_return_in_msb (const_tree valtype)
10687 return (DEFAULT_ABI == ABI_ELFv2
10688 && BYTES_BIG_ENDIAN
10689 && AGGREGATE_TYPE_P (valtype)
10690 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
10693 #ifdef HAVE_AS_GNU_ATTRIBUTE
10694 /* Return TRUE if a call to function FNDECL may be one that
10695 potentially affects the function calling ABI of the object file. */
10697 static bool
10698 call_ABI_of_interest (tree fndecl)
10700 if (symtab->state == EXPANSION)
10702 struct cgraph_node *c_node;
10704 /* Libcalls are always interesting. */
10705 if (fndecl == NULL_TREE)
10706 return true;
10708 /* Any call to an external function is interesting. */
10709 if (DECL_EXTERNAL (fndecl))
10710 return true;
10712 /* Interesting functions that we are emitting in this object file. */
10713 c_node = cgraph_node::get (fndecl);
10714 c_node = c_node->ultimate_alias_target ();
10715 return !c_node->only_called_directly_p ();
10717 return false;
10719 #endif
10721 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10722 for a call to a function whose data type is FNTYPE.
10723 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10725 For incoming args we set the number of arguments in the prototype large
10726 so we never return a PARALLEL. */
10728 void
10729 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10730 rtx libname ATTRIBUTE_UNUSED, int incoming,
10731 int libcall, int n_named_args,
10732 tree fndecl ATTRIBUTE_UNUSED,
10733 machine_mode return_mode ATTRIBUTE_UNUSED)
10735 static CUMULATIVE_ARGS zero_cumulative;
10737 *cum = zero_cumulative;
10738 cum->words = 0;
10739 cum->fregno = FP_ARG_MIN_REG;
10740 cum->vregno = ALTIVEC_ARG_MIN_REG;
10741 cum->prototype = (fntype && prototype_p (fntype));
10742 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10743 ? CALL_LIBCALL : CALL_NORMAL);
10744 cum->sysv_gregno = GP_ARG_MIN_REG;
10745 cum->stdarg = stdarg_p (fntype);
10746 cum->libcall = libcall;
10748 cum->nargs_prototype = 0;
10749 if (incoming || cum->prototype)
10750 cum->nargs_prototype = n_named_args;
10752 /* Check for a longcall attribute. */
10753 if ((!fntype && rs6000_default_long_calls)
10754 || (fntype
10755 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10756 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10757 cum->call_cookie |= CALL_LONG;
10759 if (TARGET_DEBUG_ARG)
10761 fprintf (stderr, "\ninit_cumulative_args:");
10762 if (fntype)
10764 tree ret_type = TREE_TYPE (fntype);
10765 fprintf (stderr, " ret code = %s,",
10766 get_tree_code_name (TREE_CODE (ret_type)));
10769 if (cum->call_cookie & CALL_LONG)
10770 fprintf (stderr, " longcall,");
10772 fprintf (stderr, " proto = %d, nargs = %d\n",
10773 cum->prototype, cum->nargs_prototype);
10776 #ifdef HAVE_AS_GNU_ATTRIBUTE
10777 if (DEFAULT_ABI == ABI_V4)
10779 cum->escapes = call_ABI_of_interest (fndecl);
10780 if (cum->escapes)
10782 tree return_type;
10784 if (fntype)
10786 return_type = TREE_TYPE (fntype);
10787 return_mode = TYPE_MODE (return_type);
10789 else
10790 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10792 if (return_type != NULL)
10794 if (TREE_CODE (return_type) == RECORD_TYPE
10795 && TYPE_TRANSPARENT_AGGR (return_type))
10797 return_type = TREE_TYPE (first_field (return_type));
10798 return_mode = TYPE_MODE (return_type);
10800 if (AGGREGATE_TYPE_P (return_type)
10801 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10802 <= 8))
10803 rs6000_returns_struct = true;
10805 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (return_mode))
10806 rs6000_passes_float = true;
10807 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
10808 || SPE_VECTOR_MODE (return_mode))
10809 rs6000_passes_vector = true;
10812 #endif
10814 if (fntype
10815 && !TARGET_ALTIVEC
10816 && TARGET_ALTIVEC_ABI
10817 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10819 error ("cannot return value in vector register because"
10820 " altivec instructions are disabled, use -maltivec"
10821 " to enable them");
10825 /* The mode the ABI uses for a word. This is not the same as word_mode
10826 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10828 static machine_mode
10829 rs6000_abi_word_mode (void)
10831 return TARGET_32BIT ? SImode : DImode;
10834 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10835 static char *
10836 rs6000_offload_options (void)
10838 if (TARGET_64BIT)
10839 return xstrdup ("-foffload-abi=lp64");
10840 else
10841 return xstrdup ("-foffload-abi=ilp32");
10844 /* On rs6000, function arguments are promoted, as are function return
10845 values. */
10847 static machine_mode
10848 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10849 machine_mode mode,
10850 int *punsignedp ATTRIBUTE_UNUSED,
10851 const_tree, int)
10853 PROMOTE_MODE (mode, *punsignedp, type);
10855 return mode;
10858 /* Return true if TYPE must be passed on the stack and not in registers. */
10860 static bool
10861 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10863 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10864 return must_pass_in_stack_var_size (mode, type);
10865 else
10866 return must_pass_in_stack_var_size_or_pad (mode, type);
10869 static inline bool
10870 is_complex_IBM_long_double (machine_mode mode)
10872 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
10875 /* Whether ABI_V4 passes MODE args to a function in floating point
10876 registers. */
10878 static bool
10879 abi_v4_pass_in_fpr (machine_mode mode)
10881 if (!TARGET_FPRS || !TARGET_HARD_FLOAT)
10882 return false;
10883 if (TARGET_SINGLE_FLOAT && mode == SFmode)
10884 return true;
10885 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
10886 return true;
10887 /* ABI_V4 passes complex IBM long double in 8 gprs.
10888 Stupid, but we can't change the ABI now. */
10889 if (is_complex_IBM_long_double (mode))
10890 return false;
10891 if (FLOAT128_2REG_P (mode))
10892 return true;
10893 if (DECIMAL_FLOAT_MODE_P (mode))
10894 return true;
10895 return false;
10898 /* If defined, a C expression which determines whether, and in which
10899 direction, to pad out an argument with extra space. The value
10900 should be of type `enum direction': either `upward' to pad above
10901 the argument, `downward' to pad below, or `none' to inhibit
10902 padding.
10904 For the AIX ABI structs are always stored left shifted in their
10905 argument slot. */
10907 enum direction
10908 function_arg_padding (machine_mode mode, const_tree type)
10910 #ifndef AGGREGATE_PADDING_FIXED
10911 #define AGGREGATE_PADDING_FIXED 0
10912 #endif
10913 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10914 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10915 #endif
10917 if (!AGGREGATE_PADDING_FIXED)
10919 /* GCC used to pass structures of the same size as integer types as
10920 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
10921 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10922 passed padded downward, except that -mstrict-align further
10923 muddied the water in that multi-component structures of 2 and 4
10924 bytes in size were passed padded upward.
10926 The following arranges for best compatibility with previous
10927 versions of gcc, but removes the -mstrict-align dependency. */
10928 if (BYTES_BIG_ENDIAN)
10930 HOST_WIDE_INT size = 0;
10932 if (mode == BLKmode)
10934 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10935 size = int_size_in_bytes (type);
10937 else
10938 size = GET_MODE_SIZE (mode);
10940 if (size == 1 || size == 2 || size == 4)
10941 return downward;
10943 return upward;
10946 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10948 if (type != 0 && AGGREGATE_TYPE_P (type))
10949 return upward;
10952 /* Fall back to the default. */
10953 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
10956 /* If defined, a C expression that gives the alignment boundary, in bits,
10957 of an argument with the specified mode and type. If it is not defined,
10958 PARM_BOUNDARY is used for all arguments.
10960 V.4 wants long longs and doubles to be double word aligned. Just
10961 testing the mode size is a boneheaded way to do this as it means
10962 that other types such as complex int are also double word aligned.
10963 However, we're stuck with this because changing the ABI might break
10964 existing library interfaces.
10966 Doubleword align SPE vectors.
10967 Quadword align Altivec/VSX vectors.
10968 Quadword align large synthetic vector types. */
10970 static unsigned int
10971 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10973 machine_mode elt_mode;
10974 int n_elts;
10976 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10978 if (DEFAULT_ABI == ABI_V4
10979 && (GET_MODE_SIZE (mode) == 8
10980 || (TARGET_HARD_FLOAT
10981 && TARGET_FPRS
10982 && !is_complex_IBM_long_double (mode)
10983 && FLOAT128_2REG_P (mode))))
10984 return 64;
10985 else if (FLOAT128_VECTOR_P (mode))
10986 return 128;
10987 else if (SPE_VECTOR_MODE (mode)
10988 || (type && TREE_CODE (type) == VECTOR_TYPE
10989 && int_size_in_bytes (type) >= 8
10990 && int_size_in_bytes (type) < 16))
10991 return 64;
10992 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10993 || (type && TREE_CODE (type) == VECTOR_TYPE
10994 && int_size_in_bytes (type) >= 16))
10995 return 128;
10997 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10998 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10999 -mcompat-align-parm is used. */
11000 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11001 || DEFAULT_ABI == ABI_ELFv2)
11002 && type && TYPE_ALIGN (type) > 64)
11004 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11005 or homogeneous float/vector aggregates here. We already handled
11006 vector aggregates above, but still need to check for float here. */
11007 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11008 && !SCALAR_FLOAT_MODE_P (elt_mode));
11010 /* We used to check for BLKmode instead of the above aggregate type
11011 check. Warn when this results in any difference to the ABI. */
11012 if (aggregate_p != (mode == BLKmode))
11014 static bool warned;
11015 if (!warned && warn_psabi)
11017 warned = true;
11018 inform (input_location,
11019 "the ABI of passing aggregates with %d-byte alignment"
11020 " has changed in GCC 5",
11021 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11025 if (aggregate_p)
11026 return 128;
11029 /* Similarly for the Darwin64 ABI. Note that for historical reasons we
11030 implement the "aggregate type" check as a BLKmode check here; this
11031 means certain aggregate types are in fact not aligned. */
11032 if (TARGET_MACHO && rs6000_darwin64_abi
11033 && mode == BLKmode
11034 && type && TYPE_ALIGN (type) > 64)
11035 return 128;
11037 return PARM_BOUNDARY;
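/* Some sample boundaries this computes for 32-bit V.4 with hard float
   (PARM_BOUNDARY == 32); the exact set depends on the configuration:

     int                  -> 32  (PARM_BOUNDARY fallback)
     long long, double    -> 64
     vector int (16-byte) -> 128 (Altivec/VSX)  */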
11040 /* The offset in words to the start of the parameter save area. */
11042 static unsigned int
11043 rs6000_parm_offset (void)
11045 return (DEFAULT_ABI == ABI_V4 ? 2
11046 : DEFAULT_ABI == ABI_ELFv2 ? 4
11047 : 6);
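/* These counts step over the fixed linkage area at the base of the
   caller's frame: 2 words (back chain and LR save word) for SVR4,
   4 doublewords (32 bytes) for ELFv2, and 6 words for the traditional
   AIX linkage area (48 bytes in 64-bit mode).  */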
11050 /* For a function parm of MODE and TYPE, return the starting word in
11051 the parameter area. NWORDS of the parameter area are already used. */
11053 static unsigned int
11054 rs6000_parm_start (machine_mode mode, const_tree type,
11055 unsigned int nwords)
11057 unsigned int align;
11059 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11060 return nwords + (-(rs6000_parm_offset () + nwords) & align);
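/* Worked example (64-bit AIX, offset 6): a quadword-aligned argument
   has align == 128 / 64 - 1 == 1, so with NWORDS == 3 already used it
   starts at 3 + (-(6 + 3) & 1) == 4, skipping one word so that word
   6 + 4 == 10 within the frame is even, i.e. 16-byte aligned.  */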
11063 /* Compute the size (in words) of a function argument. */
11065 static unsigned long
11066 rs6000_arg_size (machine_mode mode, const_tree type)
11068 unsigned long size;
11070 if (mode != BLKmode)
11071 size = GET_MODE_SIZE (mode);
11072 else
11073 size = int_size_in_bytes (type);
11075 if (TARGET_32BIT)
11076 return (size + 3) >> 2;
11077 else
11078 return (size + 7) >> 3;
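/* E.g. a 10-byte BLKmode struct needs (10 + 3) >> 2 == 3 words under
   TARGET_32BIT and (10 + 7) >> 3 == 2 doublewords under TARGET_64BIT.  */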
11081 /* Use this to flush pending int fields. */
11083 static void
11084 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11085 HOST_WIDE_INT bitpos, int final)
11087 unsigned int startbit, endbit;
11088 int intregs, intoffset;
11089 machine_mode mode;
11091 /* Handle the situations where a float is taking up the first half
11092 of the GPR, and the other half is empty (typically due to
11093 alignment restrictions). We can detect this by an 8-byte-aligned
11094 int field, or by seeing that this is the final flush for this
11095 argument. Count the word and continue on. */
11096 if (cum->floats_in_gpr == 1
11097 && (cum->intoffset % 64 == 0
11098 || (cum->intoffset == -1 && final)))
11100 cum->words++;
11101 cum->floats_in_gpr = 0;
11104 if (cum->intoffset == -1)
11105 return;
11107 intoffset = cum->intoffset;
11108 cum->intoffset = -1;
11109 cum->floats_in_gpr = 0;
11111 if (intoffset % BITS_PER_WORD != 0)
11113 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
11114 MODE_INT, 0);
11115 if (mode == BLKmode)
11117 /* We couldn't find an appropriate mode, which happens,
11118 e.g., in packed structs when there are 3 bytes to load.
11119 Move intoffset back to the beginning of the word in this
11120 case. */
11121 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11125 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11126 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11127 intregs = (endbit - startbit) / BITS_PER_WORD;
11128 cum->words += intregs;
11129 /* words should be unsigned. */
11130 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11132 int pad = (endbit/BITS_PER_WORD) - cum->words;
11133 cum->words += pad;
11137 /* The darwin64 ABI calls for us to recurse down through structs,
11138 looking for elements passed in registers. Unfortunately, we have
11139 to track int register count here also because of misalignments
11140 in powerpc alignment mode. */
11142 static void
11143 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11144 const_tree type,
11145 HOST_WIDE_INT startbitpos)
11147 tree f;
11149 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11150 if (TREE_CODE (f) == FIELD_DECL)
11152 HOST_WIDE_INT bitpos = startbitpos;
11153 tree ftype = TREE_TYPE (f);
11154 machine_mode mode;
11155 if (ftype == error_mark_node)
11156 continue;
11157 mode = TYPE_MODE (ftype);
11159 if (DECL_SIZE (f) != 0
11160 && tree_fits_uhwi_p (bit_position (f)))
11161 bitpos += int_bit_position (f);
11163 /* ??? FIXME: else assume zero offset. */
11165 if (TREE_CODE (ftype) == RECORD_TYPE)
11166 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11167 else if (USE_FP_FOR_ARG_P (cum, mode))
11169 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11170 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11171 cum->fregno += n_fpregs;
11172 /* Single-precision floats present a special problem for
11173 us, because they are smaller than an 8-byte GPR, and so
11174 the structure-packing rules combined with the standard
11175 varargs behavior mean that we want to pack float/float
11176 and float/int combinations into a single register's
11177 space. This is complicated by the arg advance flushing,
11178 which works on arbitrarily large groups of int-type
11179 fields. */
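/* A sketch of the two packing cases handled below:

     struct { float f, g; }      -- F and G share one 64-bit slot;
                                    the second float completes the word.
     struct { int i; float f; }  -- the flush triggered for I has
                                    already counted the word F lives in.  */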
11180 if (mode == SFmode)
11182 if (cum->floats_in_gpr == 1)
11184 /* Two floats in a word; count the word and reset
11185 the float count. */
11186 cum->words++;
11187 cum->floats_in_gpr = 0;
11189 else if (bitpos % 64 == 0)
11191 /* A float at the beginning of an 8-byte word;
11192 count it and put off adjusting cum->words until
11193 we see if an arg advance flush is going to do it
11194 for us. */
11195 cum->floats_in_gpr++;
11197 else
11199 /* The float is at the end of a word, preceded
11200 by integer fields, so the arg advance flush
11201 just above has already set cum->words and
11202 everything is taken care of. */
11205 else
11206 cum->words += n_fpregs;
11208 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11210 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11211 cum->vregno++;
11212 cum->words += 2;
11214 else if (cum->intoffset == -1)
11215 cum->intoffset = bitpos;
11219 /* Check for an item that needs to be considered specially under the
11220 Darwin 64-bit ABI. These are record types where the mode is BLKmode
11221 or the structure is 8 bytes in size. */
11222 static int
11223 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11225 return rs6000_darwin64_abi
11226 && ((mode == BLKmode
11227 && TREE_CODE (type) == RECORD_TYPE
11228 && int_size_in_bytes (type) > 0)
11229 || (type && TREE_CODE (type) == RECORD_TYPE
11230 && int_size_in_bytes (type) == 8)) ? 1 : 0;
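/* E.g. struct { double d; } has mode DFmode, but its 8-byte size means
   it still gets the special Darwin64 record treatment, as does any
   nonempty BLKmode record.  */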
11233 /* Update the data in CUM to advance over an argument
11234 of mode MODE and data type TYPE.
11235 (TYPE is null for libcalls where that information may not be available.)
11237 Note that for args passed by reference, function_arg will be called
11238 with MODE and TYPE set to that of the pointer to the arg, not the arg
11239 itself. */
11241 static void
11242 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11243 const_tree type, bool named, int depth)
11245 machine_mode elt_mode;
11246 int n_elts;
11248 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11250 /* Only tick off an argument if we're not recursing. */
11251 if (depth == 0)
11252 cum->nargs_prototype--;
11254 #ifdef HAVE_AS_GNU_ATTRIBUTE
11255 if (DEFAULT_ABI == ABI_V4
11256 && cum->escapes)
11258 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode))
11259 rs6000_passes_float = true;
11260 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11261 rs6000_passes_vector = true;
11262 else if (SPE_VECTOR_MODE (mode)
11263 && !cum->stdarg
11264 && cum->sysv_gregno <= GP_ARG_MAX_REG)
11265 rs6000_passes_vector = true;
11267 #endif
11269 if (TARGET_ALTIVEC_ABI
11270 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11271 || (type && TREE_CODE (type) == VECTOR_TYPE
11272 && int_size_in_bytes (type) == 16)))
11274 bool stack = false;
11276 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11278 cum->vregno += n_elts;
11280 if (!TARGET_ALTIVEC)
11281 error ("cannot pass argument in vector register because"
11282 " altivec instructions are disabled, use -maltivec"
11283 " to enable them");
11285 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11286 even if it is going to be passed in a vector register.
11287 Darwin does the same for variable-argument functions. */
11288 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11289 && TARGET_64BIT)
11290 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11291 stack = true;
11293 else
11294 stack = true;
11296 if (stack)
11298 int align;
11300 /* Vector parameters must be 16-byte aligned. In 32-bit
11301 mode this means we need to take into account the offset
11302 to the parameter save area. In 64-bit mode, they just
11303 have to start on an even word, since the parameter save
11304 area is 16-byte aligned. */
11305 if (TARGET_32BIT)
11306 align = -(rs6000_parm_offset () + cum->words) & 3;
11307 else
11308 align = cum->words & 1;
11309 cum->words += align + rs6000_arg_size (mode, type);
11311 if (TARGET_DEBUG_ARG)
11313 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11314 cum->words, align);
11315 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11316 cum->nargs_prototype, cum->prototype,
11317 GET_MODE_NAME (mode));
11321 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
11322 && !cum->stdarg
11323 && cum->sysv_gregno <= GP_ARG_MAX_REG)
11324 cum->sysv_gregno++;
11326 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11328 int size = int_size_in_bytes (type);
11329 /* Variable sized types have size == -1 and are
11330 treated as if consisting entirely of ints.
11331 Pad to 16 byte boundary if needed. */
11332 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11333 && (cum->words % 2) != 0)
11334 cum->words++;
11335 /* For varargs, we can just go up by the size of the struct. */
11336 if (!named)
11337 cum->words += (size + 7) / 8;
11338 else
11340 /* It is tempting to say int register count just goes up by
11341 sizeof(type)/8, but this is wrong in a case such as
11342 { int; double; int; } [powerpc alignment]. We have to
11343 grovel through the fields for these too. */
11344 cum->intoffset = 0;
11345 cum->floats_in_gpr = 0;
11346 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11347 rs6000_darwin64_record_arg_advance_flush (cum,
11348 size * BITS_PER_UNIT, 1);
11350 if (TARGET_DEBUG_ARG)
11352 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11353 cum->words, TYPE_ALIGN (type), size);
11354 fprintf (stderr,
11355 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11356 cum->nargs_prototype, cum->prototype,
11357 GET_MODE_NAME (mode));
11360 else if (DEFAULT_ABI == ABI_V4)
11362 if (abi_v4_pass_in_fpr (mode))
11364 /* _Decimal128 must use an even/odd register pair. This assumes
11365 that the register number is odd when fregno is odd. */
11366 if (mode == TDmode && (cum->fregno % 2) == 1)
11367 cum->fregno++;
11369 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11370 <= FP_ARG_V4_MAX_REG)
11371 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11372 else
11374 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11375 if (mode == DFmode || FLOAT128_IBM_P (mode)
11376 || mode == DDmode || mode == TDmode)
11377 cum->words += cum->words & 1;
11378 cum->words += rs6000_arg_size (mode, type);
11381 else
11383 int n_words = rs6000_arg_size (mode, type);
11384 int gregno = cum->sysv_gregno;
11386 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
11387 (r7,r8) or (r9,r10). As does any other 2 word item such
11388 as complex int due to a historical mistake. */
11389 if (n_words == 2)
11390 gregno += (1 - gregno) & 1;
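/* (1 - gregno) & 1 rounds an even register number up to the next odd
   one: gregno == 3 (r3) stays put, while gregno == 4 becomes 5, so a
   2-word item lands in the (r5,r6) pair instead of straddling r4/r5.  */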
11392 /* Multi-reg args are not split between registers and stack. */
11393 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11395 /* Long long and SPE vectors are aligned on the stack.
11396 So are other 2 word items such as complex int due to
11397 a historical mistake. */
11398 if (n_words == 2)
11399 cum->words += cum->words & 1;
11400 cum->words += n_words;
11403 /* Note: we keep accumulating gregno even after we have started
11404 spilling to the stack; this is how expand_builtin_saveregs can
11405 tell that spilling has started. */
11406 cum->sysv_gregno = gregno + n_words;
11409 if (TARGET_DEBUG_ARG)
11411 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11412 cum->words, cum->fregno);
11413 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11414 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11415 fprintf (stderr, "mode = %4s, named = %d\n",
11416 GET_MODE_NAME (mode), named);
11419 else
11421 int n_words = rs6000_arg_size (mode, type);
11422 int start_words = cum->words;
11423 int align_words = rs6000_parm_start (mode, type, start_words);
11425 cum->words = align_words + n_words;
11427 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
11429 /* _Decimal128 must be passed in an even/odd float register pair.
11430 This assumes that the register number is odd when fregno is
11431 odd. */
11432 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11433 cum->fregno++;
11434 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11437 if (TARGET_DEBUG_ARG)
11439 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11440 cum->words, cum->fregno);
11441 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11442 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11443 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11444 named, align_words - start_words, depth);
11449 static void
11450 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11451 const_tree type, bool named)
11453 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11457 static rtx
11458 spe_build_register_parallel (machine_mode mode, int gregno)
11460 rtx r1, r3, r5, r7;
11462 switch (mode)
11464 case DFmode:
11465 r1 = gen_rtx_REG (DImode, gregno);
11466 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11467 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
11469 case DCmode:
11470 case TFmode:
11471 r1 = gen_rtx_REG (DImode, gregno);
11472 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11473 r3 = gen_rtx_REG (DImode, gregno + 2);
11474 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
11475 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
11477 case TCmode:
11478 r1 = gen_rtx_REG (DImode, gregno);
11479 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11480 r3 = gen_rtx_REG (DImode, gregno + 2);
11481 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
11482 r5 = gen_rtx_REG (DImode, gregno + 4);
11483 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
11484 r7 = gen_rtx_REG (DImode, gregno + 6);
11485 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
11486 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
11488 default:
11489 gcc_unreachable ();
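/* Roughly, the PARALLEL built for TFmode with gregno == 5 is

     (parallel:TF [(expr_list (reg:DI 5) (const_int 0))
                   (expr_list (reg:DI 7) (const_int 8))])

   i.e. the two 8-byte halves sit in the (r5,r6) and (r7,r8) ABI pairs,
   represented here as the 64-bit e500 GPRs r5 and r7.  */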
11493 /* Determine where to put a SIMD argument on the SPE. */
11494 static rtx
11495 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, machine_mode mode,
11496 const_tree type)
11498 int gregno = cum->sysv_gregno;
11500 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
11501 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
11502 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
11503 || mode == DCmode || mode == TCmode))
11505 int n_words = rs6000_arg_size (mode, type);
11507 /* Doubles go in an odd/even register pair (r5/r6, etc). */
11508 if (mode == DFmode)
11509 gregno += (1 - gregno) & 1;
11511 /* Multi-reg args are not split between registers and stack. */
11512 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11513 return NULL_RTX;
11515 return spe_build_register_parallel (mode, gregno);
11517 if (cum->stdarg)
11519 int n_words = rs6000_arg_size (mode, type);
11521 /* SPE vectors are put in odd registers. */
11522 if (n_words == 2 && (gregno & 1) == 0)
11523 gregno += 1;
11525 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
11527 rtx r1, r2;
11528 machine_mode m = SImode;
11530 r1 = gen_rtx_REG (m, gregno);
11531 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
11532 r2 = gen_rtx_REG (m, gregno + 1);
11533 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
11534 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
11536 else
11537 return NULL_RTX;
11539 else
11541 if (gregno <= GP_ARG_MAX_REG)
11542 return gen_rtx_REG (mode, gregno);
11543 else
11544 return NULL_RTX;
11548 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11549 structure between cum->intoffset and bitpos to integer registers. */
11551 static void
11552 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11553 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11555 machine_mode mode;
11556 unsigned int regno;
11557 unsigned int startbit, endbit;
11558 int this_regno, intregs, intoffset;
11559 rtx reg;
11561 if (cum->intoffset == -1)
11562 return;
11564 intoffset = cum->intoffset;
11565 cum->intoffset = -1;
11567 /* If this is the trailing part of a word, try to only load that
11568 much into the register. Otherwise load the whole register. Note
11569 that in the latter case we may pick up unwanted bits. It's not a
11570 problem at the moment, but we may wish to revisit it. */
11572 if (intoffset % BITS_PER_WORD != 0)
11574 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
11575 MODE_INT, 0);
11576 if (mode == BLKmode)
11578 /* We couldn't find an appropriate mode, which happens,
11579 e.g., in packed structs when there are 3 bytes to load.
11580 Move intoffset back to the beginning of the word in this
11581 case. */
11582 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11583 mode = word_mode;
11586 else
11587 mode = word_mode;
11589 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11590 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11591 intregs = (endbit - startbit) / BITS_PER_WORD;
11592 this_regno = cum->words + intoffset / BITS_PER_WORD;
11594 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11595 cum->use_stack = 1;
11597 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11598 if (intregs <= 0)
11599 return;
11601 intoffset /= BITS_PER_UNIT;
11602 do
11603 {
11604 regno = GP_ARG_MIN_REG + this_regno;
11605 reg = gen_rtx_REG (mode, regno);
11606 rvec[(*k)++] =
11607 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11609 this_regno += 1;
11610 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11611 mode = word_mode;
11612 intregs -= 1;
11613 }
11614 while (intregs > 0);
11617 /* Recursive workhorse for the following. */
11619 static void
11620 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11621 HOST_WIDE_INT startbitpos, rtx rvec[],
11622 int *k)
11624 tree f;
11626 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11627 if (TREE_CODE (f) == FIELD_DECL)
11629 HOST_WIDE_INT bitpos = startbitpos;
11630 tree ftype = TREE_TYPE (f);
11631 machine_mode mode;
11632 if (ftype == error_mark_node)
11633 continue;
11634 mode = TYPE_MODE (ftype);
11636 if (DECL_SIZE (f) != 0
11637 && tree_fits_uhwi_p (bit_position (f)))
11638 bitpos += int_bit_position (f);
11640 /* ??? FIXME: else assume zero offset. */
11642 if (TREE_CODE (ftype) == RECORD_TYPE)
11643 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11644 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11646 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11647 #if 0
11648 switch (mode)
11650 case SCmode: mode = SFmode; break;
11651 case DCmode: mode = DFmode; break;
11652 case TCmode: mode = TFmode; break;
11653 default: break;
11655 #endif
11656 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11657 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11659 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11660 && (mode == TFmode || mode == TDmode));
11661 /* Long double or _Decimal128 split over regs and memory. */
11662 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11663 cum->use_stack = 1;
11665 rvec[(*k)++]
11666 = gen_rtx_EXPR_LIST (VOIDmode,
11667 gen_rtx_REG (mode, cum->fregno++),
11668 GEN_INT (bitpos / BITS_PER_UNIT));
11669 if (FLOAT128_2REG_P (mode))
11670 cum->fregno++;
11672 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11674 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11675 rvec[(*k)++]
11676 = gen_rtx_EXPR_LIST (VOIDmode,
11677 gen_rtx_REG (mode, cum->vregno++),
11678 GEN_INT (bitpos / BITS_PER_UNIT));
11680 else if (cum->intoffset == -1)
11681 cum->intoffset = bitpos;
11685 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11686 the register(s) to be used for each field and subfield of a struct
11687 being passed by value, along with the offset of where the
11688 register's value may be found in the block. FP fields go in FP
11689 registers, vector fields go in vector registers, and everything
11690 else goes in int registers, packed as in memory.
11692 This code is also used for function return values. RETVAL indicates
11693 whether this is the case.
11695 Much of this is taken from the SPARC V9 port, which has a similar
11696 calling convention. */
11698 static rtx
11699 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11700 bool named, bool retval)
11702 rtx rvec[FIRST_PSEUDO_REGISTER];
11703 int k = 1, kbase = 1;
11704 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11705 /* This is a copy; modifications are not visible to our caller. */
11706 CUMULATIVE_ARGS copy_cum = *orig_cum;
11707 CUMULATIVE_ARGS *cum = &copy_cum;
11709 /* Pad to 16 byte boundary if needed. */
11710 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11711 && (cum->words % 2) != 0)
11712 cum->words++;
11714 cum->intoffset = 0;
11715 cum->use_stack = 0;
11716 cum->named = named;
11718 /* Put entries into rvec[] for individual FP and vector fields, and
11719 for the chunks of memory that go in int regs. Note we start at
11720 element 1; 0 is reserved for an indication of using memory, and
11721 may or may not be filled in below. */
11722 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11723 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11725 /* If any part of the struct went on the stack put all of it there.
11726 This hack is because the generic code for
11727 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11728 parts of the struct are not at the beginning. */
11729 if (cum->use_stack)
11731 if (retval)
11732 return NULL_RTX; /* doesn't go in registers at all */
11733 kbase = 0;
11734 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11736 if (k > 1 || cum->use_stack)
11737 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11738 else
11739 return NULL_RTX;
11742 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11744 static rtx
11745 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11746 int align_words)
11748 int n_units;
11749 int i, k;
11750 rtx rvec[GP_ARG_NUM_REG + 1];
11752 if (align_words >= GP_ARG_NUM_REG)
11753 return NULL_RTX;
11755 n_units = rs6000_arg_size (mode, type);
11757 /* Optimize the simple case where the arg fits in one gpr, except in
11758 the case of BLKmode due to assign_parms assuming that registers are
11759 BITS_PER_WORD wide. */
11760 if (n_units == 0
11761 || (n_units == 1 && mode != BLKmode))
11762 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11764 k = 0;
11765 if (align_words + n_units > GP_ARG_NUM_REG)
11766 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11767 using a magic NULL_RTX component.
11768 This is not strictly correct. Only some of the arg belongs in
11769 memory, not all of it. However, the normal scheme using
11770 function_arg_partial_nregs can result in unusual subregs, eg.
11771 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11772 store the whole arg to memory is often more efficient than code
11773 to store pieces, and we know that space is available in the right
11774 place for the whole arg. */
11775 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11777 i = 0;
11778 do
11779 {
11780 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11781 rtx off = GEN_INT (i++ * 4);
11782 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11783 }
11784 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11786 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
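/* E.g. a 2-word argument whose first word falls in the last GPR
   (align_words == 7) comes back as

     (parallel [(expr_list (nil) (const_int 0))
                (expr_list (reg:SI 10) (const_int 0))])

   where the leading NULL_RTX element says the whole value is also
   placed in memory, and r10 carries the first word.  */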
11789 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11790 but must also be copied into the parameter save area starting at
11791 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11792 to the GPRs and/or memory. Return the number of elements used. */
11794 static int
11795 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11796 int align_words, rtx *rvec)
11798 int k = 0;
11800 if (align_words < GP_ARG_NUM_REG)
11802 int n_words = rs6000_arg_size (mode, type);
11804 if (align_words + n_words > GP_ARG_NUM_REG
11805 || mode == BLKmode
11806 || (TARGET_32BIT && TARGET_POWERPC64))
11808 /* If this is partially on the stack, then we only
11809 include the portion actually in registers here. */
11810 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11811 int i = 0;
11813 if (align_words + n_words > GP_ARG_NUM_REG)
11815 /* Not all of the arg fits in gprs. Say that it goes in memory
11816 too, using a magic NULL_RTX component. Also see comment in
11817 rs6000_mixed_function_arg for why the normal
11818 function_arg_partial_nregs scheme doesn't work in this case. */
11819 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11822 do
11823 {
11824 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11825 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11826 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11827 }
11828 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11830 else
11832 /* The whole arg fits in gprs. */
11833 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11834 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11837 else
11839 /* It's entirely in memory. */
11840 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11843 return k;
11846 /* RVEC is a vector of K components of an argument of mode MODE.
11847 Construct the final function_arg return value from it. */
11849 static rtx
11850 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11852 gcc_assert (k >= 1);
11854 /* Avoid returning a PARALLEL in the trivial cases. */
11855 if (k == 1)
11857 if (XEXP (rvec[0], 0) == NULL_RTX)
11858 return NULL_RTX;
11860 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11861 return XEXP (rvec[0], 0);
11864 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11867 /* Determine where to put an argument to a function.
11868 Value is zero to push the argument on the stack,
11869 or a hard register in which to store the argument.
11871 MODE is the argument's machine mode.
11872 TYPE is the data type of the argument (as a tree).
11873 This is null for libcalls where that information may
11874 not be available.
11875 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11876 the preceding args and about the function being called. It is
11877 not modified in this routine.
11878 NAMED is nonzero if this argument is a named parameter
11879 (otherwise it is an extra parameter matching an ellipsis).
11881 On RS/6000 the first eight words of non-FP are normally in registers
11882 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11883 Under V.4, the first 8 FP args are in registers.
11885 If this is floating-point and no prototype is specified, we use
11886 both an FP and integer register (or possibly FP reg and stack). Library
11887 functions (when CALL_LIBCALL is set) always have the proper types for args,
11888 so we can pass the FP value just in one register. emit_library_function
11889 doesn't support PARALLEL anyway.
11891 Note that for args passed by reference, function_arg will be called
11892 with MODE and TYPE set to that of the pointer to the arg, not the arg
11893 itself. */
11895 static rtx
11896 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11897 const_tree type, bool named)
11899 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11900 enum rs6000_abi abi = DEFAULT_ABI;
11901 machine_mode elt_mode;
11902 int n_elts;
11904 /* Return a marker to indicate whether we need to set or clear the
11905 CR1 bit that V.4 uses to say fp args were passed in registers.
11906 Assume that we don't need the marker for software floating point,
11907 or compiler generated library calls. */
11908 if (mode == VOIDmode)
11910 if (abi == ABI_V4
11911 && (cum->call_cookie & CALL_LIBCALL) == 0
11912 && (cum->stdarg
11913 || (cum->nargs_prototype < 0
11914 && (cum->prototype || TARGET_NO_PROTOTYPE))))
11916 /* For the SPE, we need to crxor CR6 always. */
11917 if (TARGET_SPE_ABI)
11918 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
11919 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
11920 return GEN_INT (cum->call_cookie
11921 | ((cum->fregno == FP_ARG_MIN_REG)
11922 ? CALL_V4_SET_FP_ARGS
11923 : CALL_V4_CLEAR_FP_ARGS));
11926 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11929 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11931 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11933 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11934 if (rslt != NULL_RTX)
11935 return rslt;
11936 /* Else fall through to usual handling. */
11939 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11941 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11942 rtx r, off;
11943 int i, k = 0;
11945 /* Do we also need to pass this argument in the parameter save area?
11946 Library support functions for IEEE 128-bit are assumed to not need the
11947 value passed both in GPRs and in vector registers. */
11948 if (TARGET_64BIT && !cum->prototype
11949 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11951 int align_words = ROUND_UP (cum->words, 2);
11952 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11955 /* Describe where this argument goes in the vector registers. */
11956 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11958 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11959 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11960 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11963 return rs6000_finish_function_arg (mode, rvec, k);
11965 else if (TARGET_ALTIVEC_ABI
11966 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11967 || (type && TREE_CODE (type) == VECTOR_TYPE
11968 && int_size_in_bytes (type) == 16)))
11970 if (named || abi == ABI_V4)
11971 return NULL_RTX;
11972 else
11974 /* Vector parameters to varargs functions under AIX or Darwin
11975 get passed in memory and possibly also in GPRs. */
11976 int align, align_words, n_words;
11977 machine_mode part_mode;
11979 /* Vector parameters must be 16-byte aligned. In 32-bit
11980 mode this means we need to take into account the offset
11981 to the parameter save area. In 64-bit mode, they just
11982 have to start on an even word, since the parameter save
11983 area is 16-byte aligned. */
11984 if (TARGET_32BIT)
11985 align = -(rs6000_parm_offset () + cum->words) & 3;
11986 else
11987 align = cum->words & 1;
11988 align_words = cum->words + align;
11990 /* Out of registers? Memory, then. */
11991 if (align_words >= GP_ARG_NUM_REG)
11992 return NULL_RTX;
11994 if (TARGET_32BIT && TARGET_POWERPC64)
11995 return rs6000_mixed_function_arg (mode, type, align_words);
11997 /* The vector value goes in GPRs. Only the part of the
11998 value in GPRs is reported here. */
11999 part_mode = mode;
12000 n_words = rs6000_arg_size (mode, type);
12001 if (align_words + n_words > GP_ARG_NUM_REG)
12002 /* Fortunately, there are only two possibilities, the value
12003 is either wholly in GPRs or half in GPRs and half not. */
12004 part_mode = DImode;
12006 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12009 else if (TARGET_SPE_ABI && TARGET_SPE
12010 && (SPE_VECTOR_MODE (mode)
12011 || (TARGET_E500_DOUBLE && (mode == DFmode
12012 || mode == DCmode
12013 || mode == TFmode
12014 || mode == TCmode))))
12015 return rs6000_spe_function_arg (cum, mode, type);
12017 else if (abi == ABI_V4)
12019 if (abi_v4_pass_in_fpr (mode))
12021 /* _Decimal128 must use an even/odd register pair. This assumes
12022 that the register number is odd when fregno is odd. */
12023 if (mode == TDmode && (cum->fregno % 2) == 1)
12024 cum->fregno++;
12026 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12027 <= FP_ARG_V4_MAX_REG)
12028 return gen_rtx_REG (mode, cum->fregno);
12029 else
12030 return NULL_RTX;
12032 else
12034 int n_words = rs6000_arg_size (mode, type);
12035 int gregno = cum->sysv_gregno;
12037 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
12038 (r7,r8) or (r9,r10). As does any other 2 word item such
12039 as complex int due to a historical mistake. */
12040 if (n_words == 2)
12041 gregno += (1 - gregno) & 1;
12043 /* Multi-reg args are not split between registers and stack. */
12044 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12045 return NULL_RTX;
12047 if (TARGET_32BIT && TARGET_POWERPC64)
12048 return rs6000_mixed_function_arg (mode, type,
12049 gregno - GP_ARG_MIN_REG);
12050 return gen_rtx_REG (mode, gregno);
12053 else
12055 int align_words = rs6000_parm_start (mode, type, cum->words);
12057 /* _Decimal128 must be passed in an even/odd float register pair.
12058 This assumes that the register number is odd when fregno is odd. */
12059 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12060 cum->fregno++;
12062 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12064 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12065 rtx r, off;
12066 int i, k = 0;
12067 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12068 int fpr_words;
12070 /* Do we also need to pass this argument in the parameter
12071 save area? */
12072 if (type && (cum->nargs_prototype <= 0
12073 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12074 && TARGET_XL_COMPAT
12075 && align_words >= GP_ARG_NUM_REG)))
12076 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12078 /* Describe where this argument goes in the fprs. */
12079 for (i = 0; i < n_elts
12080 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12082 /* Check if the argument is split over registers and memory.
12083 This can only ever happen for long double or _Decimal128;
12084 complex types are handled via split_complex_arg. */
12085 machine_mode fmode = elt_mode;
12086 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12088 gcc_assert (FLOAT128_2REG_P (fmode));
12089 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12092 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12093 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12094 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12097 /* If there were not enough FPRs to hold the argument, the rest
12098 usually goes into memory. However, if the current position
12099 is still within the register parameter area, a portion may
12100 actually have to go into GPRs.
12102 Note that it may happen that the portion of the argument
12103 passed in the first "half" of the first GPR was already
12104 passed in the last FPR as well.
12106 For unnamed arguments, we already set up GPRs to cover the
12107 whole argument in rs6000_psave_function_arg, so there is
12108 nothing further to do at this point. */
12109 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12110 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12111 && cum->nargs_prototype > 0)
12113 static bool warned;
12115 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12116 int n_words = rs6000_arg_size (mode, type);
12118 align_words += fpr_words;
12119 n_words -= fpr_words;
12123 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12124 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12125 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12127 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12129 if (!warned && warn_psabi)
12131 warned = true;
12132 inform (input_location,
12133 "the ABI of passing homogeneous float aggregates"
12134 " has changed in GCC 5");
12138 return rs6000_finish_function_arg (mode, rvec, k);
12140 else if (align_words < GP_ARG_NUM_REG)
12142 if (TARGET_32BIT && TARGET_POWERPC64)
12143 return rs6000_mixed_function_arg (mode, type, align_words);
12145 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12147 else
12148 return NULL_RTX;
12152 /* For an arg passed partly in registers and partly in memory, this is
12153 the number of bytes passed in registers. For args passed entirely in
12154 registers or entirely in memory, zero. When an arg is described by a
12155 PARALLEL, perhaps using more than one register type, this function
12156 returns the number of bytes used by the first element of the PARALLEL. */
12158 static int
12159 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12160 tree type, bool named)
12162 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12163 bool passed_in_gprs = true;
12164 int ret = 0;
12165 int align_words;
12166 machine_mode elt_mode;
12167 int n_elts;
12169 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12171 if (DEFAULT_ABI == ABI_V4)
12172 return 0;
12174 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12176 /* If we are passing this arg in the fixed parameter save area (gprs or
12177 memory) as well as VRs, we do not use the partial bytes mechanism;
12178 instead, rs6000_function_arg will return a PARALLEL including a memory
12179 element as necessary. Library support functions for IEEE 128-bit are
12180 assumed to not need the value passed both in GPRs and in vector
12181 registers. */
12182 if (TARGET_64BIT && !cum->prototype
12183 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12184 return 0;
12186 /* Otherwise, we pass in VRs only. Check for partial copies. */
12187 passed_in_gprs = false;
12188 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12189 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12192 /* In this complicated case we just disable the partial_nregs code. */
12193 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12194 return 0;
12196 align_words = rs6000_parm_start (mode, type, cum->words);
12198 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12200 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12202 /* If we are passing this arg in the fixed parameter save area
12203 (gprs or memory) as well as FPRs, we do not use the partial
12204 bytes mechanism; instead, rs6000_function_arg will return a
12205 PARALLEL including a memory element as necessary. */
12206 if (type
12207 && (cum->nargs_prototype <= 0
12208 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12209 && TARGET_XL_COMPAT
12210 && align_words >= GP_ARG_NUM_REG)))
12211 return 0;
12213 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12214 passed_in_gprs = false;
12215 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12217 /* Compute number of bytes / words passed in FPRs. If there
12218 is still space available in the register parameter area
12219 *after* that amount, a part of the argument will be passed
12220 in GPRs. In that case, the total amount passed in any
12221 registers is equal to the amount that would have been passed
12222 in GPRs if everything were passed there, so we fall back to
12223 the GPR code below to compute the appropriate value. */
12224 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12225 * MIN (8, GET_MODE_SIZE (elt_mode)));
12226 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12228 if (align_words + fpr_words < GP_ARG_NUM_REG)
12229 passed_in_gprs = true;
12230 else
12231 ret = fpr;
12235 if (passed_in_gprs
12236 && align_words < GP_ARG_NUM_REG
12237 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12238 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12240 if (ret != 0 && TARGET_DEBUG_ARG)
12241 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12243 return ret;
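/* Worked example (64-bit, AIX-style): a 24-byte struct starting at
   align_words == 6 has rs6000_arg_size == 3 words, but only
   (8 - 6) * 8 == 16 bytes fit in r9 and r10, so 16 is returned and
   the final word goes on the stack.  */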
12246 /* A C expression that indicates when an argument must be passed by
12247 reference. If nonzero for an argument, a copy of that argument is
12248 made in memory and a pointer to the argument is passed instead of
12249 the argument itself. The pointer is passed in whatever way is
12250 appropriate for passing a pointer to that type.
12252 Under V.4, aggregates and long double are passed by reference.
12254 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12255 reference unless the AltiVec vector extension ABI is in force.
12257 As an extension to all ABIs, variable sized types are passed by
12258 reference. */
12260 static bool
12261 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12262 machine_mode mode, const_tree type,
12263 bool named ATTRIBUTE_UNUSED)
12265 if (!type)
12266 return 0;
12268 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12269 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12271 if (TARGET_DEBUG_ARG)
12272 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12273 return 1;
12276 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12278 if (TARGET_DEBUG_ARG)
12279 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12280 return 1;
12283 if (int_size_in_bytes (type) < 0)
12285 if (TARGET_DEBUG_ARG)
12286 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12287 return 1;
12290 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12291 modes only exist for GCC vector types if -maltivec. */
12292 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12294 if (TARGET_DEBUG_ARG)
12295 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12296 return 1;
12299 /* Pass synthetic vectors in memory. */
12300 if (TREE_CODE (type) == VECTOR_TYPE
12301 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12303 static bool warned_for_pass_big_vectors = false;
12304 if (TARGET_DEBUG_ARG)
12305 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12306 if (!warned_for_pass_big_vectors)
12308 warning (0, "GCC vector passed by reference: "
12309 "non-standard ABI extension with no compatibility guarantee");
12310 warned_for_pass_big_vectors = true;
12312 return 1;
12315 return 0;
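/* So under the 32-bit V.4 ABI, for instance, both of these arrive as
   pointers to a caller-made copy:

     struct point { int x, y; };                  /+ any aggregate     +/
     int v __attribute__ ((vector_size (32)));    /+ synthetic vector  +/

   while scalar ints and doubles are passed by value as usual.  */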
12318 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12319 already processed. Return true if the parameter must be passed
12320 (fully or partially) on the stack. */
12322 static bool
12323 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12325 machine_mode mode;
12326 int unsignedp;
12327 rtx entry_parm;
12329 /* Catch errors. */
12330 if (type == NULL || type == error_mark_node)
12331 return true;
12333 /* Handle types with no storage requirement. */
12334 if (TYPE_MODE (type) == VOIDmode)
12335 return false;
12337 /* Handle complex types. */
12338 if (TREE_CODE (type) == COMPLEX_TYPE)
12339 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12340 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12342 /* Handle transparent aggregates. */
12343 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12344 && TYPE_TRANSPARENT_AGGR (type))
12345 type = TREE_TYPE (first_field (type));
12347 /* See if this arg was passed by invisible reference. */
12348 if (pass_by_reference (get_cumulative_args (args_so_far),
12349 TYPE_MODE (type), type, true))
12350 type = build_pointer_type (type);
12352 /* Find mode as it is passed by the ABI. */
12353 unsignedp = TYPE_UNSIGNED (type);
12354 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12356 /* If we must pass in stack, we need a stack. */
12357 if (rs6000_must_pass_in_stack (mode, type))
12358 return true;
12360 /* If there is no incoming register, we need a stack. */
12361 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12362 if (entry_parm == NULL)
12363 return true;
12365 /* Likewise if we need to pass both in registers and on the stack. */
12366 if (GET_CODE (entry_parm) == PARALLEL
12367 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12368 return true;
12370 /* Also true if we're partially in registers and partially not. */
12371 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12372 return true;
12374 /* Update info on where next arg arrives in registers. */
12375 rs6000_function_arg_advance (args_so_far, mode, type, true);
12376 return false;
12379 /* Return true if FUN has no prototype, has a variable argument
12380 list, or passes any parameter in memory. */
12382 static bool
12383 rs6000_function_parms_need_stack (tree fun, bool incoming)
12385 tree fntype, result;
12386 CUMULATIVE_ARGS args_so_far_v;
12387 cumulative_args_t args_so_far;
12389 if (!fun)
12390 /* Must be a libcall, all of which only use reg parms. */
12391 return false;
12393 fntype = fun;
12394 if (!TYPE_P (fun))
12395 fntype = TREE_TYPE (fun);
12397 /* Varargs functions need the parameter save area. */
12398 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12399 return true;
12401 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12402 args_so_far = pack_cumulative_args (&args_so_far_v);
12404 /* When incoming, we will have been passed the function decl.
12405 It is necessary to use the decl to handle K&R style functions,
12406 where TYPE_ARG_TYPES may not be available. */
12407 if (incoming)
12409 gcc_assert (DECL_P (fun));
12410 result = DECL_RESULT (fun);
12412 else
12413 result = TREE_TYPE (fntype);
12415 if (result && aggregate_value_p (result, fntype))
12417 if (!TYPE_P (result))
12418 result = TREE_TYPE (result);
12419 result = build_pointer_type (result);
12420 rs6000_parm_needs_stack (args_so_far, result);
12423 if (incoming)
12425 tree parm;
12427 for (parm = DECL_ARGUMENTS (fun);
12428 parm && parm != void_list_node;
12429 parm = TREE_CHAIN (parm))
12430 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12431 return true;
12433 else
12435 function_args_iterator args_iter;
12436 tree arg_type;
12438 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12439 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12440 return true;
12443 return false;
12446 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12447 usually a constant depending on the ABI. However, in the ELFv2 ABI
12448 the register parameter area is optional when calling a function that
12449 has a prototype in scope, has no variable argument list, and passes
12450 all parameters in registers. */
12452 int
12453 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12455 int reg_parm_stack_space;
12457 switch (DEFAULT_ABI)
12459 default:
12460 reg_parm_stack_space = 0;
12461 break;
12463 case ABI_AIX:
12464 case ABI_DARWIN:
12465 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12466 break;
12468 case ABI_ELFv2:
12469 /* ??? Recomputing this every time is a bit expensive. Is there
12470 a place to cache this information? */
12471 if (rs6000_function_parms_need_stack (fun, incoming))
12472 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12473 else
12474 reg_parm_stack_space = 0;
12475 break;
12478 return reg_parm_stack_space;
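/* E.g. for an ELFv2 call to

     extern int f (int, int);

   no parameter save area is required, so 0 is returned, while a
   variadic or unprototyped callee still reserves the full 64 bytes.  */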
12481 static void
12482 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12484 int i;
12485 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12487 if (nregs == 0)
12488 return;
12490 for (i = 0; i < nregs; i++)
12492 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12493 if (reload_completed)
12495 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12496 tem = NULL_RTX;
12497 else
12498 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12499 i * GET_MODE_SIZE (reg_mode));
12501 else
12502 tem = replace_equiv_address (tem, XEXP (tem, 0));
12504 gcc_assert (tem);
12506 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12510 /* Perform any actions needed for a function that is receiving a
12511 variable number of arguments.
12513 CUM is as above.
12515 MODE and TYPE are the mode and type of the current parameter.
12517 PRETEND_SIZE is a variable that should be set to the amount of stack
12518 that must be pushed by the prolog to pretend that our caller pushed
12519 it.
12521 Normally, this macro will push all remaining incoming registers on the
12522 stack and set PRETEND_SIZE to the length of the registers pushed. */
12524 static void
12525 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12526 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12527 int no_rtl)
12529 CUMULATIVE_ARGS next_cum;
12530 int reg_size = TARGET_32BIT ? 4 : 8;
12531 rtx save_area = NULL_RTX, mem;
12532 int first_reg_offset;
12533 alias_set_type set;
12535 /* Skip the last named argument. */
12536 next_cum = *get_cumulative_args (cum);
12537 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12539 if (DEFAULT_ABI == ABI_V4)
12541 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12543 if (! no_rtl)
12545 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12546 HOST_WIDE_INT offset = 0;
12548 /* Try to optimize the size of the varargs save area.
12549 The ABI requires that ap.reg_save_area is doubleword
12550 aligned, but we don't need to allocate space for all
12551 the bytes, only those to which we actually will save
12552 anything. */
12553 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12554 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12555 if (TARGET_HARD_FLOAT && TARGET_FPRS
12556 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12557 && cfun->va_list_fpr_size)
12559 if (gpr_reg_num)
12560 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12561 * UNITS_PER_FP_WORD;
12562 if (cfun->va_list_fpr_size
12563 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12564 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12565 else
12566 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12567 * UNITS_PER_FP_WORD;
12569 if (gpr_reg_num)
12571 offset = -((first_reg_offset * reg_size) & ~7);
12572 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12574 gpr_reg_num = cfun->va_list_gpr_size;
12575 if (reg_size == 4 && (first_reg_offset & 1))
12576 gpr_reg_num++;
12578 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12580 else if (fpr_size)
12581 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12582 * UNITS_PER_FP_WORD
12583 - (int) (GP_ARG_NUM_REG * reg_size);
12585 if (gpr_size + fpr_size)
12587 rtx reg_save_area
12588 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12589 gcc_assert (GET_CODE (reg_save_area) == MEM);
12590 reg_save_area = XEXP (reg_save_area, 0);
12591 if (GET_CODE (reg_save_area) == PLUS)
12593 gcc_assert (XEXP (reg_save_area, 0)
12594 == virtual_stack_vars_rtx);
12595 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12596 offset += INTVAL (XEXP (reg_save_area, 1));
12598 else
12599 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12602 cfun->machine->varargs_save_offset = offset;
12603 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12606 else
12608 first_reg_offset = next_cum.words;
12609 save_area = crtl->args.internal_arg_pointer;
12611 if (targetm.calls.must_pass_in_stack (mode, type))
12612 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12615 set = get_varargs_alias_set ();
12616 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12617 && cfun->va_list_gpr_size)
12619 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12621 if (va_list_gpr_counter_field)
12622 /* V4 va_list_gpr_size counts number of registers needed. */
12623 n_gpr = cfun->va_list_gpr_size;
12624 else
12625 /* char * va_list instead counts number of bytes needed. */
12626 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12628 if (nregs > n_gpr)
12629 nregs = n_gpr;
12631 mem = gen_rtx_MEM (BLKmode,
12632 plus_constant (Pmode, save_area,
12633 first_reg_offset * reg_size));
12634 MEM_NOTRAP_P (mem) = 1;
12635 set_mem_alias_set (mem, set);
12636 set_mem_align (mem, BITS_PER_WORD);
12638 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12639 nregs);
12642 /* Save FP registers if needed. */
12643 if (DEFAULT_ABI == ABI_V4
12644 && TARGET_HARD_FLOAT && TARGET_FPRS
12645 && ! no_rtl
12646 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12647 && cfun->va_list_fpr_size)
12649 int fregno = next_cum.fregno, nregs;
12650 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12651 rtx lab = gen_label_rtx ();
12652 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12653 * UNITS_PER_FP_WORD);
12655 emit_jump_insn
12656 (gen_rtx_SET (pc_rtx,
12657 gen_rtx_IF_THEN_ELSE (VOIDmode,
12658 gen_rtx_NE (VOIDmode, cr1,
12659 const0_rtx),
12660 gen_rtx_LABEL_REF (VOIDmode, lab),
12661 pc_rtx)));
12663 for (nregs = 0;
12664 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12665 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12667 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12668 ? DFmode : SFmode,
12669 plus_constant (Pmode, save_area, off));
12670 MEM_NOTRAP_P (mem) = 1;
12671 set_mem_alias_set (mem, set);
12672 set_mem_align (mem, GET_MODE_ALIGNMENT (
12673 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12674 ? DFmode : SFmode));
12675 emit_move_insn (mem, gen_rtx_REG (
12676 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12677 ? DFmode : SFmode, fregno));
12680 emit_label (lab);
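/* A sketch of the save area laid out above for V.4 with hard float:

     reg_save_area +  0 .. +31 : r3..r10 (8 GPRs * 4 bytes)
     reg_save_area + 32 .. +95 : f1..f8  (8 FPRs * 8 bytes)

   with the FPR stores guarded by the CR1 bit the caller uses to flag
   whether any FP arguments were passed in registers.  */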
12684 /* Create the va_list data type. */
12686 static tree
12687 rs6000_build_builtin_va_list (void)
12689 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12691 /* For AIX, prefer 'char *' because that's what the system
12692 header files like. */
12693 if (DEFAULT_ABI != ABI_V4)
12694 return build_pointer_type (char_type_node);
12696 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12697 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12698 get_identifier ("__va_list_tag"), record);
12700 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12701 unsigned_char_type_node);
12702 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12703 unsigned_char_type_node);
12704 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12705 every user file. */
12706 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12707 get_identifier ("reserved"), short_unsigned_type_node);
12708 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12709 get_identifier ("overflow_arg_area"),
12710 ptr_type_node);
12711 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12712 get_identifier ("reg_save_area"),
12713 ptr_type_node);
12715 va_list_gpr_counter_field = f_gpr;
12716 va_list_fpr_counter_field = f_fpr;
12718 DECL_FIELD_CONTEXT (f_gpr) = record;
12719 DECL_FIELD_CONTEXT (f_fpr) = record;
12720 DECL_FIELD_CONTEXT (f_res) = record;
12721 DECL_FIELD_CONTEXT (f_ovf) = record;
12722 DECL_FIELD_CONTEXT (f_sav) = record;
12724 TYPE_STUB_DECL (record) = type_decl;
12725 TYPE_NAME (record) = type_decl;
12726 TYPE_FIELDS (record) = f_gpr;
12727 DECL_CHAIN (f_gpr) = f_fpr;
12728 DECL_CHAIN (f_fpr) = f_res;
12729 DECL_CHAIN (f_res) = f_ovf;
12730 DECL_CHAIN (f_ovf) = f_sav;
12732 layout_type (record);
12734 /* The correct type is an array type of one element. */
12735 return build_array_type (record, build_index_type (size_zero_node));
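/* The layout this builds for V.4 is equivalent to:

     typedef struct __va_list_tag
     {
       unsigned char gpr;          /+ index of next saved GPR, 0..8     +/
       unsigned char fpr;          /+ index of next saved FPR, 0..8     +/
       unsigned short reserved;    /+ named so -Wpadded stays quiet     +/
       void *overflow_arg_area;    /+ arguments that spilled to memory  +/
       void *reg_save_area;        /+ base of the GPR/FPR save block    +/
     } va_list[1];  */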
12738 /* Implement va_start. */
12740 static void
12741 rs6000_va_start (tree valist, rtx nextarg)
12743 HOST_WIDE_INT words, n_gpr, n_fpr;
12744 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12745 tree gpr, fpr, ovf, sav, t;
12747 /* Only SVR4 needs something special. */
12748 if (DEFAULT_ABI != ABI_V4)
12750 std_expand_builtin_va_start (valist, nextarg);
12751 return;
12754 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12755 f_fpr = DECL_CHAIN (f_gpr);
12756 f_res = DECL_CHAIN (f_fpr);
12757 f_ovf = DECL_CHAIN (f_res);
12758 f_sav = DECL_CHAIN (f_ovf);
12760 valist = build_simple_mem_ref (valist);
12761 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12762 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12763 f_fpr, NULL_TREE);
12764 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12765 f_ovf, NULL_TREE);
12766 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12767 f_sav, NULL_TREE);
12769 /* Count number of gp and fp argument registers used. */
12770 words = crtl->args.info.words;
12771 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12772 GP_ARG_NUM_REG);
12773 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12774 FP_ARG_NUM_REG);
12776 if (TARGET_DEBUG_ARG)
12777 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12778 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12779 words, n_gpr, n_fpr);
12781 if (cfun->va_list_gpr_size)
12783 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12784 build_int_cst (NULL_TREE, n_gpr));
12785 TREE_SIDE_EFFECTS (t) = 1;
12786 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12789 if (cfun->va_list_fpr_size)
12791 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12792 build_int_cst (NULL_TREE, n_fpr));
12793 TREE_SIDE_EFFECTS (t) = 1;
12794 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12796 #ifdef HAVE_AS_GNU_ATTRIBUTE
12797 if (call_ABI_of_interest (cfun->decl))
12798 rs6000_passes_float = true;
12799 #endif
12802 /* Find the overflow area. */
12803 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12804 if (words != 0)
12805 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12806 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12807 TREE_SIDE_EFFECTS (t) = 1;
12808 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12810 /* If there were no va_arg invocations, don't set up the register
12811 save area. */
12812 if (!cfun->va_list_gpr_size
12813 && !cfun->va_list_fpr_size
12814 && n_gpr < GP_ARG_NUM_REG
12815 && n_fpr < FP_ARG_V4_MAX_REG)
12816 return;
12818 /* Find the register save area. */
12819 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12820 if (cfun->machine->varargs_save_offset)
12821 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12822 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12823 TREE_SIDE_EFFECTS (t) = 1;
12824 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
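/* Editor's worked example (illustrative, not from the original source):
   for "void f (int a, ...)" on V4, after va_start (ap, a) the counters
   reflect the single named GP argument:
     ap->gpr = 1;  ap->fpr = 0;  words = 0,
   so overflow_arg_area points at the first stack argument and
   reg_save_area at the block where r3-r10 (and f1-f8, if the function
   passes floats) were dumped by the prologue.  */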
12827 /* Implement va_arg. */
12829 static tree
12830 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12831 gimple_seq *post_p)
12833 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12834 tree gpr, fpr, ovf, sav, reg, t, u;
12835 int size, rsize, n_reg, sav_ofs, sav_scale;
12836 tree lab_false, lab_over, addr;
12837 int align;
12838 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12839 int regalign = 0;
12840 gimple *stmt;
12842 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12844 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12845 return build_va_arg_indirect_ref (t);
12848 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12849 earlier version of gcc, with the property that it always applied alignment
12850 adjustments to the va-args (even for zero-sized types). The cheapest way
12851 to deal with this is to replicate the effect of the part of
12852 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12853 of relevance.
12854 We don't need to check for pass-by-reference because of the test above.
12855 We can return a simplified answer, since we know there's no offset to add. */
12857 if (((TARGET_MACHO
12858 && rs6000_darwin64_abi)
12859 || DEFAULT_ABI == ABI_ELFv2
12860 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12861 && integer_zerop (TYPE_SIZE (type)))
12863 unsigned HOST_WIDE_INT align, boundary;
12864 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12865 align = PARM_BOUNDARY / BITS_PER_UNIT;
12866 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12867 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12868 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12869 boundary /= BITS_PER_UNIT;
12870 if (boundary > align)
12872 tree t;
12873 /* This updates arg ptr by the amount that would be necessary
12874 to align the zero-sized (but not zero-alignment) item. */
12875 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12876 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12877 gimplify_and_add (t, pre_p);
12879 t = fold_convert (sizetype, valist_tmp);
12880 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12881 fold_convert (TREE_TYPE (valist),
12882 fold_build2 (BIT_AND_EXPR, sizetype, t,
12883 size_int (-boundary))));
12884 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12885 gimplify_and_add (t, pre_p);
12887 /* Since it is zero-sized there's no increment for the item itself. */
12888 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12889 return build_va_arg_indirect_ref (valist_tmp);
12892 if (DEFAULT_ABI != ABI_V4)
12894 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12896 tree elem_type = TREE_TYPE (type);
12897 machine_mode elem_mode = TYPE_MODE (elem_type);
12898 int elem_size = GET_MODE_SIZE (elem_mode);
12900 if (elem_size < UNITS_PER_WORD)
12902 tree real_part, imag_part;
12903 gimple_seq post = NULL;
12905 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12906 &post);
12907 /* Copy the value into a temporary, lest the formal temporary
12908 be reused out from under us. */
12909 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12910 gimple_seq_add_seq (pre_p, post);
12912 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12913 post_p);
12915 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12919 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12922 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12923 f_fpr = DECL_CHAIN (f_gpr);
12924 f_res = DECL_CHAIN (f_fpr);
12925 f_ovf = DECL_CHAIN (f_res);
12926 f_sav = DECL_CHAIN (f_ovf);
12928 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12929 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12930 f_fpr, NULL_TREE);
12931 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12932 f_ovf, NULL_TREE);
12933 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12934 f_sav, NULL_TREE);
12936 size = int_size_in_bytes (type);
12937 rsize = (size + 3) / 4;
12938 align = 1;
12940 machine_mode mode = TYPE_MODE (type);
12941 if (abi_v4_pass_in_fpr (mode))
12943 /* FP args go in FP registers, if present. */
12944 reg = fpr;
12945 n_reg = (size + 7) / 8;
12946 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
12947 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
12948 if (mode != SFmode && mode != SDmode)
12949 align = 8;
12951 else
12953 /* Otherwise into GP registers. */
12954 reg = gpr;
12955 n_reg = rsize;
12956 sav_ofs = 0;
12957 sav_scale = 4;
12958 if (n_reg == 2)
12959 align = 8;
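/* Editor's sketch of the V4 register save area addressed below
   (assuming TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT; illustrative):

     offset  0:  r3 r4 r5 r6 r7 r8 r9 r10    -- 8 GPRs x 4 bytes
     offset 32:  f1 f2 f3 f4 f5 f6 f7 f8     -- 8 FPRs x 8 bytes

   hence sav_ofs = 8 * 4 = 32 and sav_scale = 8 for FP arguments,
   versus sav_ofs = 0 and sav_scale = 4 for GP arguments.  */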
12962 /* Pull the value out of the saved registers.... */
12964 lab_over = NULL;
12965 addr = create_tmp_var (ptr_type_node, "addr");
12967 /* AltiVec vectors never go in registers when -mabi=altivec. */
12968 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12969 align = 16;
12970 else
12972 lab_false = create_artificial_label (input_location);
12973 lab_over = create_artificial_label (input_location);
12975 /* Long long and SPE vectors are aligned in the registers,
12976 as is any other 2-gpr item such as complex int, due to a
12977 historical mistake. */
12978 u = reg;
12979 if (n_reg == 2 && reg == gpr)
12981 regalign = 1;
12982 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12983 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12984 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12985 unshare_expr (reg), u);
12987 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12988 reg number is 0 for f1, so we want to make it odd. */
12989 else if (reg == fpr && mode == TDmode)
12991 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12992 build_int_cst (TREE_TYPE (reg), 1));
12993 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12996 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12997 t = build2 (GE_EXPR, boolean_type_node, u, t);
12998 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12999 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13000 gimplify_and_add (t, pre_p);
13002 t = sav;
13003 if (sav_ofs)
13004 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13006 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13007 build_int_cst (TREE_TYPE (reg), n_reg));
13008 u = fold_convert (sizetype, u);
13009 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13010 t = fold_build_pointer_plus (t, u);
13012 /* _Decimal32 varargs are located in the second word of the 64-bit
13013 FP register for 32-bit binaries. */
13014 if (TARGET_32BIT
13015 && TARGET_HARD_FLOAT && TARGET_FPRS
13016 && mode == SDmode)
13017 t = fold_build_pointer_plus_hwi (t, size);
13019 gimplify_assign (addr, t, pre_p);
13021 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13023 stmt = gimple_build_label (lab_false);
13024 gimple_seq_add_stmt (pre_p, stmt);
13026 if ((n_reg == 2 && !regalign) || n_reg > 2)
13028 /* Ensure that we don't find any more args in regs.
13029 Alignment has already been taken care of for the special cases. */
13030 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13034 /* ... otherwise out of the overflow area. */
13036 /* Care for on-stack alignment if needed. */
13037 t = ovf;
13038 if (align != 1)
13040 t = fold_build_pointer_plus_hwi (t, align - 1);
13041 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13042 build_int_cst (TREE_TYPE (t), -align));
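/* Editor's note (illustrative): the two statements above are the usual
   round-up-to-alignment idiom, t = (ovf + align - 1) & -align.  */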
13044 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13046 gimplify_assign (unshare_expr (addr), t, pre_p);
13048 t = fold_build_pointer_plus_hwi (t, size);
13049 gimplify_assign (unshare_expr (ovf), t, pre_p);
13051 if (lab_over)
13053 stmt = gimple_build_label (lab_over);
13054 gimple_seq_add_stmt (pre_p, stmt);
13057 if (STRICT_ALIGNMENT
13058 && (TYPE_ALIGN (type)
13059 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13061 /* The value (of type complex double, for example) may not be
13062 aligned in memory in the saved registers, so copy via a
13063 temporary. (This is the same code as used for SPARC.) */
13064 tree tmp = create_tmp_var (type, "va_arg_tmp");
13065 tree dest_addr = build_fold_addr_expr (tmp);
13067 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13068 3, dest_addr, addr, size_int (rsize * 4));
13070 gimplify_and_add (copy, pre_p);
13071 addr = dest_addr;
13074 addr = fold_convert (ptrtype, addr);
13075 return build_va_arg_indirect_ref (addr);
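/* Editor's worked example (illustrative, not from the original source):
   va_arg (ap, double) on V4 with hard double-float takes the register
   path while fpr < 8, computing
     addr = reg_save_area + 32 + fpr++ * 8;
   and otherwise falls through to the overflow area, first rounding
   overflow_arg_area up to the 8-byte alignment computed above.  */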
13078 /* Builtins. */
13080 static void
13081 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13083 tree t;
13084 unsigned classify = rs6000_builtin_info[(int)code].attr;
13085 const char *attr_string = "";
13087 gcc_assert (name != NULL);
13088 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13090 if (rs6000_builtin_decls[(int)code])
13091 fatal_error (input_location,
13092 "internal error: builtin function %s already processed", name);
13094 rs6000_builtin_decls[(int)code] = t =
13095 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13097 /* Set any special attributes. */
13098 if ((classify & RS6000_BTC_CONST) != 0)
13100 /* const function, function only depends on the inputs. */
13101 TREE_READONLY (t) = 1;
13102 TREE_NOTHROW (t) = 1;
13103 attr_string = ", const";
13105 else if ((classify & RS6000_BTC_PURE) != 0)
13107 /* pure function, function can read global memory, but does not set any
13108 external state. */
13109 DECL_PURE_P (t) = 1;
13110 TREE_NOTHROW (t) = 1;
13111 attr_string = ", pure";
13113 else if ((classify & RS6000_BTC_FP) != 0)
13115 /* Function is a math function.  If -frounding-math is in effect, treat
13116 the function as not reading global memory, but as having arbitrary
13117 side effects.  If it is off, assume the function is a const function.
13118 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13119 builtin-attrs.def that is used for the math functions. */
13120 TREE_NOTHROW (t) = 1;
13121 if (flag_rounding_math)
13123 DECL_PURE_P (t) = 1;
13124 DECL_IS_NOVOPS (t) = 1;
13125 attr_string = ", fp, pure";
13127 else
13129 TREE_READONLY (t) = 1;
13130 attr_string = ", fp, const";
13133 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13134 gcc_unreachable ();
13136 if (TARGET_DEBUG_BUILTIN)
13137 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13138 (int)code, name, attr_string);
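/* Editor's note (illustrative): a builtin registered with
   RS6000_BTC_CONST, e.g. __builtin_altivec_vaddfp, becomes
   TREE_READONLY + TREE_NOTHROW, which lets the middle end CSE
   repeated calls:
     x = __builtin_altivec_vaddfp (a, b);
     y = __builtin_altivec_vaddfp (a, b);   -- folded to y = x
   */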
13141 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13143 #undef RS6000_BUILTIN_0
13144 #undef RS6000_BUILTIN_1
13145 #undef RS6000_BUILTIN_2
13146 #undef RS6000_BUILTIN_3
13147 #undef RS6000_BUILTIN_A
13148 #undef RS6000_BUILTIN_D
13149 #undef RS6000_BUILTIN_E
13150 #undef RS6000_BUILTIN_H
13151 #undef RS6000_BUILTIN_P
13152 #undef RS6000_BUILTIN_Q
13153 #undef RS6000_BUILTIN_S
13154 #undef RS6000_BUILTIN_X
13156 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13157 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13158 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13159 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13160 { MASK, ICODE, NAME, ENUM },
13162 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13163 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13164 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13165 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13166 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13167 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13168 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13169 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13171 static const struct builtin_description bdesc_3arg[] =
13173 #include "rs6000-builtin.def"
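/* Editor's note on the pattern above (illustrative): rs6000-builtin.def
   is an X-macro file.  Each table in this section re-#defines exactly
   one RS6000_BUILTIN_* variant to emit an initializer and defines the
   rest to nothing, so an entry of the schematic form

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP,
                       "__builtin_altivec_vmaddfp", MASK, ATTR, ICODE)

   expands to "{ MASK, ICODE, NAME, ENUM }," inside bdesc_3arg and to
   nothing in every other table built from the same #include.  */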
13176 /* DST operations: void foo (void *, const int, const char). */
13178 #undef RS6000_BUILTIN_0
13179 #undef RS6000_BUILTIN_1
13180 #undef RS6000_BUILTIN_2
13181 #undef RS6000_BUILTIN_3
13182 #undef RS6000_BUILTIN_A
13183 #undef RS6000_BUILTIN_D
13184 #undef RS6000_BUILTIN_E
13185 #undef RS6000_BUILTIN_H
13186 #undef RS6000_BUILTIN_P
13187 #undef RS6000_BUILTIN_Q
13188 #undef RS6000_BUILTIN_S
13189 #undef RS6000_BUILTIN_X
13191 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13192 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13193 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13194 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13195 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13196 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13197 { MASK, ICODE, NAME, ENUM },
13199 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13200 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13201 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13202 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13203 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13204 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13206 static const struct builtin_description bdesc_dst[] =
13208 #include "rs6000-builtin.def"
13211 /* Simple binary operations: VECc = foo (VECa, VECb). */
13213 #undef RS6000_BUILTIN_0
13214 #undef RS6000_BUILTIN_1
13215 #undef RS6000_BUILTIN_2
13216 #undef RS6000_BUILTIN_3
13217 #undef RS6000_BUILTIN_A
13218 #undef RS6000_BUILTIN_D
13219 #undef RS6000_BUILTIN_E
13220 #undef RS6000_BUILTIN_H
13221 #undef RS6000_BUILTIN_P
13222 #undef RS6000_BUILTIN_Q
13223 #undef RS6000_BUILTIN_S
13224 #undef RS6000_BUILTIN_X
13226 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13227 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13228 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13229 { MASK, ICODE, NAME, ENUM },
13231 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13232 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13233 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13234 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13235 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13236 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13237 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13238 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13239 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13241 static const struct builtin_description bdesc_2arg[] =
13243 #include "rs6000-builtin.def"
13246 #undef RS6000_BUILTIN_0
13247 #undef RS6000_BUILTIN_1
13248 #undef RS6000_BUILTIN_2
13249 #undef RS6000_BUILTIN_3
13250 #undef RS6000_BUILTIN_A
13251 #undef RS6000_BUILTIN_D
13252 #undef RS6000_BUILTIN_E
13253 #undef RS6000_BUILTIN_H
13254 #undef RS6000_BUILTIN_P
13255 #undef RS6000_BUILTIN_Q
13256 #undef RS6000_BUILTIN_S
13257 #undef RS6000_BUILTIN_X
13259 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13260 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13261 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13262 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13263 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13264 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13265 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13266 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13267 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13268 { MASK, ICODE, NAME, ENUM },
13270 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13271 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13272 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13274 /* AltiVec predicates. */
13276 static const struct builtin_description bdesc_altivec_preds[] =
13278 #include "rs6000-builtin.def"
13281 /* SPE predicates. */
13282 #undef RS6000_BUILTIN_0
13283 #undef RS6000_BUILTIN_1
13284 #undef RS6000_BUILTIN_2
13285 #undef RS6000_BUILTIN_3
13286 #undef RS6000_BUILTIN_A
13287 #undef RS6000_BUILTIN_D
13288 #undef RS6000_BUILTIN_E
13289 #undef RS6000_BUILTIN_H
13290 #undef RS6000_BUILTIN_P
13291 #undef RS6000_BUILTIN_Q
13292 #undef RS6000_BUILTIN_S
13293 #undef RS6000_BUILTIN_X
13295 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13296 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13297 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13298 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13299 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13300 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13301 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13302 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13303 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13304 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13305 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
13306 { MASK, ICODE, NAME, ENUM },
13308 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13310 static const struct builtin_description bdesc_spe_predicates[] =
13312 #include "rs6000-builtin.def"
13315 /* SPE evsel predicates. */
13316 #undef RS6000_BUILTIN_0
13317 #undef RS6000_BUILTIN_1
13318 #undef RS6000_BUILTIN_2
13319 #undef RS6000_BUILTIN_3
13320 #undef RS6000_BUILTIN_A
13321 #undef RS6000_BUILTIN_D
13322 #undef RS6000_BUILTIN_E
13323 #undef RS6000_BUILTIN_H
13324 #undef RS6000_BUILTIN_P
13325 #undef RS6000_BUILTIN_Q
13326 #undef RS6000_BUILTIN_S
13327 #undef RS6000_BUILTIN_X
13329 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13330 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13331 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13332 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13333 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13334 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13335 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
13336 { MASK, ICODE, NAME, ENUM },
13338 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13339 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13340 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13341 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13342 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13344 static const struct builtin_description bdesc_spe_evsel[] =
13346 #include "rs6000-builtin.def"
13349 /* PAIRED predicates. */
13350 #undef RS6000_BUILTIN_0
13351 #undef RS6000_BUILTIN_1
13352 #undef RS6000_BUILTIN_2
13353 #undef RS6000_BUILTIN_3
13354 #undef RS6000_BUILTIN_A
13355 #undef RS6000_BUILTIN_D
13356 #undef RS6000_BUILTIN_E
13357 #undef RS6000_BUILTIN_H
13358 #undef RS6000_BUILTIN_P
13359 #undef RS6000_BUILTIN_Q
13360 #undef RS6000_BUILTIN_S
13361 #undef RS6000_BUILTIN_X
13363 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13364 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13365 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13366 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13367 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13368 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13369 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13370 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13371 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13372 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13373 { MASK, ICODE, NAME, ENUM },
13375 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13376 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13378 static const struct builtin_description bdesc_paired_preds[] =
13380 #include "rs6000-builtin.def"
13383 /* ABS* operations. */
13385 #undef RS6000_BUILTIN_0
13386 #undef RS6000_BUILTIN_1
13387 #undef RS6000_BUILTIN_2
13388 #undef RS6000_BUILTIN_3
13389 #undef RS6000_BUILTIN_A
13390 #undef RS6000_BUILTIN_D
13391 #undef RS6000_BUILTIN_E
13392 #undef RS6000_BUILTIN_H
13393 #undef RS6000_BUILTIN_P
13394 #undef RS6000_BUILTIN_Q
13395 #undef RS6000_BUILTIN_S
13396 #undef RS6000_BUILTIN_X
13398 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13399 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13400 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13401 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13402 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13403 { MASK, ICODE, NAME, ENUM },
13405 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13406 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13407 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13408 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13409 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13410 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13411 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13413 static const struct builtin_description bdesc_abs[] =
13415 #include "rs6000-builtin.def"
13418 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13419 foo (VECa). */
13421 #undef RS6000_BUILTIN_0
13422 #undef RS6000_BUILTIN_1
13423 #undef RS6000_BUILTIN_2
13424 #undef RS6000_BUILTIN_3
13425 #undef RS6000_BUILTIN_A
13426 #undef RS6000_BUILTIN_D
13427 #undef RS6000_BUILTIN_E
13428 #undef RS6000_BUILTIN_H
13429 #undef RS6000_BUILTIN_P
13430 #undef RS6000_BUILTIN_Q
13431 #undef RS6000_BUILTIN_S
13432 #undef RS6000_BUILTIN_X
13434 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13435 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13436 { MASK, ICODE, NAME, ENUM },
13438 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13439 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13440 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13441 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13442 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13443 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13444 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13445 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13446 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13447 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13449 static const struct builtin_description bdesc_1arg[] =
13451 #include "rs6000-builtin.def"
13454 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
13456 #undef RS6000_BUILTIN_0
13457 #undef RS6000_BUILTIN_1
13458 #undef RS6000_BUILTIN_2
13459 #undef RS6000_BUILTIN_3
13460 #undef RS6000_BUILTIN_A
13461 #undef RS6000_BUILTIN_D
13462 #undef RS6000_BUILTIN_E
13463 #undef RS6000_BUILTIN_H
13464 #undef RS6000_BUILTIN_P
13465 #undef RS6000_BUILTIN_Q
13466 #undef RS6000_BUILTIN_S
13467 #undef RS6000_BUILTIN_X
13469 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13470 { MASK, ICODE, NAME, ENUM },
13472 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13473 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13474 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13475 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13476 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13477 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13478 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13479 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13480 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13481 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13482 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13484 static const struct builtin_description bdesc_0arg[] =
13486 #include "rs6000-builtin.def"
13489 /* HTM builtins. */
13490 #undef RS6000_BUILTIN_0
13491 #undef RS6000_BUILTIN_1
13492 #undef RS6000_BUILTIN_2
13493 #undef RS6000_BUILTIN_3
13494 #undef RS6000_BUILTIN_A
13495 #undef RS6000_BUILTIN_D
13496 #undef RS6000_BUILTIN_E
13497 #undef RS6000_BUILTIN_H
13498 #undef RS6000_BUILTIN_P
13499 #undef RS6000_BUILTIN_Q
13500 #undef RS6000_BUILTIN_S
13501 #undef RS6000_BUILTIN_X
13503 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13504 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13505 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13506 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13507 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13508 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13509 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13510 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13511 { MASK, ICODE, NAME, ENUM },
13513 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13514 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13515 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13516 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13518 static const struct builtin_description bdesc_htm[] =
13520 #include "rs6000-builtin.def"
13523 #undef RS6000_BUILTIN_0
13524 #undef RS6000_BUILTIN_1
13525 #undef RS6000_BUILTIN_2
13526 #undef RS6000_BUILTIN_3
13527 #undef RS6000_BUILTIN_A
13528 #undef RS6000_BUILTIN_D
13529 #undef RS6000_BUILTIN_E
13530 #undef RS6000_BUILTIN_H
13531 #undef RS6000_BUILTIN_P
13532 #undef RS6000_BUILTIN_Q
13533 #undef RS6000_BUILTIN_S
13534 #undef RS6000_BUILTIN_X
13535 /* Return true if a builtin function is overloaded. */
13536 bool
13537 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13539 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13542 /* Expand an expression EXP that calls a builtin without arguments. */
13543 static rtx
13544 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13546 rtx pat;
13547 machine_mode tmode = insn_data[icode].operand[0].mode;
13549 if (icode == CODE_FOR_nothing)
13550 /* Builtin not supported on this processor. */
13551 return 0;
13553 if (target == 0
13554 || GET_MODE (target) != tmode
13555 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13556 target = gen_reg_rtx (tmode);
13558 pat = GEN_FCN (icode) (target);
13559 if (! pat)
13560 return 0;
13561 emit_insn (pat);
13563 return target;
13567 static rtx
13568 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13570 rtx pat;
13571 tree arg0 = CALL_EXPR_ARG (exp, 0);
13572 tree arg1 = CALL_EXPR_ARG (exp, 1);
13573 rtx op0 = expand_normal (arg0);
13574 rtx op1 = expand_normal (arg1);
13575 machine_mode mode0 = insn_data[icode].operand[0].mode;
13576 machine_mode mode1 = insn_data[icode].operand[1].mode;
13578 if (icode == CODE_FOR_nothing)
13579 /* Builtin not supported on this processor. */
13580 return 0;
13582 /* If we got invalid arguments bail out before generating bad rtl. */
13583 if (arg0 == error_mark_node || arg1 == error_mark_node)
13584 return const0_rtx;
13586 if (GET_CODE (op0) != CONST_INT
13587 || INTVAL (op0) > 255
13588 || INTVAL (op0) < 0)
13590 error ("argument 1 must be an 8-bit field value");
13591 return const0_rtx;
13594 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13595 op0 = copy_to_mode_reg (mode0, op0);
13597 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13598 op1 = copy_to_mode_reg (mode1, op1);
13600 pat = GEN_FCN (icode) (op0, op1);
13601 if (! pat)
13602 return const0_rtx;
13603 emit_insn (pat);
13605 return NULL_RTX;
13608 static rtx
13609 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13611 rtx pat;
13612 tree arg0 = CALL_EXPR_ARG (exp, 0);
13613 rtx op0 = expand_normal (arg0);
13614 machine_mode tmode = insn_data[icode].operand[0].mode;
13615 machine_mode mode0 = insn_data[icode].operand[1].mode;
13617 if (icode == CODE_FOR_nothing)
13618 /* Builtin not supported on this processor. */
13619 return 0;
13621 /* If we got invalid arguments bail out before generating bad rtl. */
13622 if (arg0 == error_mark_node)
13623 return const0_rtx;
13625 if (icode == CODE_FOR_altivec_vspltisb
13626 || icode == CODE_FOR_altivec_vspltish
13627 || icode == CODE_FOR_altivec_vspltisw
13628 || icode == CODE_FOR_spe_evsplatfi
13629 || icode == CODE_FOR_spe_evsplati)
13631 /* Only allow 5-bit *signed* literals. */
13632 if (GET_CODE (op0) != CONST_INT
13633 || INTVAL (op0) > 15
13634 || INTVAL (op0) < -16)
13636 error ("argument 1 must be a 5-bit signed literal");
13637 return const0_rtx;
13641 if (target == 0
13642 || GET_MODE (target) != tmode
13643 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13644 target = gen_reg_rtx (tmode);
13646 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13647 op0 = copy_to_mode_reg (mode0, op0);
13649 pat = GEN_FCN (icode) (target, op0);
13650 if (! pat)
13651 return 0;
13652 emit_insn (pat);
13654 return target;
13657 static rtx
13658 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13660 rtx pat, scratch1, scratch2;
13661 tree arg0 = CALL_EXPR_ARG (exp, 0);
13662 rtx op0 = expand_normal (arg0);
13663 machine_mode tmode = insn_data[icode].operand[0].mode;
13664 machine_mode mode0 = insn_data[icode].operand[1].mode;
13666 /* If we have invalid arguments, bail out before generating bad rtl. */
13667 if (arg0 == error_mark_node)
13668 return const0_rtx;
13670 if (target == 0
13671 || GET_MODE (target) != tmode
13672 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13673 target = gen_reg_rtx (tmode);
13675 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13676 op0 = copy_to_mode_reg (mode0, op0);
13678 scratch1 = gen_reg_rtx (mode0);
13679 scratch2 = gen_reg_rtx (mode0);
13681 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13682 if (! pat)
13683 return 0;
13684 emit_insn (pat);
13686 return target;
13689 static rtx
13690 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13692 rtx pat;
13693 tree arg0 = CALL_EXPR_ARG (exp, 0);
13694 tree arg1 = CALL_EXPR_ARG (exp, 1);
13695 rtx op0 = expand_normal (arg0);
13696 rtx op1 = expand_normal (arg1);
13697 machine_mode tmode = insn_data[icode].operand[0].mode;
13698 machine_mode mode0 = insn_data[icode].operand[1].mode;
13699 machine_mode mode1 = insn_data[icode].operand[2].mode;
13701 if (icode == CODE_FOR_nothing)
13702 /* Builtin not supported on this processor. */
13703 return 0;
13705 /* If we got invalid arguments bail out before generating bad rtl. */
13706 if (arg0 == error_mark_node || arg1 == error_mark_node)
13707 return const0_rtx;
13709 if (icode == CODE_FOR_altivec_vcfux
13710 || icode == CODE_FOR_altivec_vcfsx
13711 || icode == CODE_FOR_altivec_vctsxs
13712 || icode == CODE_FOR_altivec_vctuxs
13713 || icode == CODE_FOR_altivec_vspltb
13714 || icode == CODE_FOR_altivec_vsplth
13715 || icode == CODE_FOR_altivec_vspltw
13716 || icode == CODE_FOR_spe_evaddiw
13717 || icode == CODE_FOR_spe_evldd
13718 || icode == CODE_FOR_spe_evldh
13719 || icode == CODE_FOR_spe_evldw
13720 || icode == CODE_FOR_spe_evlhhesplat
13721 || icode == CODE_FOR_spe_evlhhossplat
13722 || icode == CODE_FOR_spe_evlhhousplat
13723 || icode == CODE_FOR_spe_evlwhe
13724 || icode == CODE_FOR_spe_evlwhos
13725 || icode == CODE_FOR_spe_evlwhou
13726 || icode == CODE_FOR_spe_evlwhsplat
13727 || icode == CODE_FOR_spe_evlwwsplat
13728 || icode == CODE_FOR_spe_evrlwi
13729 || icode == CODE_FOR_spe_evslwi
13730 || icode == CODE_FOR_spe_evsrwis
13731 || icode == CODE_FOR_spe_evsubifw
13732 || icode == CODE_FOR_spe_evsrwiu)
13734 /* Only allow 5-bit unsigned literals. */
13735 STRIP_NOPS (arg1);
13736 if (TREE_CODE (arg1) != INTEGER_CST
13737 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13739 error ("argument 2 must be a 5-bit unsigned literal");
13740 return const0_rtx;
13743 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13744 || icode == CODE_FOR_dfptstsfi_lt_dd
13745 || icode == CODE_FOR_dfptstsfi_gt_dd
13746 || icode == CODE_FOR_dfptstsfi_unordered_dd
13747 || icode == CODE_FOR_dfptstsfi_eq_td
13748 || icode == CODE_FOR_dfptstsfi_lt_td
13749 || icode == CODE_FOR_dfptstsfi_gt_td
13750 || icode == CODE_FOR_dfptstsfi_unordered_td)
13752 /* Only allow 6-bit unsigned literals. */
13753 STRIP_NOPS (arg0);
13754 if (TREE_CODE (arg0) != INTEGER_CST
13755 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13757 error ("argument 1 must be a 6-bit unsigned literal");
13758 return CONST0_RTX (tmode);
13762 if (target == 0
13763 || GET_MODE (target) != tmode
13764 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13765 target = gen_reg_rtx (tmode);
13767 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13768 op0 = copy_to_mode_reg (mode0, op0);
13769 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13770 op1 = copy_to_mode_reg (mode1, op1);
13772 pat = GEN_FCN (icode) (target, op0, op1);
13773 if (! pat)
13774 return 0;
13775 emit_insn (pat);
13777 return target;
13780 static rtx
13781 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13783 rtx pat, scratch;
13784 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13785 tree arg0 = CALL_EXPR_ARG (exp, 1);
13786 tree arg1 = CALL_EXPR_ARG (exp, 2);
13787 rtx op0 = expand_normal (arg0);
13788 rtx op1 = expand_normal (arg1);
13789 machine_mode tmode = SImode;
13790 machine_mode mode0 = insn_data[icode].operand[1].mode;
13791 machine_mode mode1 = insn_data[icode].operand[2].mode;
13792 int cr6_form_int;
13794 if (TREE_CODE (cr6_form) != INTEGER_CST)
13796 error ("argument 1 of __builtin_altivec_predicate must be a constant");
13797 return const0_rtx;
13799 else
13800 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13802 gcc_assert (mode0 == mode1);
13804 /* If we have invalid arguments, bail out before generating bad rtl. */
13805 if (arg0 == error_mark_node || arg1 == error_mark_node)
13806 return const0_rtx;
13808 if (target == 0
13809 || GET_MODE (target) != tmode
13810 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13811 target = gen_reg_rtx (tmode);
13813 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13814 op0 = copy_to_mode_reg (mode0, op0);
13815 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13816 op1 = copy_to_mode_reg (mode1, op1);
13818 scratch = gen_reg_rtx (mode0);
13820 pat = GEN_FCN (icode) (scratch, op0, op1);
13821 if (! pat)
13822 return 0;
13823 emit_insn (pat);
13825 /* The vec_any* and vec_all* predicates use the same opcodes for two
13826 different operations, but the bits in CR6 will be different
13827 depending on what information we want. So we have to play tricks
13828 with CR6 to get the right bits out.
13830 If you think this is disgusting, look at the specs for the
13831 AltiVec predicates. */
13833 switch (cr6_form_int)
13835 case 0:
13836 emit_insn (gen_cr6_test_for_zero (target));
13837 break;
13838 case 1:
13839 emit_insn (gen_cr6_test_for_zero_reverse (target));
13840 break;
13841 case 2:
13842 emit_insn (gen_cr6_test_for_lt (target));
13843 break;
13844 case 3:
13845 emit_insn (gen_cr6_test_for_lt_reverse (target));
13846 break;
13847 default:
13848 error ("argument 1 of __builtin_altivec_predicate is out of range");
13849 break;
13852 return target;
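/* Editor's note (illustrative): after a vcmp*. record-form instruction,
   CR6's LT bit is set when every element compared true and its EQ bit
   when none did, so the four cr6_form values above select: 0 = none
   true, 1 = at least one true (vec_any_*), 2 = all true (vec_all_*),
   3 = not all true.  */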
13855 static rtx
13856 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
13858 rtx pat, addr;
13859 tree arg0 = CALL_EXPR_ARG (exp, 0);
13860 tree arg1 = CALL_EXPR_ARG (exp, 1);
13861 machine_mode tmode = insn_data[icode].operand[0].mode;
13862 machine_mode mode0 = Pmode;
13863 machine_mode mode1 = Pmode;
13864 rtx op0 = expand_normal (arg0);
13865 rtx op1 = expand_normal (arg1);
13867 if (icode == CODE_FOR_nothing)
13868 /* Builtin not supported on this processor. */
13869 return 0;
13871 /* If we got invalid arguments bail out before generating bad rtl. */
13872 if (arg0 == error_mark_node || arg1 == error_mark_node)
13873 return const0_rtx;
13875 if (target == 0
13876 || GET_MODE (target) != tmode
13877 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13878 target = gen_reg_rtx (tmode);
13880 op1 = copy_to_mode_reg (mode1, op1);
13882 if (op0 == const0_rtx)
13884 addr = gen_rtx_MEM (tmode, op1);
13886 else
13888 op0 = copy_to_mode_reg (mode0, op0);
13889 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
13892 pat = GEN_FCN (icode) (target, addr);
13894 if (! pat)
13895 return 0;
13896 emit_insn (pat);
13898 return target;
13901 /* Return a constant vector for use as a little-endian permute control vector
13902 to reverse the order of elements of the given vector mode. */
13903 static rtx
13904 swap_selector_for_mode (machine_mode mode)
13906 /* These are little endian vectors, so their elements are reversed
13907 from what you would normally expect for a permute control vector. */
13908 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13909 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13910 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13911 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
13912 unsigned int *swaparray, i;
13913 rtx perm[16];
13915 switch (mode)
13917 case V2DFmode:
13918 case V2DImode:
13919 swaparray = swap2;
13920 break;
13921 case V4SFmode:
13922 case V4SImode:
13923 swaparray = swap4;
13924 break;
13925 case V8HImode:
13926 swaparray = swap8;
13927 break;
13928 case V16QImode:
13929 swaparray = swap16;
13930 break;
13931 default:
13932 gcc_unreachable ();
13935 for (i = 0; i < 16; ++i)
13936 perm[i] = GEN_INT (swaparray[i]);
13938 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
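/* Editor's example (illustrative): for V4SImode the selector built
   above is the byte vector {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12};
   used as a vperm control it reverses the four 4-byte elements, mapping
   little-endian lane order onto the big-endian element order that
   -maltivec=be semantics require.  */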
13941 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
13942 with -maltivec=be specified. Issue the load followed by an element-
13943 reversing permute. */
13944 void
13945 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13947 rtx tmp = gen_reg_rtx (mode);
13948 rtx load = gen_rtx_SET (tmp, op1);
13949 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
13950 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
13951 rtx sel = swap_selector_for_mode (mode);
13952 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
13954 gcc_assert (REG_P (op0));
13955 emit_insn (par);
13956 emit_insn (gen_rtx_SET (op0, vperm));
13959 /* Generate code for a "stvxl" built-in for a little endian target with
13960 -maltivec=be specified. Issue the store preceded by an element-reversing
13961 permute. */
13962 void
13963 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13965 rtx tmp = gen_reg_rtx (mode);
13966 rtx store = gen_rtx_SET (op0, tmp);
13967 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
13968 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
13969 rtx sel = swap_selector_for_mode (mode);
13970 rtx vperm;
13972 gcc_assert (REG_P (op1));
13973 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
13974 emit_insn (gen_rtx_SET (tmp, vperm));
13975 emit_insn (par);
13978 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
13979 specified. Issue the store preceded by an element-reversing permute. */
13980 void
13981 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13983 machine_mode inner_mode = GET_MODE_INNER (mode);
13984 rtx tmp = gen_reg_rtx (mode);
13985 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
13986 rtx sel = swap_selector_for_mode (mode);
13987 rtx vperm;
13989 gcc_assert (REG_P (op1));
13990 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
13991 emit_insn (gen_rtx_SET (tmp, vperm));
13992 emit_insn (gen_rtx_SET (op0, stvx));
13995 static rtx
13996 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13998 rtx pat, addr;
13999 tree arg0 = CALL_EXPR_ARG (exp, 0);
14000 tree arg1 = CALL_EXPR_ARG (exp, 1);
14001 machine_mode tmode = insn_data[icode].operand[0].mode;
14002 machine_mode mode0 = Pmode;
14003 machine_mode mode1 = Pmode;
14004 rtx op0 = expand_normal (arg0);
14005 rtx op1 = expand_normal (arg1);
14007 if (icode == CODE_FOR_nothing)
14008 /* Builtin not supported on this processor. */
14009 return 0;
14011 /* If we got invalid arguments bail out before generating bad rtl. */
14012 if (arg0 == error_mark_node || arg1 == error_mark_node)
14013 return const0_rtx;
14015 if (target == 0
14016 || GET_MODE (target) != tmode
14017 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14018 target = gen_reg_rtx (tmode);
14020 op1 = copy_to_mode_reg (mode1, op1);
14022 /* For LVX, express the RTL accurately by ANDing the address with -16.
14023 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14024 so the raw address is fine. */
14025 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14026 || icode == CODE_FOR_altivec_lvx_v2di_2op
14027 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14028 || icode == CODE_FOR_altivec_lvx_v4si_2op
14029 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14030 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14032 rtx rawaddr;
14033 if (op0 == const0_rtx)
14034 rawaddr = op1;
14035 else
14037 op0 = copy_to_mode_reg (mode0, op0);
14038 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14040 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14041 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14043 /* For -maltivec=be, emit the load and follow it up with a
14044 permute to swap the elements. */
14045 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14047 rtx temp = gen_reg_rtx (tmode);
14048 emit_insn (gen_rtx_SET (temp, addr));
14050 rtx sel = swap_selector_for_mode (tmode);
14051 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14052 UNSPEC_VPERM);
14053 emit_insn (gen_rtx_SET (target, vperm));
14055 else
14056 emit_insn (gen_rtx_SET (target, addr));
14058 else
14060 if (op0 == const0_rtx)
14061 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14062 else
14064 op0 = copy_to_mode_reg (mode0, op0);
14065 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14066 gen_rtx_PLUS (Pmode, op1, op0));
14069 pat = GEN_FCN (icode) (target, addr);
14070 if (! pat)
14071 return 0;
14072 emit_insn (pat);
14075 return target;
14078 static rtx
14079 spe_expand_stv_builtin (enum insn_code icode, tree exp)
14081 tree arg0 = CALL_EXPR_ARG (exp, 0);
14082 tree arg1 = CALL_EXPR_ARG (exp, 1);
14083 tree arg2 = CALL_EXPR_ARG (exp, 2);
14084 rtx op0 = expand_normal (arg0);
14085 rtx op1 = expand_normal (arg1);
14086 rtx op2 = expand_normal (arg2);
14087 rtx pat;
14088 machine_mode mode0 = insn_data[icode].operand[0].mode;
14089 machine_mode mode1 = insn_data[icode].operand[1].mode;
14090 machine_mode mode2 = insn_data[icode].operand[2].mode;
14092 /* Invalid arguments.  Bail out before generating bad rtl. */
14093 if (arg0 == error_mark_node
14094 || arg1 == error_mark_node
14095 || arg2 == error_mark_node)
14096 return const0_rtx;
14098 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
14099 op0 = copy_to_mode_reg (mode2, op0);
14100 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
14101 op1 = copy_to_mode_reg (mode0, op1);
14102 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
14103 op2 = copy_to_mode_reg (mode1, op2);
14105 pat = GEN_FCN (icode) (op1, op2, op0);
14106 if (pat)
14107 emit_insn (pat);
14108 return NULL_RTX;
14111 static rtx
14112 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14114 tree arg0 = CALL_EXPR_ARG (exp, 0);
14115 tree arg1 = CALL_EXPR_ARG (exp, 1);
14116 tree arg2 = CALL_EXPR_ARG (exp, 2);
14117 rtx op0 = expand_normal (arg0);
14118 rtx op1 = expand_normal (arg1);
14119 rtx op2 = expand_normal (arg2);
14120 rtx pat, addr;
14121 machine_mode tmode = insn_data[icode].operand[0].mode;
14122 machine_mode mode1 = Pmode;
14123 machine_mode mode2 = Pmode;
14125 /* Invalid arguments.  Bail out before generating bad rtl. */
14126 if (arg0 == error_mark_node
14127 || arg1 == error_mark_node
14128 || arg2 == error_mark_node)
14129 return const0_rtx;
14131 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14132 op0 = copy_to_mode_reg (tmode, op0);
14134 op2 = copy_to_mode_reg (mode2, op2);
14136 if (op1 == const0_rtx)
14138 addr = gen_rtx_MEM (tmode, op2);
14140 else
14142 op1 = copy_to_mode_reg (mode1, op1);
14143 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14146 pat = GEN_FCN (icode) (addr, op0);
14147 if (pat)
14148 emit_insn (pat);
14149 return NULL_RTX;
14152 static rtx
14153 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14155 tree arg0 = CALL_EXPR_ARG (exp, 0);
14156 tree arg1 = CALL_EXPR_ARG (exp, 1);
14157 tree arg2 = CALL_EXPR_ARG (exp, 2);
14158 rtx op0 = expand_normal (arg0);
14159 rtx op1 = expand_normal (arg1);
14160 rtx op2 = expand_normal (arg2);
14161 rtx pat, addr, rawaddr;
14162 machine_mode tmode = insn_data[icode].operand[0].mode;
14163 machine_mode smode = insn_data[icode].operand[1].mode;
14164 machine_mode mode1 = Pmode;
14165 machine_mode mode2 = Pmode;
14167 /* Invalid arguments.  Bail out before generating bad rtl. */
14168 if (arg0 == error_mark_node
14169 || arg1 == error_mark_node
14170 || arg2 == error_mark_node)
14171 return const0_rtx;
14173 op2 = copy_to_mode_reg (mode2, op2);
14175 /* For STVX, express the RTL accurately by ANDing the address with -16.
14176 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14177 so the raw address is fine. */
14178 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14179 || icode == CODE_FOR_altivec_stvx_v2di_2op
14180 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14181 || icode == CODE_FOR_altivec_stvx_v4si_2op
14182 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14183 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14185 if (op1 == const0_rtx)
14186 rawaddr = op2;
14187 else
14189 op1 = copy_to_mode_reg (mode1, op1);
14190 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14193 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14194 addr = gen_rtx_MEM (tmode, addr);
14196 op0 = copy_to_mode_reg (tmode, op0);
14198 /* For -maltivec=be, emit a permute to swap the elements, followed
14199 by the store. */
14200 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14202 rtx temp = gen_reg_rtx (tmode);
14203 rtx sel = swap_selector_for_mode (tmode);
14204 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14205 UNSPEC_VPERM);
14206 emit_insn (gen_rtx_SET (temp, vperm));
14207 emit_insn (gen_rtx_SET (addr, temp));
14209 else
14210 emit_insn (gen_rtx_SET (addr, op0));
14212 else
14214 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14215 op0 = copy_to_mode_reg (smode, op0);
14217 if (op1 == const0_rtx)
14218 addr = gen_rtx_MEM (tmode, op2);
14219 else
14221 op1 = copy_to_mode_reg (mode1, op1);
14222 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14225 pat = GEN_FCN (icode) (addr, op0);
14226 if (pat)
14227 emit_insn (pat);
14230 return NULL_RTX;
14233 /* Return the appropriate SPR number associated with the given builtin. */
14234 static inline HOST_WIDE_INT
14235 htm_spr_num (enum rs6000_builtins code)
14237 if (code == HTM_BUILTIN_GET_TFHAR
14238 || code == HTM_BUILTIN_SET_TFHAR)
14239 return TFHAR_SPR;
14240 else if (code == HTM_BUILTIN_GET_TFIAR
14241 || code == HTM_BUILTIN_SET_TFIAR)
14242 return TFIAR_SPR;
14243 else if (code == HTM_BUILTIN_GET_TEXASR
14244 || code == HTM_BUILTIN_SET_TEXASR)
14245 return TEXASR_SPR;
14246 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14247 || code == HTM_BUILTIN_SET_TEXASRU);
14248 return TEXASRU_SPR;
14251 /* Return the appropriate SPR regno associated with the given builtin. */
14252 static inline HOST_WIDE_INT
14253 htm_spr_regno (enum rs6000_builtins code)
14255 if (code == HTM_BUILTIN_GET_TFHAR
14256 || code == HTM_BUILTIN_SET_TFHAR)
14257 return TFHAR_REGNO;
14258 else if (code == HTM_BUILTIN_GET_TFIAR
14259 || code == HTM_BUILTIN_SET_TFIAR)
14260 return TFIAR_REGNO;
14261 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14262 || code == HTM_BUILTIN_SET_TEXASR
14263 || code == HTM_BUILTIN_GET_TEXASRU
14264 || code == HTM_BUILTIN_SET_TEXASRU);
14265 return TEXASR_REGNO;
14268 /* Return the correct ICODE value depending on whether we are
14269 setting or reading the HTM SPRs. */
14270 static inline enum insn_code
14271 rs6000_htm_spr_icode (bool nonvoid)
14273 if (nonvoid)
14274 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14275 else
14276 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14279 /* Expand the HTM builtin in EXP and store the result in TARGET.
14280 Store true in *EXPANDEDP if we found a builtin to expand. */
14281 static rtx
14282 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14284 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14285 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14286 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14287 const struct builtin_description *d;
14288 size_t i;
14290 *expandedp = true;
14292 if (!TARGET_POWERPC64
14293 && (fcode == HTM_BUILTIN_TABORTDC
14294 || fcode == HTM_BUILTIN_TABORTDCI))
14296 size_t uns_fcode = (size_t)fcode;
14297 const char *name = rs6000_builtin_info[uns_fcode].name;
14298 error ("builtin %s is only valid in 64-bit mode", name);
14299 return const0_rtx;
14302 /* Expand the HTM builtins. */
14303 d = bdesc_htm;
14304 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14305 if (d->code == fcode)
14307 rtx op[MAX_HTM_OPERANDS], pat;
14308 int nopnds = 0;
14309 tree arg;
14310 call_expr_arg_iterator iter;
14311 unsigned attr = rs6000_builtin_info[fcode].attr;
14312 enum insn_code icode = d->icode;
14313 const struct insn_operand_data *insn_op;
14314 bool uses_spr = (attr & RS6000_BTC_SPR);
14315 rtx cr = NULL_RTX;
14317 if (uses_spr)
14318 icode = rs6000_htm_spr_icode (nonvoid);
14319 insn_op = &insn_data[icode].operand[0];
14321 if (nonvoid)
14323 machine_mode tmode = (uses_spr) ? insn_op->mode : SImode;
14324 if (!target
14325 || GET_MODE (target) != tmode
14326 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14327 target = gen_reg_rtx (tmode);
14328 if (uses_spr)
14329 op[nopnds++] = target;
14332 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14334 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14335 return const0_rtx;
14337 insn_op = &insn_data[icode].operand[nopnds];
14339 op[nopnds] = expand_normal (arg);
14341 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14343 if (!strcmp (insn_op->constraint, "n"))
14345 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14346 if (!CONST_INT_P (op[nopnds]))
14347 error ("argument %d must be an unsigned literal", arg_num);
14348 else
14349 error ("argument %d is an unsigned literal that is "
14350 "out of range", arg_num);
14351 return const0_rtx;
14353 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14356 nopnds++;
14359 /* Handle the builtins for extended mnemonics. These accept
14360 no arguments, but map to builtins that take arguments. */
14361 switch (fcode)
14363 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14364 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14365 op[nopnds++] = GEN_INT (1);
14366 if (flag_checking)
14367 attr |= RS6000_BTC_UNARY;
14368 break;
14369 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14370 op[nopnds++] = GEN_INT (0);
14371 if (flag_checking)
14372 attr |= RS6000_BTC_UNARY;
14373 break;
14374 default:
14375 break;
14378 /* If this builtin accesses SPRs, then pass in the appropriate
14379 SPR number and SPR regno as the last two operands. */
14380 if (uses_spr)
14382 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14383 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14384 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14386 /* If this builtin accesses a CR, then pass in a scratch
14387 CR as the last operand. */
14388 else if (attr & RS6000_BTC_CR)
14389 cr = gen_reg_rtx (CCmode);
14390 op[nopnds++] = cr;
14393 if (flag_checking)
14395 int expected_nopnds = 0;
14396 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14397 expected_nopnds = 1;
14398 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14399 expected_nopnds = 2;
14400 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14401 expected_nopnds = 3;
14402 if (!(attr & RS6000_BTC_VOID))
14403 expected_nopnds += 1;
14404 if (uses_spr)
14405 expected_nopnds += 2;
14407 gcc_assert (nopnds == expected_nopnds
14408 && nopnds <= MAX_HTM_OPERANDS);
14411 switch (nopnds)
14413 case 1:
14414 pat = GEN_FCN (icode) (op[0]);
14415 break;
14416 case 2:
14417 pat = GEN_FCN (icode) (op[0], op[1]);
14418 break;
14419 case 3:
14420 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14421 break;
14422 case 4:
14423 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14424 break;
14425 default:
14426 gcc_unreachable ();
14428 if (!pat)
14429 return NULL_RTX;
14430 emit_insn (pat);
14432 if (attr & RS6000_BTC_CR)
14434 if (fcode == HTM_BUILTIN_TBEGIN)
14436 /* Emit code to set TARGET to true or false depending on
14437 whether the tbegin. instruction succeeded or failed
14438 to start a transaction.  We do this by placing the 1's
14439 complement of CR's EQ bit into TARGET. */
14440 rtx scratch = gen_reg_rtx (SImode);
14441 emit_insn (gen_rtx_SET (scratch,
14442 gen_rtx_EQ (SImode, cr,
14443 const0_rtx)));
14444 emit_insn (gen_rtx_SET (target,
14445 gen_rtx_XOR (SImode, scratch,
14446 GEN_INT (1))));
14448 else
14450 /* Emit code to copy the 4-bit condition register field
14451 CR into the least significant end of register TARGET. */
14452 rtx scratch1 = gen_reg_rtx (SImode);
14453 rtx scratch2 = gen_reg_rtx (SImode);
14454 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14455 emit_insn (gen_movcc (subreg, cr));
14456 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14457 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14461 if (nonvoid)
14462 return target;
14463 return const0_rtx;
14466 *expandedp = false;
14467 return NULL_RTX;
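/* Editor's usage sketch (illustrative, not from the original source):
   given the CR handling above,
     if (__builtin_tbegin (0))
       { ... transactional body ... }
   tests the complement of the EQ bit, so the branch is taken when the
   transaction was started successfully.  */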
14470 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14472 static rtx
14473 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14474 rtx target)
14476 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14477 if (fcode == RS6000_BUILTIN_CPU_INIT)
14478 return const0_rtx;
14480 if (target == 0 || GET_MODE (target) != SImode)
14481 target = gen_reg_rtx (SImode);
14483 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14484 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14485 if (TREE_CODE (arg) != STRING_CST)
14487 error ("builtin %s only accepts a string argument",
14488 rs6000_builtin_info[(size_t) fcode].name);
14489 return const0_rtx;
14492 if (fcode == RS6000_BUILTIN_CPU_IS)
14494 const char *cpu = TREE_STRING_POINTER (arg);
14495 rtx cpuid = NULL_RTX;
14496 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14497 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14499 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14500 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14501 break;
14503 if (cpuid == NULL_RTX)
14505 /* Invalid CPU argument. */
14506 error ("cpu %s is an invalid argument to builtin %s",
14507 cpu, rs6000_builtin_info[(size_t) fcode].name);
14508 return const0_rtx;
14511 rtx platform = gen_reg_rtx (SImode);
14512 rtx tcbmem = gen_const_mem (SImode,
14513 gen_rtx_PLUS (Pmode,
14514 gen_rtx_REG (Pmode, TLS_REGNUM),
14515 GEN_INT (TCB_PLATFORM_OFFSET)));
14516 emit_move_insn (platform, tcbmem);
14517 emit_insn (gen_eqsi3 (target, platform, cpuid));
14519 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14521 const char *hwcap = TREE_STRING_POINTER (arg);
14522 rtx mask = NULL_RTX;
14523 int hwcap_offset;
14524 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14525 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14527 mask = GEN_INT (cpu_supports_info[i].mask);
14528 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14529 break;
14531 if (mask == NULL_RTX)
14533 /* Invalid HWCAP argument. */
14534 error ("hwcap %s is an invalid argument to builtin %s",
14535 hwcap, rs6000_builtin_info[(size_t) fcode].name);
14536 return const0_rtx;
14539 rtx tcb_hwcap = gen_reg_rtx (SImode);
14540 rtx tcbmem = gen_const_mem (SImode,
14541 gen_rtx_PLUS (Pmode,
14542 gen_rtx_REG (Pmode, TLS_REGNUM),
14543 GEN_INT (hwcap_offset)));
14544 emit_move_insn (tcb_hwcap, tcbmem);
14545 rtx scratch1 = gen_reg_rtx (SImode);
14546 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14547 rtx scratch2 = gen_reg_rtx (SImode);
14548 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14549 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14552 /* Record that we have expanded a CPU builtin, so that we can later
14553 emit a reference to the special symbol exported by LIBC to ensure we
14554 do not link against an old LIBC that doesn't support this feature. */
14555 cpu_builtin_p = true;
14557 #else
14558 /* For old LIBCs, always return FALSE. */
14559 emit_move_insn (target, GEN_INT (0));
14560 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14562 return target;
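/* Illustrative sketch, not part of the original file: typical uses of
   the CPU builtins expanded above.  With a TCB-aware glibc both tests
   reduce to a load from the thread pointer plus a compare:

     #include <stdio.h>

     void
     report_cpu (void)
     {
       __builtin_cpu_init ();    -- a nop on this target, see above
       if (__builtin_cpu_is ("power8"))
         puts ("running on a POWER8");
       if (__builtin_cpu_supports ("vsx"))
         puts ("VSX is available");
     }

   report_cpu is a hypothetical example function.  */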
14565 static rtx
14566 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14568 rtx pat;
14569 tree arg0 = CALL_EXPR_ARG (exp, 0);
14570 tree arg1 = CALL_EXPR_ARG (exp, 1);
14571 tree arg2 = CALL_EXPR_ARG (exp, 2);
14572 rtx op0 = expand_normal (arg0);
14573 rtx op1 = expand_normal (arg1);
14574 rtx op2 = expand_normal (arg2);
14575 machine_mode tmode = insn_data[icode].operand[0].mode;
14576 machine_mode mode0 = insn_data[icode].operand[1].mode;
14577 machine_mode mode1 = insn_data[icode].operand[2].mode;
14578 machine_mode mode2 = insn_data[icode].operand[3].mode;
14580 if (icode == CODE_FOR_nothing)
14581 /* Builtin not supported on this processor. */
14582 return 0;
14584 /* If we got invalid arguments bail out before generating bad rtl. */
14585 if (arg0 == error_mark_node
14586 || arg1 == error_mark_node
14587 || arg2 == error_mark_node)
14588 return const0_rtx;
14590 /* Check and prepare argument depending on the instruction code.
14592 Note that a switch statement instead of the sequence of tests
14593 would be incorrect as many of the CODE_FOR values could be
14594 CODE_FOR_nothing and that would yield multiple alternatives
14595 with identical values. We'd never reach here at runtime in
14596 this case. */
14597 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14598 || icode == CODE_FOR_altivec_vsldoi_v4si
14599 || icode == CODE_FOR_altivec_vsldoi_v8hi
14600 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14602 /* Only allow 4-bit unsigned literals. */
14603 STRIP_NOPS (arg2);
14604 if (TREE_CODE (arg2) != INTEGER_CST
14605 || TREE_INT_CST_LOW (arg2) & ~0xf)
14607 error ("argument 3 must be a 4-bit unsigned literal");
14608 return const0_rtx;
14611 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14612 || icode == CODE_FOR_vsx_xxpermdi_v2di
14613 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14614 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14615 || icode == CODE_FOR_vsx_xxsldwi_v4si
14616 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14617 || icode == CODE_FOR_vsx_xxsldwi_v2di
14618 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14620 /* Only allow 2-bit unsigned literals. */
14621 STRIP_NOPS (arg2);
14622 if (TREE_CODE (arg2) != INTEGER_CST
14623 || TREE_INT_CST_LOW (arg2) & ~0x3)
14625 error ("argument 3 must be a 2-bit unsigned literal");
14626 return const0_rtx;
14629 else if (icode == CODE_FOR_vsx_set_v2df
14630 || icode == CODE_FOR_vsx_set_v2di
14631 || icode == CODE_FOR_bcdadd
14632 || icode == CODE_FOR_bcdadd_lt
14633 || icode == CODE_FOR_bcdadd_eq
14634 || icode == CODE_FOR_bcdadd_gt
14635 || icode == CODE_FOR_bcdsub
14636 || icode == CODE_FOR_bcdsub_lt
14637 || icode == CODE_FOR_bcdsub_eq
14638 || icode == CODE_FOR_bcdsub_gt)
14640 /* Only allow 1-bit unsigned literals. */
14641 STRIP_NOPS (arg2);
14642 if (TREE_CODE (arg2) != INTEGER_CST
14643 || TREE_INT_CST_LOW (arg2) & ~0x1)
14645 error ("argument 3 must be a 1-bit unsigned literal");
14646 return const0_rtx;
14649 else if (icode == CODE_FOR_dfp_ddedpd_dd
14650 || icode == CODE_FOR_dfp_ddedpd_td)
14652 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14653 STRIP_NOPS (arg0);
14654 if (TREE_CODE (arg0) != INTEGER_CST
14655	      || TREE_INT_CST_LOW (arg0) & ~0x3)
14657 error ("argument 1 must be 0 or 2");
14658 return const0_rtx;
14661 else if (icode == CODE_FOR_dfp_denbcd_dd
14662 || icode == CODE_FOR_dfp_denbcd_td)
14664 /* Only allow 1-bit unsigned literals. */
14665 STRIP_NOPS (arg0);
14666 if (TREE_CODE (arg0) != INTEGER_CST
14667 || TREE_INT_CST_LOW (arg0) & ~0x1)
14669 error ("argument 1 must be a 1-bit unsigned literal");
14670 return const0_rtx;
14673 else if (icode == CODE_FOR_dfp_dscli_dd
14674 || icode == CODE_FOR_dfp_dscli_td
14675 || icode == CODE_FOR_dfp_dscri_dd
14676 || icode == CODE_FOR_dfp_dscri_td)
14678 /* Only allow 6-bit unsigned literals. */
14679 STRIP_NOPS (arg1);
14680 if (TREE_CODE (arg1) != INTEGER_CST
14681 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14683 error ("argument 2 must be a 6-bit unsigned literal");
14684 return const0_rtx;
14687 else if (icode == CODE_FOR_crypto_vshasigmaw
14688 || icode == CODE_FOR_crypto_vshasigmad)
14690 /* Check whether the 2nd and 3rd arguments are integer constants and in
14691 range and prepare arguments. */
14692 STRIP_NOPS (arg1);
14693 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
14695 error ("argument 2 must be 0 or 1");
14696 return const0_rtx;
14699 STRIP_NOPS (arg2);
14700	      if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
14702 error ("argument 3 must be in the range 0..15");
14703 return const0_rtx;
14707 if (target == 0
14708 || GET_MODE (target) != tmode
14709 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14710 target = gen_reg_rtx (tmode);
14712 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14713 op0 = copy_to_mode_reg (mode0, op0);
14714 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14715 op1 = copy_to_mode_reg (mode1, op1);
14716 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14717 op2 = copy_to_mode_reg (mode2, op2);
14719 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
14720 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
14721 else
14722 pat = GEN_FCN (icode) (target, op0, op1, op2);
14723 if (! pat)
14724 return 0;
14725 emit_insn (pat);
14727 return target;
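/* Illustrative sketch, not part of the original file: how the literal
   range checks above surface to users.  vec_sld expands through the
   altivec_vsldoi patterns, so its third operand must be a 4-bit
   unsigned literal:

     vector int
     shift_pair (vector int a, vector int b)
     {
       return vec_sld (a, b, 4);    -- OK: constant in 0..15
     }

   Passing a variable instead, e.g. vec_sld (a, b, n), reaches the
   error path above ("argument 3 must be a 4-bit unsigned literal").
   shift_pair is a hypothetical example function.  */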
14730 /* Expand the lvx builtins. */
14731 static rtx
14732 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
14734 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14735 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14736 tree arg0;
14737 machine_mode tmode, mode0;
14738 rtx pat, op0;
14739 enum insn_code icode;
14741 switch (fcode)
14743 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
14744 icode = CODE_FOR_vector_altivec_load_v16qi;
14745 break;
14746 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
14747 icode = CODE_FOR_vector_altivec_load_v8hi;
14748 break;
14749 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
14750 icode = CODE_FOR_vector_altivec_load_v4si;
14751 break;
14752 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
14753 icode = CODE_FOR_vector_altivec_load_v4sf;
14754 break;
14755 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
14756 icode = CODE_FOR_vector_altivec_load_v2df;
14757 break;
14758 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
14759 icode = CODE_FOR_vector_altivec_load_v2di;
14760 break;
14761 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
14762 icode = CODE_FOR_vector_altivec_load_v1ti;
14763 break;
14764 default:
14765 *expandedp = false;
14766 return NULL_RTX;
14769 *expandedp = true;
14771 arg0 = CALL_EXPR_ARG (exp, 0);
14772 op0 = expand_normal (arg0);
14773 tmode = insn_data[icode].operand[0].mode;
14774 mode0 = insn_data[icode].operand[1].mode;
14776 if (target == 0
14777 || GET_MODE (target) != tmode
14778 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14779 target = gen_reg_rtx (tmode);
14781 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14782 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14784 pat = GEN_FCN (icode) (target, op0);
14785 if (! pat)
14786 return 0;
14787 emit_insn (pat);
14788 return target;
14791 /* Expand the stvx builtins. */
14792 static rtx
14793 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14794 bool *expandedp)
14796 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14797 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14798 tree arg0, arg1;
14799 machine_mode mode0, mode1;
14800 rtx pat, op0, op1;
14801 enum insn_code icode;
14803 switch (fcode)
14805 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
14806 icode = CODE_FOR_vector_altivec_store_v16qi;
14807 break;
14808 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
14809 icode = CODE_FOR_vector_altivec_store_v8hi;
14810 break;
14811 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
14812 icode = CODE_FOR_vector_altivec_store_v4si;
14813 break;
14814 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
14815 icode = CODE_FOR_vector_altivec_store_v4sf;
14816 break;
14817 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
14818 icode = CODE_FOR_vector_altivec_store_v2df;
14819 break;
14820 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
14821 icode = CODE_FOR_vector_altivec_store_v2di;
14822 break;
14823 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
14824 icode = CODE_FOR_vector_altivec_store_v1ti;
14825 break;
14826 default:
14827 *expandedp = false;
14828 return NULL_RTX;
14831 arg0 = CALL_EXPR_ARG (exp, 0);
14832 arg1 = CALL_EXPR_ARG (exp, 1);
14833 op0 = expand_normal (arg0);
14834 op1 = expand_normal (arg1);
14835 mode0 = insn_data[icode].operand[0].mode;
14836 mode1 = insn_data[icode].operand[1].mode;
14838 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14839 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14840 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14841 op1 = copy_to_mode_reg (mode1, op1);
14843 pat = GEN_FCN (icode) (op0, op1);
14844 if (pat)
14845 emit_insn (pat);
14847 *expandedp = true;
14848 return NULL_RTX;
14851 /* Expand the dst builtins. */
14852 static rtx
14853 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14854 bool *expandedp)
14856 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14857 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14858 tree arg0, arg1, arg2;
14859 machine_mode mode0, mode1;
14860 rtx pat, op0, op1, op2;
14861 const struct builtin_description *d;
14862 size_t i;
14864 *expandedp = false;
14866 /* Handle DST variants. */
14867 d = bdesc_dst;
14868 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14869 if (d->code == fcode)
14871 arg0 = CALL_EXPR_ARG (exp, 0);
14872 arg1 = CALL_EXPR_ARG (exp, 1);
14873 arg2 = CALL_EXPR_ARG (exp, 2);
14874 op0 = expand_normal (arg0);
14875 op1 = expand_normal (arg1);
14876 op2 = expand_normal (arg2);
14877 mode0 = insn_data[d->icode].operand[0].mode;
14878 mode1 = insn_data[d->icode].operand[1].mode;
14880 /* Invalid arguments, bail out before generating bad rtl. */
14881 if (arg0 == error_mark_node
14882 || arg1 == error_mark_node
14883 || arg2 == error_mark_node)
14884 return const0_rtx;
14886 *expandedp = true;
14887 STRIP_NOPS (arg2);
14888 if (TREE_CODE (arg2) != INTEGER_CST
14889 || TREE_INT_CST_LOW (arg2) & ~0x3)
14891 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14892 return const0_rtx;
14895 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14896 op0 = copy_to_mode_reg (Pmode, op0);
14897 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14898 op1 = copy_to_mode_reg (mode1, op1);
14900 pat = GEN_FCN (d->icode) (op0, op1, op2);
14901 if (pat != 0)
14902 emit_insn (pat);
14904 return NULL_RTX;
14907 return NULL_RTX;
14910 /* Expand vec_init builtin. */
14911 static rtx
14912 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14914 machine_mode tmode = TYPE_MODE (type);
14915 machine_mode inner_mode = GET_MODE_INNER (tmode);
14916 int i, n_elt = GET_MODE_NUNITS (tmode);
14918 gcc_assert (VECTOR_MODE_P (tmode));
14919 gcc_assert (n_elt == call_expr_nargs (exp));
14921 if (!target || !register_operand (target, tmode))
14922 target = gen_reg_rtx (tmode);
14924	  /* If we have a vector comprised of a single element, such as V1TImode, do
14925 the initialization directly. */
14926 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14928 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14929 emit_move_insn (target, gen_lowpart (tmode, x));
14931 else
14933 rtvec v = rtvec_alloc (n_elt);
14935 for (i = 0; i < n_elt; ++i)
14937 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14938 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14941 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14944 return target;
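/* Illustrative sketch, not part of the original file: the vec_init
   builtins back vector constructors, so an initializer such as

     vector int v = (vector int) { 1, 2, 3, 4 };

   expands each element with expand_normal above and hands the
   resulting PARALLEL to rs6000_expand_vector_init; a V1TImode value
   instead takes the single-element gen_lowpart path.  */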
14947 /* Return the integer constant in ARG. Constrain it to be in the range
14948 of the subparts of VEC_TYPE; issue an error if not. */
14950 static int
14951 get_element_number (tree vec_type, tree arg)
14953 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14955 if (!tree_fits_uhwi_p (arg)
14956 || (elt = tree_to_uhwi (arg), elt > max))
14958 error ("selector must be an integer constant in the range 0..%wi", max);
14959 return 0;
14962 return elt;
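/* Illustrative sketch, not part of the original file: for a V4SI
   vector TYPE_VECTOR_SUBPARTS is 4, so max above is 3 and a constant
   call like

     int x = vec_extract (v, 7);    -- selector out of range

   draws "selector must be an integer constant in the range 0..3";
   get_element_number then returns 0.  */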
14965 /* Expand vec_set builtin. */
14966 static rtx
14967 altivec_expand_vec_set_builtin (tree exp)
14969 machine_mode tmode, mode1;
14970 tree arg0, arg1, arg2;
14971 int elt;
14972 rtx op0, op1;
14974 arg0 = CALL_EXPR_ARG (exp, 0);
14975 arg1 = CALL_EXPR_ARG (exp, 1);
14976 arg2 = CALL_EXPR_ARG (exp, 2);
14978 tmode = TYPE_MODE (TREE_TYPE (arg0));
14979 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14980 gcc_assert (VECTOR_MODE_P (tmode));
14982 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14983 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14984 elt = get_element_number (TREE_TYPE (arg0), arg2);
14986 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14987 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14989 op0 = force_reg (tmode, op0);
14990 op1 = force_reg (mode1, op1);
14992 rs6000_expand_vector_set (op0, op1, elt);
14994 return op0;
14997 /* Expand vec_ext builtin. */
14998 static rtx
14999 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15001 machine_mode tmode, mode0;
15002 tree arg0, arg1;
15003 rtx op0;
15004 rtx op1;
15006 arg0 = CALL_EXPR_ARG (exp, 0);
15007 arg1 = CALL_EXPR_ARG (exp, 1);
15009 op0 = expand_normal (arg0);
15010 op1 = expand_normal (arg1);
15012 /* Call get_element_number to validate arg1 if it is a constant. */
15013 if (TREE_CODE (arg1) == INTEGER_CST)
15014 (void) get_element_number (TREE_TYPE (arg0), arg1);
15016 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15017 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15018 gcc_assert (VECTOR_MODE_P (mode0));
15020 op0 = force_reg (mode0, op0);
15022 if (optimize || !target || !register_operand (target, tmode))
15023 target = gen_reg_rtx (tmode);
15025 rs6000_expand_vector_extract (target, op0, op1);
15027 return target;
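/* Illustrative sketch, not part of the original file: the two
   expanders above implement the vec_insert and vec_extract intrinsics
   from <altivec.h>:

     vector int
     set_lane (vector int v, int x)
     {
       return vec_insert (x, v, 1);    -- vec_set path, element 1
     }

     int
     get_lane (vector int v)
     {
       return vec_extract (v, 2);      -- vec_ext path, element 2
     }

   set_lane and get_lane are hypothetical example functions.  */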
15030 /* Expand the builtin in EXP and store the result in TARGET. Store
15031 true in *EXPANDEDP if we found a builtin to expand. */
15032 static rtx
15033 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15035 const struct builtin_description *d;
15036 size_t i;
15037 enum insn_code icode;
15038 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15039 tree arg0;
15040 rtx op0, pat;
15041 machine_mode tmode, mode0;
15042 enum rs6000_builtins fcode
15043 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15045 if (rs6000_overloaded_builtin_p (fcode))
15047 *expandedp = true;
15048 error ("unresolved overload for Altivec builtin %qF", fndecl);
15050 /* Given it is invalid, just generate a normal call. */
15051 return expand_call (exp, target, false);
15054 target = altivec_expand_ld_builtin (exp, target, expandedp);
15055 if (*expandedp)
15056 return target;
15058 target = altivec_expand_st_builtin (exp, target, expandedp);
15059 if (*expandedp)
15060 return target;
15062 target = altivec_expand_dst_builtin (exp, target, expandedp);
15063 if (*expandedp)
15064 return target;
15066 *expandedp = true;
15068 switch (fcode)
15070 case ALTIVEC_BUILTIN_STVX_V2DF:
15071 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15072 case ALTIVEC_BUILTIN_STVX_V2DI:
15073 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15074 case ALTIVEC_BUILTIN_STVX_V4SF:
15075 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15076 case ALTIVEC_BUILTIN_STVX:
15077 case ALTIVEC_BUILTIN_STVX_V4SI:
15078 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15079 case ALTIVEC_BUILTIN_STVX_V8HI:
15080 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15081 case ALTIVEC_BUILTIN_STVX_V16QI:
15082 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15083 case ALTIVEC_BUILTIN_STVEBX:
15084 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15085 case ALTIVEC_BUILTIN_STVEHX:
15086 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15087 case ALTIVEC_BUILTIN_STVEWX:
15088 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15089 case ALTIVEC_BUILTIN_STVXL_V2DF:
15090 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15091 case ALTIVEC_BUILTIN_STVXL_V2DI:
15092 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15093 case ALTIVEC_BUILTIN_STVXL_V4SF:
15094 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15095 case ALTIVEC_BUILTIN_STVXL:
15096 case ALTIVEC_BUILTIN_STVXL_V4SI:
15097 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15098 case ALTIVEC_BUILTIN_STVXL_V8HI:
15099 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15100 case ALTIVEC_BUILTIN_STVXL_V16QI:
15101 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15103 case ALTIVEC_BUILTIN_STVLX:
15104 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15105 case ALTIVEC_BUILTIN_STVLXL:
15106 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15107 case ALTIVEC_BUILTIN_STVRX:
15108 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15109 case ALTIVEC_BUILTIN_STVRXL:
15110 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15112 case VSX_BUILTIN_STXVD2X_V1TI:
15113 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15114 case VSX_BUILTIN_STXVD2X_V2DF:
15115 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15116 case VSX_BUILTIN_STXVD2X_V2DI:
15117 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15118 case VSX_BUILTIN_STXVW4X_V4SF:
15119 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15120 case VSX_BUILTIN_STXVW4X_V4SI:
15121 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15122 case VSX_BUILTIN_STXVW4X_V8HI:
15123 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15124 case VSX_BUILTIN_STXVW4X_V16QI:
15125 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15127 /* For the following on big endian, it's ok to use any appropriate
15128 unaligned-supporting store, so use a generic expander. For
15129 little-endian, the exact element-reversing instruction must
15130 be used. */
15131 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15133 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15134 : CODE_FOR_vsx_st_elemrev_v2df);
15135 return altivec_expand_stv_builtin (code, exp);
15137 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15139 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15140 : CODE_FOR_vsx_st_elemrev_v2di);
15141 return altivec_expand_stv_builtin (code, exp);
15143 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15145 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15146 : CODE_FOR_vsx_st_elemrev_v4sf);
15147 return altivec_expand_stv_builtin (code, exp);
15149 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15151 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15152 : CODE_FOR_vsx_st_elemrev_v4si);
15153 return altivec_expand_stv_builtin (code, exp);
15155 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15157 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15158 : CODE_FOR_vsx_st_elemrev_v8hi);
15159 return altivec_expand_stv_builtin (code, exp);
15161 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15163 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15164 : CODE_FOR_vsx_st_elemrev_v16qi);
15165 return altivec_expand_stv_builtin (code, exp);
15168 case ALTIVEC_BUILTIN_MFVSCR:
15169 icode = CODE_FOR_altivec_mfvscr;
15170 tmode = insn_data[icode].operand[0].mode;
15172 if (target == 0
15173 || GET_MODE (target) != tmode
15174 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15175 target = gen_reg_rtx (tmode);
15177 pat = GEN_FCN (icode) (target);
15178 if (! pat)
15179 return 0;
15180 emit_insn (pat);
15181 return target;
15183 case ALTIVEC_BUILTIN_MTVSCR:
15184 icode = CODE_FOR_altivec_mtvscr;
15185 arg0 = CALL_EXPR_ARG (exp, 0);
15186 op0 = expand_normal (arg0);
15187 mode0 = insn_data[icode].operand[0].mode;
15189 /* If we got invalid arguments bail out before generating bad rtl. */
15190 if (arg0 == error_mark_node)
15191 return const0_rtx;
15193 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15194 op0 = copy_to_mode_reg (mode0, op0);
15196 pat = GEN_FCN (icode) (op0);
15197 if (pat)
15198 emit_insn (pat);
15199 return NULL_RTX;
15201 case ALTIVEC_BUILTIN_DSSALL:
15202 emit_insn (gen_altivec_dssall ());
15203 return NULL_RTX;
15205 case ALTIVEC_BUILTIN_DSS:
15206 icode = CODE_FOR_altivec_dss;
15207 arg0 = CALL_EXPR_ARG (exp, 0);
15208 STRIP_NOPS (arg0);
15209 op0 = expand_normal (arg0);
15210 mode0 = insn_data[icode].operand[0].mode;
15212 /* If we got invalid arguments bail out before generating bad rtl. */
15213 if (arg0 == error_mark_node)
15214 return const0_rtx;
15216 if (TREE_CODE (arg0) != INTEGER_CST
15217 || TREE_INT_CST_LOW (arg0) & ~0x3)
15219 error ("argument to dss must be a 2-bit unsigned literal");
15220 return const0_rtx;
15223 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15224 op0 = copy_to_mode_reg (mode0, op0);
15226 emit_insn (gen_altivec_dss (op0));
15227 return NULL_RTX;
15229 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15230 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15231 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15232 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15233 case VSX_BUILTIN_VEC_INIT_V2DF:
15234 case VSX_BUILTIN_VEC_INIT_V2DI:
15235 case VSX_BUILTIN_VEC_INIT_V1TI:
15236 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15238 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15239 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15240 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15241 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15242 case VSX_BUILTIN_VEC_SET_V2DF:
15243 case VSX_BUILTIN_VEC_SET_V2DI:
15244 case VSX_BUILTIN_VEC_SET_V1TI:
15245 return altivec_expand_vec_set_builtin (exp);
15247 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15248 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15249 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15250 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15251 case VSX_BUILTIN_VEC_EXT_V2DF:
15252 case VSX_BUILTIN_VEC_EXT_V2DI:
15253 case VSX_BUILTIN_VEC_EXT_V1TI:
15254 return altivec_expand_vec_ext_builtin (exp, target);
15256 default:
15257 break;
15261 /* Expand abs* operations. */
15262 d = bdesc_abs;
15263 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15264 if (d->code == fcode)
15265 return altivec_expand_abs_builtin (d->icode, exp, target);
15267 /* Expand the AltiVec predicates. */
15268 d = bdesc_altivec_preds;
15269 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15270 if (d->code == fcode)
15271 return altivec_expand_predicate_builtin (d->icode, exp, target);
15273 /* LV* are funky. We initialized them differently. */
15274 switch (fcode)
15276 case ALTIVEC_BUILTIN_LVSL:
15277 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15278 exp, target, false);
15279 case ALTIVEC_BUILTIN_LVSR:
15280 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15281 exp, target, false);
15282 case ALTIVEC_BUILTIN_LVEBX:
15283 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15284 exp, target, false);
15285 case ALTIVEC_BUILTIN_LVEHX:
15286 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15287 exp, target, false);
15288 case ALTIVEC_BUILTIN_LVEWX:
15289 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15290 exp, target, false);
15291 case ALTIVEC_BUILTIN_LVXL_V2DF:
15292 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15293 exp, target, false);
15294 case ALTIVEC_BUILTIN_LVXL_V2DI:
15295 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15296 exp, target, false);
15297 case ALTIVEC_BUILTIN_LVXL_V4SF:
15298 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15299 exp, target, false);
15300 case ALTIVEC_BUILTIN_LVXL:
15301 case ALTIVEC_BUILTIN_LVXL_V4SI:
15302 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15303 exp, target, false);
15304 case ALTIVEC_BUILTIN_LVXL_V8HI:
15305 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15306 exp, target, false);
15307 case ALTIVEC_BUILTIN_LVXL_V16QI:
15308 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15309 exp, target, false);
15310 case ALTIVEC_BUILTIN_LVX_V2DF:
15311 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
15312 exp, target, false);
15313 case ALTIVEC_BUILTIN_LVX_V2DI:
15314 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
15315 exp, target, false);
15316 case ALTIVEC_BUILTIN_LVX_V4SF:
15317 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
15318 exp, target, false);
15319 case ALTIVEC_BUILTIN_LVX:
15320 case ALTIVEC_BUILTIN_LVX_V4SI:
15321 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
15322 exp, target, false);
15323 case ALTIVEC_BUILTIN_LVX_V8HI:
15324 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
15325 exp, target, false);
15326 case ALTIVEC_BUILTIN_LVX_V16QI:
15327 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
15328 exp, target, false);
15329 case ALTIVEC_BUILTIN_LVLX:
15330 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15331 exp, target, true);
15332 case ALTIVEC_BUILTIN_LVLXL:
15333 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15334 exp, target, true);
15335 case ALTIVEC_BUILTIN_LVRX:
15336 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15337 exp, target, true);
15338 case ALTIVEC_BUILTIN_LVRXL:
15339 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15340 exp, target, true);
15341 case VSX_BUILTIN_LXVD2X_V1TI:
15342 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15343 exp, target, false);
15344 case VSX_BUILTIN_LXVD2X_V2DF:
15345 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15346 exp, target, false);
15347 case VSX_BUILTIN_LXVD2X_V2DI:
15348 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15349 exp, target, false);
15350 case VSX_BUILTIN_LXVW4X_V4SF:
15351 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15352 exp, target, false);
15353 case VSX_BUILTIN_LXVW4X_V4SI:
15354 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15355 exp, target, false);
15356 case VSX_BUILTIN_LXVW4X_V8HI:
15357 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15358 exp, target, false);
15359 case VSX_BUILTIN_LXVW4X_V16QI:
15360 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15361 exp, target, false);
15362 /* For the following on big endian, it's ok to use any appropriate
15363 unaligned-supporting load, so use a generic expander. For
15364 little-endian, the exact element-reversing instruction must
15365 be used. */
15366 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15368 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15369 : CODE_FOR_vsx_ld_elemrev_v2df);
15370 return altivec_expand_lv_builtin (code, exp, target, false);
15372 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15374 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15375 : CODE_FOR_vsx_ld_elemrev_v2di);
15376 return altivec_expand_lv_builtin (code, exp, target, false);
15378 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15380 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15381 : CODE_FOR_vsx_ld_elemrev_v4sf);
15382 return altivec_expand_lv_builtin (code, exp, target, false);
15384 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15386 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15387 : CODE_FOR_vsx_ld_elemrev_v4si);
15388 return altivec_expand_lv_builtin (code, exp, target, false);
15390 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15392 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15393 : CODE_FOR_vsx_ld_elemrev_v8hi);
15394 return altivec_expand_lv_builtin (code, exp, target, false);
15396 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15398 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15399 : CODE_FOR_vsx_ld_elemrev_v16qi);
15400 return altivec_expand_lv_builtin (code, exp, target, false);
15402 break;
15403 default:
15404 break;
15408 *expandedp = false;
15409 return NULL_RTX;
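/* Illustrative sketch, not part of the original file: the dispatch
   above means a plain vec_ld / vec_st pair such as

     vector int
     copy_vec (const vector int *src, vector int *dst)
     {
       vector int v = vec_ld (0, src);
       vec_st (v, 0, dst);
       return v;
     }

   resolves (for vector int) to the ALTIVEC_BUILTIN_LVX and
   ALTIVEC_BUILTIN_STVX cases and hence to the *_v4si_2op patterns via
   altivec_expand_lv_builtin and altivec_expand_stv_builtin.  copy_vec
   is a hypothetical example function.  */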
15412 /* Expand the builtin in EXP and store the result in TARGET. Store
15413 true in *EXPANDEDP if we found a builtin to expand. */
15414 static rtx
15415 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15417 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15418 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15419 const struct builtin_description *d;
15420 size_t i;
15422 *expandedp = true;
15424 switch (fcode)
15426 case PAIRED_BUILTIN_STX:
15427 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15428 case PAIRED_BUILTIN_LX:
15429 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15430 default:
15431 break;
15435 /* Expand the paired predicates. */
15436 d = bdesc_paired_preds;
15437 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15438 if (d->code == fcode)
15439 return paired_expand_predicate_builtin (d->icode, exp, target);
15441 *expandedp = false;
15442 return NULL_RTX;
15445 /* Binops that need to be initialized manually, but can be expanded
15446 automagically by rs6000_expand_binop_builtin. */
15447 static const struct builtin_description bdesc_2arg_spe[] =
15449 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
15450 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
15451 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
15452 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
15453 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
15454 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
15455 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
15456 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
15457 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
15458 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
15459 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
15460 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
15461 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
15462 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
15463 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
15464 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
15465 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
15466 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
15467 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
15468 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
15469 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
15470 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
15473 /* Expand the builtin in EXP and store the result in TARGET. Store
15474 true in *EXPANDEDP if we found a builtin to expand.
15476 This expands the SPE builtins that are not simple unary and binary
15477 operations. */
15478 static rtx
15479 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
15481 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15482 tree arg1, arg0;
15483 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15484 enum insn_code icode;
15485 machine_mode tmode, mode0;
15486 rtx pat, op0;
15487 const struct builtin_description *d;
15488 size_t i;
15490 *expandedp = true;
15492 /* Syntax check for a 5-bit unsigned immediate. */
15493 switch (fcode)
15495 case SPE_BUILTIN_EVSTDD:
15496 case SPE_BUILTIN_EVSTDH:
15497 case SPE_BUILTIN_EVSTDW:
15498 case SPE_BUILTIN_EVSTWHE:
15499 case SPE_BUILTIN_EVSTWHO:
15500 case SPE_BUILTIN_EVSTWWE:
15501 case SPE_BUILTIN_EVSTWWO:
15502 arg1 = CALL_EXPR_ARG (exp, 2);
15503 if (TREE_CODE (arg1) != INTEGER_CST
15504 || TREE_INT_CST_LOW (arg1) & ~0x1f)
15506 error ("argument 2 must be a 5-bit unsigned literal");
15507 return const0_rtx;
15509 break;
15510 default:
15511 break;
15514 /* The evsplat*i instructions are not quite generic. */
15515 switch (fcode)
15517 case SPE_BUILTIN_EVSPLATFI:
15518 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
15519 exp, target);
15520 case SPE_BUILTIN_EVSPLATI:
15521 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
15522 exp, target);
15523 default:
15524 break;
15527 d = bdesc_2arg_spe;
15528 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
15529 if (d->code == fcode)
15530 return rs6000_expand_binop_builtin (d->icode, exp, target);
15532 d = bdesc_spe_predicates;
15533 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
15534 if (d->code == fcode)
15535 return spe_expand_predicate_builtin (d->icode, exp, target);
15537 d = bdesc_spe_evsel;
15538 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
15539 if (d->code == fcode)
15540 return spe_expand_evsel_builtin (d->icode, exp, target);
15542 switch (fcode)
15544 case SPE_BUILTIN_EVSTDDX:
15545 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
15546 case SPE_BUILTIN_EVSTDHX:
15547 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
15548 case SPE_BUILTIN_EVSTDWX:
15549 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
15550 case SPE_BUILTIN_EVSTWHEX:
15551 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
15552 case SPE_BUILTIN_EVSTWHOX:
15553 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
15554 case SPE_BUILTIN_EVSTWWEX:
15555 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
15556 case SPE_BUILTIN_EVSTWWOX:
15557 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
15558 case SPE_BUILTIN_EVSTDD:
15559 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
15560 case SPE_BUILTIN_EVSTDH:
15561 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
15562 case SPE_BUILTIN_EVSTDW:
15563 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
15564 case SPE_BUILTIN_EVSTWHE:
15565 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
15566 case SPE_BUILTIN_EVSTWHO:
15567 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
15568 case SPE_BUILTIN_EVSTWWE:
15569 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
15570 case SPE_BUILTIN_EVSTWWO:
15571 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
15572 case SPE_BUILTIN_MFSPEFSCR:
15573 icode = CODE_FOR_spe_mfspefscr;
15574 tmode = insn_data[icode].operand[0].mode;
15576 if (target == 0
15577 || GET_MODE (target) != tmode
15578 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15579 target = gen_reg_rtx (tmode);
15581 pat = GEN_FCN (icode) (target);
15582 if (! pat)
15583 return 0;
15584 emit_insn (pat);
15585 return target;
15586 case SPE_BUILTIN_MTSPEFSCR:
15587 icode = CODE_FOR_spe_mtspefscr;
15588 arg0 = CALL_EXPR_ARG (exp, 0);
15589 op0 = expand_normal (arg0);
15590 mode0 = insn_data[icode].operand[0].mode;
15592 if (arg0 == error_mark_node)
15593 return const0_rtx;
15595 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15596 op0 = copy_to_mode_reg (mode0, op0);
15598 pat = GEN_FCN (icode) (op0);
15599 if (pat)
15600 emit_insn (pat);
15601 return NULL_RTX;
15602 default:
15603 break;
15606 *expandedp = false;
15607 return NULL_RTX;
15610 static rtx
15611 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15613 rtx pat, scratch, tmp;
15614 tree form = CALL_EXPR_ARG (exp, 0);
15615 tree arg0 = CALL_EXPR_ARG (exp, 1);
15616 tree arg1 = CALL_EXPR_ARG (exp, 2);
15617 rtx op0 = expand_normal (arg0);
15618 rtx op1 = expand_normal (arg1);
15619 machine_mode mode0 = insn_data[icode].operand[1].mode;
15620 machine_mode mode1 = insn_data[icode].operand[2].mode;
15621 int form_int;
15622 enum rtx_code code;
15624 if (TREE_CODE (form) != INTEGER_CST)
15626 error ("argument 1 of __builtin_paired_predicate must be a constant");
15627 return const0_rtx;
15629 else
15630 form_int = TREE_INT_CST_LOW (form);
15632 gcc_assert (mode0 == mode1);
15634 if (arg0 == error_mark_node || arg1 == error_mark_node)
15635 return const0_rtx;
15637 if (target == 0
15638 || GET_MODE (target) != SImode
15639 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15640 target = gen_reg_rtx (SImode);
15641 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15642 op0 = copy_to_mode_reg (mode0, op0);
15643 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15644 op1 = copy_to_mode_reg (mode1, op1);
15646 scratch = gen_reg_rtx (CCFPmode);
15648 pat = GEN_FCN (icode) (scratch, op0, op1);
15649 if (!pat)
15650 return const0_rtx;
15652 emit_insn (pat);
15654 switch (form_int)
15656 /* LT bit. */
15657 case 0:
15658 code = LT;
15659 break;
15660 /* GT bit. */
15661 case 1:
15662 code = GT;
15663 break;
15664 /* EQ bit. */
15665 case 2:
15666 code = EQ;
15667 break;
15668 /* UN bit. */
15669 case 3:
15670 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15671 return target;
15672 default:
15673 error ("argument 1 of __builtin_paired_predicate is out of range");
15674 return const0_rtx;
15677 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15678 emit_move_insn (target, tmp);
15679 return target;
15682 static rtx
15683 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15685 rtx pat, scratch, tmp;
15686 tree form = CALL_EXPR_ARG (exp, 0);
15687 tree arg0 = CALL_EXPR_ARG (exp, 1);
15688 tree arg1 = CALL_EXPR_ARG (exp, 2);
15689 rtx op0 = expand_normal (arg0);
15690 rtx op1 = expand_normal (arg1);
15691 machine_mode mode0 = insn_data[icode].operand[1].mode;
15692 machine_mode mode1 = insn_data[icode].operand[2].mode;
15693 int form_int;
15694 enum rtx_code code;
15696 if (TREE_CODE (form) != INTEGER_CST)
15698 error ("argument 1 of __builtin_spe_predicate must be a constant");
15699 return const0_rtx;
15701 else
15702 form_int = TREE_INT_CST_LOW (form);
15704 gcc_assert (mode0 == mode1);
15706 if (arg0 == error_mark_node || arg1 == error_mark_node)
15707 return const0_rtx;
15709 if (target == 0
15710 || GET_MODE (target) != SImode
15711 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
15712 target = gen_reg_rtx (SImode);
15714 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15715 op0 = copy_to_mode_reg (mode0, op0);
15716 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15717 op1 = copy_to_mode_reg (mode1, op1);
15719 scratch = gen_reg_rtx (CCmode);
15721 pat = GEN_FCN (icode) (scratch, op0, op1);
15722 if (! pat)
15723 return const0_rtx;
15724 emit_insn (pat);
15726 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
15727 _lower_. We use one compare, but look in different bits of the
15728 CR for each variant.
15730 There are 2 elements in each SPE simd type (upper/lower). The CR
15731 bits are set as follows:
15733 BIT0 | BIT 1 | BIT 2 | BIT 3
15734 U | L | (U | L) | (U & L)
15736 So, for an "all" relationship, BIT 3 would be set.
15737 For an "any" relationship, BIT 2 would be set. Etc.
15739 Following traditional nomenclature, these bits map to:
15741 BIT0 | BIT 1 | BIT 2 | BIT 3
15742 LT | GT | EQ | OV
15744	     Later, we will generate rtl to look in the OV/EQ/LT/GT bit,
	     depending on the form requested.
15747 switch (form_int)
15749 /* All variant. OV bit. */
15750 case 0:
15751 /* We need to get to the OV bit, which is the ORDERED bit. We
15752 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
15753 that's ugly and will make validate_condition_mode die.
15754 So let's just use another pattern. */
15755 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15756 return target;
15757 /* Any variant. EQ bit. */
15758 case 1:
15759 code = EQ;
15760 break;
15761 /* Upper variant. LT bit. */
15762 case 2:
15763 code = LT;
15764 break;
15765 /* Lower variant. GT bit. */
15766 case 3:
15767 code = GT;
15768 break;
15769 default:
15770 error ("argument 1 of __builtin_spe_predicate is out of range");
15771 return const0_rtx;
15774 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15775 emit_move_insn (target, tmp);
15777 return target;
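/* Illustrative sketch, not part of the original file: in RTL terms,
   form 1 ("any", the EQ bit) makes the code above emit

     (set (reg:CC scratch) (compare ...))    -- the predicate insn
     (set (reg:SI target)
          (eq:SI (reg:CC scratch) (const_int 0)))

   while form 0 ("all") goes through the dedicated
   move_from_CR_ov_bit pattern instead, precisely to avoid the
   ORDERED rtx that validate_condition_mode would reject.  */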
15780 /* The evsel builtins look like this:
15782 e = __builtin_spe_evsel_OP (a, b, c, d);
15784 and work like this:
15786 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
15787 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
15790 static rtx
15791 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
15793 rtx pat, scratch;
15794 tree arg0 = CALL_EXPR_ARG (exp, 0);
15795 tree arg1 = CALL_EXPR_ARG (exp, 1);
15796 tree arg2 = CALL_EXPR_ARG (exp, 2);
15797 tree arg3 = CALL_EXPR_ARG (exp, 3);
15798 rtx op0 = expand_normal (arg0);
15799 rtx op1 = expand_normal (arg1);
15800 rtx op2 = expand_normal (arg2);
15801 rtx op3 = expand_normal (arg3);
15802 machine_mode mode0 = insn_data[icode].operand[1].mode;
15803 machine_mode mode1 = insn_data[icode].operand[2].mode;
15805 gcc_assert (mode0 == mode1);
15807 if (arg0 == error_mark_node || arg1 == error_mark_node
15808 || arg2 == error_mark_node || arg3 == error_mark_node)
15809 return const0_rtx;
15811 if (target == 0
15812 || GET_MODE (target) != mode0
15813 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
15814 target = gen_reg_rtx (mode0);
15816 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15817 op0 = copy_to_mode_reg (mode0, op0);
15818 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15819 op1 = copy_to_mode_reg (mode0, op1);
15820 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
15821 op2 = copy_to_mode_reg (mode0, op2);
15822 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
15823 op3 = copy_to_mode_reg (mode0, op3);
15825 /* Generate the compare. */
15826 scratch = gen_reg_rtx (CCmode);
15827 pat = GEN_FCN (icode) (scratch, op0, op1);
15828 if (! pat)
15829 return const0_rtx;
15830 emit_insn (pat);
15832 if (mode0 == V2SImode)
15833 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
15834 else
15835 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
15837 return target;
15840 /* Raise an error message for a builtin function that is called without the
15841 appropriate target options being set. */
15843 static void
15844 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15846 size_t uns_fncode = (size_t)fncode;
15847 const char *name = rs6000_builtin_info[uns_fncode].name;
15848 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15850 gcc_assert (name != NULL);
15851 if ((fnmask & RS6000_BTM_CELL) != 0)
15852	    error ("builtin function %s is only valid for the cell processor", name);
15853	  else if ((fnmask & RS6000_BTM_VSX) != 0)
15854	    error ("builtin function %s requires the -mvsx option", name);
15855	  else if ((fnmask & RS6000_BTM_HTM) != 0)
15856	    error ("builtin function %s requires the -mhtm option", name);
15857	  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15858	    error ("builtin function %s requires the -maltivec option", name);
15859	  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
15860	    error ("builtin function %s requires the -mpaired option", name);
15861	  else if ((fnmask & RS6000_BTM_SPE) != 0)
15862	    error ("builtin function %s requires the -mspe option", name);
15863	  else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15864		   == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15865	    error ("builtin function %s requires the -mhard-dfp and"
15866		   " -mpower8-vector options", name);
15867	  else if ((fnmask & RS6000_BTM_DFP) != 0)
15868	    error ("builtin function %s requires the -mhard-dfp option", name);
15869	  else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15870	    error ("builtin function %s requires the -mpower8-vector option", name);
15871	  else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15872	    error ("builtin function %s requires the -mcpu=power9 option", name);
15873	  else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15874		   == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15875	    error ("builtin function %s requires the -mcpu=power9 and"
15876		   " -m64 options", name);
15877	  else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15878	    error ("builtin function %s requires the -mcpu=power9 option", name);
15879	  else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15880		   == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15881	    error ("builtin function %s requires the -mhard-float and"
15882		   " -mlong-double-128 options", name);
15883	  else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15884	    error ("builtin function %s requires the -mhard-float option", name);
15885	  else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15886	    error ("builtin function %s requires the -mfloat128 option", name);
15887	  else
15888	    error ("builtin function %s is not supported with the current options",
15889		   name);
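/* Illustrative sketch, not part of the original file: invoking a
   masked-off builtin, e.g. compiling a call to
   __builtin_altivec_vaddubm with -mno-altivec, makes
   rs6000_expand_builtin see func_valid_p as false and reach the
   RS6000_BTM_ALTIVEC arm above:

     builtin function __builtin_altivec_vaddubm requires the
     -maltivec option

   The particular builtin is illustrative; any builtin whose mask
   bits are not enabled is diagnosed the same way.  */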
15892 /* Target hook for early folding of built-ins, shamelessly stolen
15893 from ia64.c. */
15895 static tree
15896 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
15897 tree *args, bool ignore ATTRIBUTE_UNUSED)
15899 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
15901 enum rs6000_builtins fn_code
15902 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15903 switch (fn_code)
15905 case RS6000_BUILTIN_NANQ:
15906 case RS6000_BUILTIN_NANSQ:
15908 tree type = TREE_TYPE (TREE_TYPE (fndecl));
15909 const char *str = c_getstr (*args);
15910 int quiet = fn_code == RS6000_BUILTIN_NANQ;
15911 REAL_VALUE_TYPE real;
15913 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
15914 return build_real (type, real);
15915 return NULL_TREE;
15917 case RS6000_BUILTIN_INFQ:
15918 case RS6000_BUILTIN_HUGE_VALQ:
15920 tree type = TREE_TYPE (TREE_TYPE (fndecl));
15921 REAL_VALUE_TYPE inf;
15922 real_inf (&inf);
15923 return build_real (type, inf);
15925 default:
15926 break;
15929 #ifdef SUBTARGET_FOLD_BUILTIN
15930 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15931 #else
15932 return NULL_TREE;
15933 #endif
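/* Illustrative sketch, not part of the original file: the folding
   above lets IEEE 128-bit constants be materialized at compile time:

     __float128 q = __builtin_nanq ("");     -- quiet NaN, folded
     __float128 s = __builtin_nansq ("");    -- signalling NaN, folded
     __float128 i = __builtin_infq ();       -- folded via real_inf

   each initializer becomes a REAL_CST rather than a runtime call.  */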
15936 /* Expand an expression EXP that calls a built-in function,
15937 with result going to TARGET if that's convenient
15938 (and in mode MODE if that's convenient).
15939 SUBTARGET may be used as the target for computing one of EXP's operands.
15940 IGNORE is nonzero if the value is to be ignored. */
15942 static rtx
15943 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15944 machine_mode mode ATTRIBUTE_UNUSED,
15945 int ignore ATTRIBUTE_UNUSED)
15947 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15948 enum rs6000_builtins fcode
15949 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
15950 size_t uns_fcode = (size_t)fcode;
15951 const struct builtin_description *d;
15952 size_t i;
15953 rtx ret;
15954 bool success;
15955 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
15956 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
15958 if (TARGET_DEBUG_BUILTIN)
15960 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
15961 const char *name1 = rs6000_builtin_info[uns_fcode].name;
15962 const char *name2 = ((icode != CODE_FOR_nothing)
15963 ? get_insn_name ((int)icode)
15964 : "nothing");
15965 const char *name3;
15967 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
15969 default: name3 = "unknown"; break;
15970 case RS6000_BTC_SPECIAL: name3 = "special"; break;
15971 case RS6000_BTC_UNARY: name3 = "unary"; break;
15972 case RS6000_BTC_BINARY: name3 = "binary"; break;
15973 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
15974 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
15975 case RS6000_BTC_ABS: name3 = "abs"; break;
15976 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
15977 case RS6000_BTC_DST: name3 = "dst"; break;
15981 fprintf (stderr,
15982 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
15983 (name1) ? name1 : "---", fcode,
15984 (name2) ? name2 : "---", (int)icode,
15985 name3,
15986 func_valid_p ? "" : ", not valid");
15989 if (!func_valid_p)
15991 rs6000_invalid_builtin (fcode);
15993 /* Given it is invalid, just generate a normal call. */
15994 return expand_call (exp, target, ignore);
15997 switch (fcode)
15999 case RS6000_BUILTIN_RECIP:
16000 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16002 case RS6000_BUILTIN_RECIPF:
16003 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16005 case RS6000_BUILTIN_RSQRTF:
16006 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16008 case RS6000_BUILTIN_RSQRT:
16009 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16011 case POWER7_BUILTIN_BPERMD:
16012 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16013 ? CODE_FOR_bpermd_di
16014 : CODE_FOR_bpermd_si), exp, target);
16016 case RS6000_BUILTIN_GET_TB:
16017 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16018 target);
16020 case RS6000_BUILTIN_MFTB:
16021 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16022 ? CODE_FOR_rs6000_mftb_di
16023 : CODE_FOR_rs6000_mftb_si),
16024 target);
16026 case RS6000_BUILTIN_MFFS:
16027 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16029 case RS6000_BUILTIN_MTFSF:
16030 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16032 case RS6000_BUILTIN_CPU_INIT:
16033 case RS6000_BUILTIN_CPU_IS:
16034 case RS6000_BUILTIN_CPU_SUPPORTS:
16035 return cpu_expand_builtin (fcode, exp, target);
16037 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16038 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16040 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16041 : (int) CODE_FOR_altivec_lvsl_direct);
16042 machine_mode tmode = insn_data[icode].operand[0].mode;
16043 machine_mode mode = insn_data[icode].operand[1].mode;
16044 tree arg;
16045 rtx op, addr, pat;
16047 gcc_assert (TARGET_ALTIVEC);
16049 arg = CALL_EXPR_ARG (exp, 0);
16050 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16051 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16052 addr = memory_address (mode, op);
16053 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16054 op = addr;
16055 else
16057	    /* For the load case we need to negate the address.  */
16058 op = gen_reg_rtx (GET_MODE (addr));
16059 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16061 op = gen_rtx_MEM (mode, op);
16063 if (target == 0
16064 || GET_MODE (target) != tmode
16065 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16066 target = gen_reg_rtx (tmode);
16068 pat = GEN_FCN (icode) (target, op);
16069 if (!pat)
16070 return 0;
16071 emit_insn (pat);
16073 return target;
16076 case ALTIVEC_BUILTIN_VCFUX:
16077 case ALTIVEC_BUILTIN_VCFSX:
16078 case ALTIVEC_BUILTIN_VCTUXS:
16079 case ALTIVEC_BUILTIN_VCTSXS:
16080 /* FIXME: There's got to be a nicer way to handle this case than
16081 constructing a new CALL_EXPR. */
16082 if (call_expr_nargs (exp) == 1)
16084 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16085 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16087 break;
16089 default:
16090 break;
16093 if (TARGET_ALTIVEC)
16095 ret = altivec_expand_builtin (exp, target, &success);
16097 if (success)
16098 return ret;
16100 if (TARGET_SPE)
16102 ret = spe_expand_builtin (exp, target, &success);
16104 if (success)
16105 return ret;
16107 if (TARGET_PAIRED_FLOAT)
16109 ret = paired_expand_builtin (exp, target, &success);
16111 if (success)
16112 return ret;
16114 if (TARGET_HTM)
16116 ret = htm_expand_builtin (exp, target, &success);
16118 if (success)
16119 return ret;
16122 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16123 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16124 gcc_assert (attr == RS6000_BTC_UNARY
16125 || attr == RS6000_BTC_BINARY
16126 || attr == RS6000_BTC_TERNARY
16127 || attr == RS6000_BTC_SPECIAL);
16129 /* Handle simple unary operations. */
16130 d = bdesc_1arg;
16131 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16132 if (d->code == fcode)
16133 return rs6000_expand_unop_builtin (d->icode, exp, target);
16135 /* Handle simple binary operations. */
16136 d = bdesc_2arg;
16137 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16138 if (d->code == fcode)
16139 return rs6000_expand_binop_builtin (d->icode, exp, target);
16141 /* Handle simple ternary operations. */
16142 d = bdesc_3arg;
16143 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16144 if (d->code == fcode)
16145 return rs6000_expand_ternop_builtin (d->icode, exp, target);
16147 /* Handle simple no-argument operations. */
16148 d = bdesc_0arg;
16149 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16150 if (d->code == fcode)
16151 return rs6000_expand_zeroop_builtin (d->icode, target);
16153 gcc_unreachable ();
16156 static void
16157 rs6000_init_builtins (void)
16159 tree tdecl;
16160 tree ftype;
16161 machine_mode mode;
16163 if (TARGET_DEBUG_BUILTIN)
16164 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
16165 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
16166 (TARGET_SPE) ? ", spe" : "",
16167 (TARGET_ALTIVEC) ? ", altivec" : "",
16168 (TARGET_VSX) ? ", vsx" : "");
16170 V2SI_type_node = build_vector_type (intSI_type_node, 2);
16171 V2SF_type_node = build_vector_type (float_type_node, 2);
16172 V2DI_type_node = build_vector_type (intDI_type_node, 2);
16173 V2DF_type_node = build_vector_type (double_type_node, 2);
16174 V4HI_type_node = build_vector_type (intHI_type_node, 4);
16175 V4SI_type_node = build_vector_type (intSI_type_node, 4);
16176 V4SF_type_node = build_vector_type (float_type_node, 4);
16177 V8HI_type_node = build_vector_type (intHI_type_node, 8);
16178 V16QI_type_node = build_vector_type (intQI_type_node, 16);
16180 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
16181 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
16182 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
16183 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
16185 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
16186 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
16187 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
16188 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16190 const_str_type_node
16191 = build_pointer_type (build_qualified_type (char_type_node,
16192 TYPE_QUAL_CONST));
16194 /* We use V1TI mode as a special container to hold __int128_t items that
16195 must live in VSX registers. */
16196 if (intTI_type_node)
16198 V1TI_type_node = build_vector_type (intTI_type_node, 1);
16199 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
16202 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16203 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16204 'vector unsigned short'. */
16206 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16207 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16208 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16209 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16210 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
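/* Editorial note (a hedged illustration, not GCC code): keeping these
   as distinct type copies is what lets the front ends tell, e.g.,
   'vector bool int' apart from 'vector unsigned int', so an AltiVec
   comparison such as vec_cmpeq on two 'vector signed int' values can
   yield 'vector bool int' and C++ overload resolution still works,
   even though both types use the same 32-bit unsigned lanes.  */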
16212 long_integer_type_internal_node = long_integer_type_node;
16213 long_unsigned_type_internal_node = long_unsigned_type_node;
16214 long_long_integer_type_internal_node = long_long_integer_type_node;
16215 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16216 intQI_type_internal_node = intQI_type_node;
16217 uintQI_type_internal_node = unsigned_intQI_type_node;
16218 intHI_type_internal_node = intHI_type_node;
16219 uintHI_type_internal_node = unsigned_intHI_type_node;
16220 intSI_type_internal_node = intSI_type_node;
16221 uintSI_type_internal_node = unsigned_intSI_type_node;
16222 intDI_type_internal_node = intDI_type_node;
16223 uintDI_type_internal_node = unsigned_intDI_type_node;
16224 intTI_type_internal_node = intTI_type_node;
16225 uintTI_type_internal_node = unsigned_intTI_type_node;
16226 float_type_internal_node = float_type_node;
16227 double_type_internal_node = double_type_node;
16228 long_double_type_internal_node = long_double_type_node;
16229 dfloat64_type_internal_node = dfloat64_type_node;
16230 dfloat128_type_internal_node = dfloat128_type_node;
16231 void_type_internal_node = void_type_node;
16233 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16234 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16235 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16236 format that uses a pair of doubles, depending on the switches and
16237 defaults. */
16238 if (TARGET_FLOAT128)
16239 {
16240 ibm128_float_type_node = make_node (REAL_TYPE);
16241 TYPE_PRECISION (ibm128_float_type_node) = 128;
16242 layout_type (ibm128_float_type_node);
16243 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16245 ieee128_float_type_node = make_node (REAL_TYPE);
16246 TYPE_PRECISION (ieee128_float_type_node) = 128;
16247 layout_type (ieee128_float_type_node);
16248 SET_TYPE_MODE (ieee128_float_type_node, KFmode);
16250 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16251 "__float128");
16253 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16254 "__ibm128");
16256 else
16258 /* All types must be nonzero, or self-test barfs during bootstrap. */
16259 ieee128_float_type_node = long_double_type_node;
16260 ibm128_float_type_node = long_double_type_node;
16261 }
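/* A minimal usage sketch of the two type names registered above
   (illustrative only, assuming a compiler built with -mfloat128):

       __float128 q = 1.0Q;    IEEE binary128, KFmode
       __ibm128   d = 1.0;     IBM double-double, IFmode

   When TARGET_FLOAT128 is off, both nodes alias long double instead,
   which keeps them non-null as the comment above requires.  */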
16263 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16264 tree type node. */
16265 builtin_mode_to_type[QImode][0] = integer_type_node;
16266 builtin_mode_to_type[HImode][0] = integer_type_node;
16267 builtin_mode_to_type[SImode][0] = intSI_type_node;
16268 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16269 builtin_mode_to_type[DImode][0] = intDI_type_node;
16270 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16271 builtin_mode_to_type[TImode][0] = intTI_type_node;
16272 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16273 builtin_mode_to_type[SFmode][0] = float_type_node;
16274 builtin_mode_to_type[DFmode][0] = double_type_node;
16275 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16276 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16277 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16278 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16279 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16280 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16281 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16282 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
16283 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
16284 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16285 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16286 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16287 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
16288 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16289 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16290 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16291 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16292 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16293 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16294 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
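/* Worked example of this table: builtin_mode_to_type[V4SImode][1] is
   now unsigned_V4SI_type_node ('vector unsigned int') and
   builtin_mode_to_type[V4SImode][0] the signed variant;
   builtin_function_type below walks exactly this mapping to turn an
   insn's operand modes into a builtin's C signature.  */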
16296 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16297 TYPE_NAME (bool_char_type_node) = tdecl;
16299 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16300 TYPE_NAME (bool_short_type_node) = tdecl;
16302 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16303 TYPE_NAME (bool_int_type_node) = tdecl;
16305 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16306 TYPE_NAME (pixel_type_node) = tdecl;
16308 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
16309 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
16310 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
16311 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
16312 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
16314 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
16315 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
16317 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
16318 TYPE_NAME (V16QI_type_node) = tdecl;
16320 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
16321 TYPE_NAME (bool_V16QI_type_node) = tdecl;
16323 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
16324 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
16326 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
16327 TYPE_NAME (V8HI_type_node) = tdecl;
16329 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
16330 TYPE_NAME (bool_V8HI_type_node) = tdecl;
16332 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
16333 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
16335 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
16336 TYPE_NAME (V4SI_type_node) = tdecl;
16338 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
16339 TYPE_NAME (bool_V4SI_type_node) = tdecl;
16341 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
16342 TYPE_NAME (V4SF_type_node) = tdecl;
16344 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
16345 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
16347 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
16348 TYPE_NAME (V2DF_type_node) = tdecl;
16350 if (TARGET_POWERPC64)
16351 {
16352 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
16353 TYPE_NAME (V2DI_type_node) = tdecl;
16355 tdecl = add_builtin_type ("__vector unsigned long",
16356 unsigned_V2DI_type_node);
16357 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
16359 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
16360 TYPE_NAME (bool_V2DI_type_node) = tdecl;
16361 }
16362 else
16363 {
16364 tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
16365 TYPE_NAME (V2DI_type_node) = tdecl;
16367 tdecl = add_builtin_type ("__vector unsigned long long",
16368 unsigned_V2DI_type_node);
16369 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
16371 tdecl = add_builtin_type ("__vector __bool long long",
16372 bool_V2DI_type_node);
16373 TYPE_NAME (bool_V2DI_type_node) = tdecl;
16374 }
16376 if (V1TI_type_node)
16377 {
16378 tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
16379 TYPE_NAME (V1TI_type_node) = tdecl;
16381 tdecl = add_builtin_type ("__vector unsigned __int128",
16382 unsigned_V1TI_type_node);
16383 TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
16384 }
16386 /* Paired and SPE builtins are only available if you build a compiler with
16387 the appropriate options, so only create those builtins with the
16388 appropriate compiler option. Create Altivec and VSX builtins on machines
16389 with at least the general purpose extensions (970 and newer) to allow the
16390 use of the target attribute. */
16391 if (TARGET_PAIRED_FLOAT)
16392 paired_init_builtins ();
16393 if (TARGET_SPE)
16394 spe_init_builtins ();
16395 if (TARGET_EXTRA_BUILTINS)
16396 altivec_init_builtins ();
16397 if (TARGET_HTM)
16398 htm_init_builtins ();
16400 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
16401 rs6000_common_init_builtins ();
16403 ftype = build_function_type_list (ieee128_float_type_node,
16404 const_str_type_node, NULL_TREE);
16405 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
16406 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
16408 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
16409 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
16410 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
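/* Hedged usage sketch for the quad-float builtins just defined
   (illustrative only; needs IEEE 128-bit float support):

       __float128 inf = __builtin_infq ();
       __float128 nan = __builtin_nanq ("");  */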
16412 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16413 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16414 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16416 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16417 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16418 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16420 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16421 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16422 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16424 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16425 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16426 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
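/* These four map division and reciprocal square root onto the
   hardware estimate instructions.  A hedged sketch of user code
   (illustrative only; the results are approximations refined by the
   expanders):

       double q = __builtin_recipdiv (x, y);
       double r = __builtin_rsqrt (x);  */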
16428 mode = (TARGET_64BIT) ? DImode : SImode;
16429 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16430 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16431 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16433 ftype = build_function_type_list (unsigned_intDI_type_node,
16434 NULL_TREE);
16435 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16437 if (TARGET_64BIT)
16438 ftype = build_function_type_list (unsigned_intDI_type_node,
16439 NULL_TREE);
16440 else
16441 ftype = build_function_type_list (unsigned_intSI_type_node,
16442 NULL_TREE);
16443 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
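/* Usage sketch (illustrative only): __builtin_ppc_get_timebase always
   yields the full 64-bit time base, while __builtin_ppc_mftb returns
   whatever fits in one GPR, hence the SImode result on 32-bit:

       unsigned long long tb = __builtin_ppc_get_timebase ();  */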
16445 ftype = build_function_type_list (double_type_node, NULL_TREE);
16446 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16448 ftype = build_function_type_list (void_type_node,
16449 intSI_type_node, double_type_node,
16450 NULL_TREE);
16451 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16453 ftype = build_function_type_list (void_type_node, NULL_TREE);
16454 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16456 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16457 NULL_TREE);
16458 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16459 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
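/* Usage sketch for the CPU identification builtins (illustrative
   only; they read the AT_PLATFORM/AT_HWCAP auxv data, see
   ppc-auxv.h):

       __builtin_cpu_init ();
       if (__builtin_cpu_supports ("vsx"))
         use_vsx_path ();

   use_vsx_path is a hypothetical user function.  */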
16461 #if TARGET_XCOFF
16462 /* AIX libm provides clog as __clog. */
16463 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16464 set_user_assembler_name (tdecl, "__clog");
16465 #endif
16467 #ifdef SUBTARGET_INIT_BUILTINS
16468 SUBTARGET_INIT_BUILTINS;
16469 #endif
16470 }
16472 /* Returns the rs6000 builtin decl for CODE. */
16474 static tree
16475 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16476 {
16477 HOST_WIDE_INT fnmask;
16479 if (code >= RS6000_BUILTIN_COUNT)
16480 return error_mark_node;
16482 fnmask = rs6000_builtin_info[code].mask;
16483 if ((fnmask & rs6000_builtin_mask) != fnmask)
16484 {
16485 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16486 return error_mark_node;
16487 }
16489 return rs6000_builtin_decls[code];
16490 }
16492 static void
16493 spe_init_builtins (void)
16494 {
16495 tree puint_type_node = build_pointer_type (unsigned_type_node);
16496 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
16497 const struct builtin_description *d;
16498 size_t i;
16500 tree v2si_ftype_4_v2si
16501 = build_function_type_list (opaque_V2SI_type_node,
16502 opaque_V2SI_type_node,
16503 opaque_V2SI_type_node,
16504 opaque_V2SI_type_node,
16505 opaque_V2SI_type_node,
16506 NULL_TREE);
16508 tree v2sf_ftype_4_v2sf
16509 = build_function_type_list (opaque_V2SF_type_node,
16510 opaque_V2SF_type_node,
16511 opaque_V2SF_type_node,
16512 opaque_V2SF_type_node,
16513 opaque_V2SF_type_node,
16514 NULL_TREE);
16516 tree int_ftype_int_v2si_v2si
16517 = build_function_type_list (integer_type_node,
16518 integer_type_node,
16519 opaque_V2SI_type_node,
16520 opaque_V2SI_type_node,
16521 NULL_TREE);
16523 tree int_ftype_int_v2sf_v2sf
16524 = build_function_type_list (integer_type_node,
16525 integer_type_node,
16526 opaque_V2SF_type_node,
16527 opaque_V2SF_type_node,
16528 NULL_TREE);
16530 tree void_ftype_v2si_puint_int
16531 = build_function_type_list (void_type_node,
16532 opaque_V2SI_type_node,
16533 puint_type_node,
16534 integer_type_node,
16535 NULL_TREE);
16537 tree void_ftype_v2si_puint_char
16538 = build_function_type_list (void_type_node,
16539 opaque_V2SI_type_node,
16540 puint_type_node,
16541 char_type_node,
16542 NULL_TREE);
16544 tree void_ftype_v2si_pv2si_int
16545 = build_function_type_list (void_type_node,
16546 opaque_V2SI_type_node,
16547 opaque_p_V2SI_type_node,
16548 integer_type_node,
16549 NULL_TREE);
16551 tree void_ftype_v2si_pv2si_char
16552 = build_function_type_list (void_type_node,
16553 opaque_V2SI_type_node,
16554 opaque_p_V2SI_type_node,
16555 char_type_node,
16556 NULL_TREE);
16558 tree void_ftype_int
16559 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16561 tree int_ftype_void
16562 = build_function_type_list (integer_type_node, NULL_TREE);
16564 tree v2si_ftype_pv2si_int
16565 = build_function_type_list (opaque_V2SI_type_node,
16566 opaque_p_V2SI_type_node,
16567 integer_type_node,
16568 NULL_TREE);
16570 tree v2si_ftype_puint_int
16571 = build_function_type_list (opaque_V2SI_type_node,
16572 puint_type_node,
16573 integer_type_node,
16574 NULL_TREE);
16576 tree v2si_ftype_pushort_int
16577 = build_function_type_list (opaque_V2SI_type_node,
16578 pushort_type_node,
16579 integer_type_node,
16580 NULL_TREE);
16582 tree v2si_ftype_signed_char
16583 = build_function_type_list (opaque_V2SI_type_node,
16584 signed_char_type_node,
16585 NULL_TREE);
16587 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
16589 /* Initialize irregular SPE builtins. */
16591 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
16592 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
16593 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
16594 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
16595 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
16596 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
16597 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
16598 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
16599 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
16600 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
16601 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
16602 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
16603 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
16604 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
16605 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
16606 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
16607 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
16608 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
16610 /* Loads. */
16611 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
16612 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
16613 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
16614 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
16615 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
16616 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
16617 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
16618 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
16619 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
16620 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
16621 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
16622 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
16623 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
16624 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
16625 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
16626 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
16627 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
16628 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
16629 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
16630 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
16631 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
16632 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
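/* Hedged sketch of one of the SPE loads above (illustrative only;
   needs -mspe, and uses the __ev64_opaque__ type registered earlier
   in this function):

       __ev64_opaque__ v = __builtin_spe_evldd (p, 0);  */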
16634 /* Predicates. */
16635 d = bdesc_spe_predicates;
16636 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
16637 {
16638 tree type;
16640 switch (insn_data[d->icode].operand[1].mode)
16641 {
16642 case V2SImode:
16643 type = int_ftype_int_v2si_v2si;
16644 break;
16645 case V2SFmode:
16646 type = int_ftype_int_v2sf_v2sf;
16647 break;
16648 default:
16649 gcc_unreachable ();
16650 }
16652 def_builtin (d->name, type, d->code);
16653 }
16655 /* Evsel predicates. */
16656 d = bdesc_spe_evsel;
16657 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
16658 {
16659 tree type;
16661 switch (insn_data[d->icode].operand[1].mode)
16662 {
16663 case V2SImode:
16664 type = v2si_ftype_4_v2si;
16665 break;
16666 case V2SFmode:
16667 type = v2sf_ftype_4_v2sf;
16668 break;
16669 default:
16670 gcc_unreachable ();
16671 }
16673 def_builtin (d->name, type, d->code);
16674 }
16675 }
16677 static void
16678 paired_init_builtins (void)
16679 {
16680 const struct builtin_description *d;
16681 size_t i;
16683 tree int_ftype_int_v2sf_v2sf
16684 = build_function_type_list (integer_type_node,
16685 integer_type_node,
16686 V2SF_type_node,
16687 V2SF_type_node,
16688 NULL_TREE);
16689 tree pcfloat_type_node =
16690 build_pointer_type (build_qualified_type
16691 (float_type_node, TYPE_QUAL_CONST));
16693 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
16694 long_integer_type_node,
16695 pcfloat_type_node,
16696 NULL_TREE);
16697 tree void_ftype_v2sf_long_pcfloat =
16698 build_function_type_list (void_type_node,
16699 V2SF_type_node,
16700 long_integer_type_node,
16701 pcfloat_type_node,
16702 NULL_TREE);
16705 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
16706 PAIRED_BUILTIN_LX);
16709 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
16710 PAIRED_BUILTIN_STX);
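/* Hedged sketch (illustrative only): __builtin_paired_lx (off, p)
   loads a two-float paired-single vector from p + off, and
   __builtin_paired_stx (v, off, p) stores one back; both exist only
   in a compiler configured for paired float (e.g. 750CL).  */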
16712 /* Predicates. */
16713 d = bdesc_paired_preds;
16714 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
16715 {
16716 tree type;
16718 if (TARGET_DEBUG_BUILTIN)
16719 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
16720 (int)i, get_insn_name (d->icode), (int)d->icode,
16721 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
16723 switch (insn_data[d->icode].operand[1].mode)
16724 {
16725 case V2SFmode:
16726 type = int_ftype_int_v2sf_v2sf;
16727 break;
16728 default:
16729 gcc_unreachable ();
16730 }
16732 def_builtin (d->name, type, d->code);
16733 }
16734 }
16736 static void
16737 altivec_init_builtins (void)
16738 {
16739 const struct builtin_description *d;
16740 size_t i;
16741 tree ftype;
16742 tree decl;
16744 tree pvoid_type_node = build_pointer_type (void_type_node);
16746 tree pcvoid_type_node
16747 = build_pointer_type (build_qualified_type (void_type_node,
16748 TYPE_QUAL_CONST));
16750 tree int_ftype_opaque
16751 = build_function_type_list (integer_type_node,
16752 opaque_V4SI_type_node, NULL_TREE);
16753 tree opaque_ftype_opaque
16754 = build_function_type_list (integer_type_node, NULL_TREE);
16755 tree opaque_ftype_opaque_int
16756 = build_function_type_list (opaque_V4SI_type_node,
16757 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16758 tree opaque_ftype_opaque_opaque_int
16759 = build_function_type_list (opaque_V4SI_type_node,
16760 opaque_V4SI_type_node, opaque_V4SI_type_node,
16761 integer_type_node, NULL_TREE);
16762 tree opaque_ftype_opaque_opaque_opaque
16763 = build_function_type_list (opaque_V4SI_type_node,
16764 opaque_V4SI_type_node, opaque_V4SI_type_node,
16765 opaque_V4SI_type_node, NULL_TREE);
16766 tree opaque_ftype_opaque_opaque
16767 = build_function_type_list (opaque_V4SI_type_node,
16768 opaque_V4SI_type_node, opaque_V4SI_type_node,
16769 NULL_TREE);
16770 tree int_ftype_int_opaque_opaque
16771 = build_function_type_list (integer_type_node,
16772 integer_type_node, opaque_V4SI_type_node,
16773 opaque_V4SI_type_node, NULL_TREE);
16774 tree int_ftype_int_v4si_v4si
16775 = build_function_type_list (integer_type_node,
16776 integer_type_node, V4SI_type_node,
16777 V4SI_type_node, NULL_TREE);
16778 tree int_ftype_int_v2di_v2di
16779 = build_function_type_list (integer_type_node,
16780 integer_type_node, V2DI_type_node,
16781 V2DI_type_node, NULL_TREE);
16782 tree void_ftype_v4si
16783 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16784 tree v8hi_ftype_void
16785 = build_function_type_list (V8HI_type_node, NULL_TREE);
16786 tree void_ftype_void
16787 = build_function_type_list (void_type_node, NULL_TREE);
16788 tree void_ftype_int
16789 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16791 tree opaque_ftype_long_pcvoid
16792 = build_function_type_list (opaque_V4SI_type_node,
16793 long_integer_type_node, pcvoid_type_node,
16794 NULL_TREE);
16795 tree v16qi_ftype_long_pcvoid
16796 = build_function_type_list (V16QI_type_node,
16797 long_integer_type_node, pcvoid_type_node,
16798 NULL_TREE);
16799 tree v8hi_ftype_long_pcvoid
16800 = build_function_type_list (V8HI_type_node,
16801 long_integer_type_node, pcvoid_type_node,
16802 NULL_TREE);
16803 tree v4si_ftype_long_pcvoid
16804 = build_function_type_list (V4SI_type_node,
16805 long_integer_type_node, pcvoid_type_node,
16806 NULL_TREE);
16807 tree v4sf_ftype_long_pcvoid
16808 = build_function_type_list (V4SF_type_node,
16809 long_integer_type_node, pcvoid_type_node,
16810 NULL_TREE);
16811 tree v2df_ftype_long_pcvoid
16812 = build_function_type_list (V2DF_type_node,
16813 long_integer_type_node, pcvoid_type_node,
16814 NULL_TREE);
16815 tree v2di_ftype_long_pcvoid
16816 = build_function_type_list (V2DI_type_node,
16817 long_integer_type_node, pcvoid_type_node,
16818 NULL_TREE);
16820 tree void_ftype_opaque_long_pvoid
16821 = build_function_type_list (void_type_node,
16822 opaque_V4SI_type_node, long_integer_type_node,
16823 pvoid_type_node, NULL_TREE);
16824 tree void_ftype_v4si_long_pvoid
16825 = build_function_type_list (void_type_node,
16826 V4SI_type_node, long_integer_type_node,
16827 pvoid_type_node, NULL_TREE);
16828 tree void_ftype_v16qi_long_pvoid
16829 = build_function_type_list (void_type_node,
16830 V16QI_type_node, long_integer_type_node,
16831 pvoid_type_node, NULL_TREE);
16832 tree void_ftype_v8hi_long_pvoid
16833 = build_function_type_list (void_type_node,
16834 V8HI_type_node, long_integer_type_node,
16835 pvoid_type_node, NULL_TREE);
16836 tree void_ftype_v4sf_long_pvoid
16837 = build_function_type_list (void_type_node,
16838 V4SF_type_node, long_integer_type_node,
16839 pvoid_type_node, NULL_TREE);
16840 tree void_ftype_v2df_long_pvoid
16841 = build_function_type_list (void_type_node,
16842 V2DF_type_node, long_integer_type_node,
16843 pvoid_type_node, NULL_TREE);
16844 tree void_ftype_v2di_long_pvoid
16845 = build_function_type_list (void_type_node,
16846 V2DI_type_node, long_integer_type_node,
16847 pvoid_type_node, NULL_TREE);
16848 tree int_ftype_int_v8hi_v8hi
16849 = build_function_type_list (integer_type_node,
16850 integer_type_node, V8HI_type_node,
16851 V8HI_type_node, NULL_TREE);
16852 tree int_ftype_int_v16qi_v16qi
16853 = build_function_type_list (integer_type_node,
16854 integer_type_node, V16QI_type_node,
16855 V16QI_type_node, NULL_TREE);
16856 tree int_ftype_int_v4sf_v4sf
16857 = build_function_type_list (integer_type_node,
16858 integer_type_node, V4SF_type_node,
16859 V4SF_type_node, NULL_TREE);
16860 tree int_ftype_int_v2df_v2df
16861 = build_function_type_list (integer_type_node,
16862 integer_type_node, V2DF_type_node,
16863 V2DF_type_node, NULL_TREE);
16864 tree v2di_ftype_v2di
16865 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16866 tree v4si_ftype_v4si
16867 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16868 tree v8hi_ftype_v8hi
16869 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16870 tree v16qi_ftype_v16qi
16871 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16872 tree v4sf_ftype_v4sf
16873 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16874 tree v2df_ftype_v2df
16875 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16876 tree void_ftype_pcvoid_int_int
16877 = build_function_type_list (void_type_node,
16878 pcvoid_type_node, integer_type_node,
16879 integer_type_node, NULL_TREE);
16881 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16882 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16883 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16884 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16885 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16886 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16887 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16888 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16889 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16890 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16891 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16892 ALTIVEC_BUILTIN_LVXL_V2DF);
16893 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16894 ALTIVEC_BUILTIN_LVXL_V2DI);
16895 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16896 ALTIVEC_BUILTIN_LVXL_V4SF);
16897 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16898 ALTIVEC_BUILTIN_LVXL_V4SI);
16899 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16900 ALTIVEC_BUILTIN_LVXL_V8HI);
16901 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16902 ALTIVEC_BUILTIN_LVXL_V16QI);
16903 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16904 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16905 ALTIVEC_BUILTIN_LVX_V2DF);
16906 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16907 ALTIVEC_BUILTIN_LVX_V2DI);
16908 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16909 ALTIVEC_BUILTIN_LVX_V4SF);
16910 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16911 ALTIVEC_BUILTIN_LVX_V4SI);
16912 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16913 ALTIVEC_BUILTIN_LVX_V8HI);
16914 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16915 ALTIVEC_BUILTIN_LVX_V16QI);
16916 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16917 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16918 ALTIVEC_BUILTIN_STVX_V2DF);
16919 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16920 ALTIVEC_BUILTIN_STVX_V2DI);
16921 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16922 ALTIVEC_BUILTIN_STVX_V4SF);
16923 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16924 ALTIVEC_BUILTIN_STVX_V4SI);
16925 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16926 ALTIVEC_BUILTIN_STVX_V8HI);
16927 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16928 ALTIVEC_BUILTIN_STVX_V16QI);
16929 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16930 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16931 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16932 ALTIVEC_BUILTIN_STVXL_V2DF);
16933 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16934 ALTIVEC_BUILTIN_STVXL_V2DI);
16935 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16936 ALTIVEC_BUILTIN_STVXL_V4SF);
16937 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16938 ALTIVEC_BUILTIN_STVXL_V4SI);
16939 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16940 ALTIVEC_BUILTIN_STVXL_V8HI);
16941 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16942 ALTIVEC_BUILTIN_STVXL_V16QI);
16943 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16944 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16945 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16946 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16947 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16948 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16949 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16950 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16951 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16952 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16953 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16954 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16955 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16956 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16957 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16958 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16960 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16961 VSX_BUILTIN_LXVD2X_V2DF);
16962 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16963 VSX_BUILTIN_LXVD2X_V2DI);
16964 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16965 VSX_BUILTIN_LXVW4X_V4SF);
16966 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16967 VSX_BUILTIN_LXVW4X_V4SI);
16968 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16969 VSX_BUILTIN_LXVW4X_V8HI);
16970 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16971 VSX_BUILTIN_LXVW4X_V16QI);
16972 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16973 VSX_BUILTIN_STXVD2X_V2DF);
16974 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16975 VSX_BUILTIN_STXVD2X_V2DI);
16976 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16977 VSX_BUILTIN_STXVW4X_V4SF);
16978 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16979 VSX_BUILTIN_STXVW4X_V4SI);
16980 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16981 VSX_BUILTIN_STXVW4X_V8HI);
16982 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16983 VSX_BUILTIN_STXVW4X_V16QI);
16985 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16986 VSX_BUILTIN_LD_ELEMREV_V2DF);
16987 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16988 VSX_BUILTIN_LD_ELEMREV_V2DI);
16989 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16990 VSX_BUILTIN_LD_ELEMREV_V4SF);
16991 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16992 VSX_BUILTIN_LD_ELEMREV_V4SI);
16993 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16994 VSX_BUILTIN_ST_ELEMREV_V2DF);
16995 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16996 VSX_BUILTIN_ST_ELEMREV_V2DI);
16997 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16998 VSX_BUILTIN_ST_ELEMREV_V4SF);
16999 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17000 VSX_BUILTIN_ST_ELEMREV_V4SI);
17002 if (TARGET_P9_VECTOR)
17003 {
17004 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17005 VSX_BUILTIN_LD_ELEMREV_V8HI);
17006 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17007 VSX_BUILTIN_LD_ELEMREV_V16QI);
17008 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
17009 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
17010 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
17011 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
17012 }
17014 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17015 VSX_BUILTIN_VEC_LD);
17016 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17017 VSX_BUILTIN_VEC_ST);
17018 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17019 VSX_BUILTIN_VEC_XL);
17020 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17021 VSX_BUILTIN_VEC_XST);
17023 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17024 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17025 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17027 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17028 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17029 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17030 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17031 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17032 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17033 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17034 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17035 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17036 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17037 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17038 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17040 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17041 ALTIVEC_BUILTIN_VEC_ADDE);
17042 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17043 ALTIVEC_BUILTIN_VEC_ADDEC);
17044 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17045 ALTIVEC_BUILTIN_VEC_CMPNE);
17046 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17047 ALTIVEC_BUILTIN_VEC_MUL);
17049 /* Cell builtins. */
17050 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17051 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17052 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17053 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17055 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17056 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17057 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17058 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17060 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17061 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17062 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17063 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17065 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17066 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17067 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17068 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
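/* The lvlx/lvrx pairs implement the Cell unaligned-load idiom; a
   hedged sketch of how the halves combine (illustrative only):

       left  = __builtin_altivec_lvlx (0, p);     leading bytes
       right = __builtin_altivec_lvrx (16, p);    trailing bytes
       v = vec_or (left, right);  */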
17070 /* Add the DST variants. */
17071 d = bdesc_dst;
17072 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17073 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17075 /* Initialize the predicates. */
17076 d = bdesc_altivec_preds;
17077 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17078 {
17079 machine_mode mode1;
17080 tree type;
17082 if (rs6000_overloaded_builtin_p (d->code))
17083 mode1 = VOIDmode;
17084 else
17085 mode1 = insn_data[d->icode].operand[1].mode;
17087 switch (mode1)
17088 {
17089 case VOIDmode:
17090 type = int_ftype_int_opaque_opaque;
17091 break;
17092 case V2DImode:
17093 type = int_ftype_int_v2di_v2di;
17094 break;
17095 case V4SImode:
17096 type = int_ftype_int_v4si_v4si;
17097 break;
17098 case V8HImode:
17099 type = int_ftype_int_v8hi_v8hi;
17100 break;
17101 case V16QImode:
17102 type = int_ftype_int_v16qi_v16qi;
17103 break;
17104 case V4SFmode:
17105 type = int_ftype_int_v4sf_v4sf;
17106 break;
17107 case V2DFmode:
17108 type = int_ftype_int_v2df_v2df;
17109 break;
17110 default:
17111 gcc_unreachable ();
17112 }
17114 def_builtin (d->name, type, d->code);
17115 }
17117 /* Initialize the abs* operators. */
17118 d = bdesc_abs;
17119 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17120 {
17121 machine_mode mode0;
17122 tree type;
17124 mode0 = insn_data[d->icode].operand[0].mode;
17126 switch (mode0)
17127 {
17128 case V2DImode:
17129 type = v2di_ftype_v2di;
17130 break;
17131 case V4SImode:
17132 type = v4si_ftype_v4si;
17133 break;
17134 case V8HImode:
17135 type = v8hi_ftype_v8hi;
17136 break;
17137 case V16QImode:
17138 type = v16qi_ftype_v16qi;
17139 break;
17140 case V4SFmode:
17141 type = v4sf_ftype_v4sf;
17142 break;
17143 case V2DFmode:
17144 type = v2df_ftype_v2df;
17145 break;
17146 default:
17147 gcc_unreachable ();
17148 }
17150 def_builtin (d->name, type, d->code);
17151 }
17153 /* Initialize target builtin that implements
17154 targetm.vectorize.builtin_mask_for_load. */
17156 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17157 v16qi_ftype_long_pcvoid,
17158 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17159 BUILT_IN_MD, NULL, NULL_TREE);
17160 TREE_READONLY (decl) = 1;
17161 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17162 altivec_builtin_mask_for_load = decl;
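/* The vectorizer calls this decl when it realigns a misaligned
   vector load; a hedged sketch of the resulting pattern
   (illustrative only):

       mask = __builtin_altivec_mask_for_load (addr);
       lo = vec_ld (0, addr);
       hi = vec_ld (15, addr);
       v = vec_perm (lo, hi, mask);  */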
17164 /* Access to the vec_init patterns. */
17165 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17166 integer_type_node, integer_type_node,
17167 integer_type_node, NULL_TREE);
17168 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17170 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17171 short_integer_type_node,
17172 short_integer_type_node,
17173 short_integer_type_node,
17174 short_integer_type_node,
17175 short_integer_type_node,
17176 short_integer_type_node,
17177 short_integer_type_node, NULL_TREE);
17178 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17180 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17181 char_type_node, char_type_node,
17182 char_type_node, char_type_node,
17183 char_type_node, char_type_node,
17184 char_type_node, char_type_node,
17185 char_type_node, char_type_node,
17186 char_type_node, char_type_node,
17187 char_type_node, char_type_node,
17188 char_type_node, NULL_TREE);
17189 def_builtin ("__builtin_vec_init_v16qi", ftype,
17190 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17192 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17193 float_type_node, float_type_node,
17194 float_type_node, NULL_TREE);
17195 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17197 /* VSX builtins. */
17198 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17199 double_type_node, NULL_TREE);
17200 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17202 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17203 intDI_type_node, NULL_TREE);
17204 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17206 /* Access to the vec_set patterns. */
17207 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17208 intSI_type_node,
17209 integer_type_node, NULL_TREE);
17210 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17212 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17213 intHI_type_node,
17214 integer_type_node, NULL_TREE);
17215 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17217 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17218 intQI_type_node,
17219 integer_type_node, NULL_TREE);
17220 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17222 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17223 float_type_node,
17224 integer_type_node, NULL_TREE);
17225 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17227 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17228 double_type_node,
17229 integer_type_node, NULL_TREE);
17230 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17232 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17233 intDI_type_node,
17234 integer_type_node, NULL_TREE);
17235 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17237 /* Access to the vec_extract patterns. */
17238 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17239 integer_type_node, NULL_TREE);
17240 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17242 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17243 integer_type_node, NULL_TREE);
17244 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17246 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17247 integer_type_node, NULL_TREE);
17248 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17250 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17251 integer_type_node, NULL_TREE);
17252 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17254 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17255 integer_type_node, NULL_TREE);
17256 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17258 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17259 integer_type_node, NULL_TREE);
17260 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
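/* Usage sketch for the vec_init/vec_set/vec_ext patterns above
   (illustrative only; front ends normally reach them through vector
   initializers and subscripting rather than by name):

       vector int v = { a, b, c, d };             vec_init_v4si
       int e = __builtin_vec_ext_v4si (v, 2);     vec_ext_v4si  */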
17263 if (V1TI_type_node)
17264 {
17265 tree v1ti_ftype_long_pcvoid
17266 = build_function_type_list (V1TI_type_node,
17267 long_integer_type_node, pcvoid_type_node,
17268 NULL_TREE);
17269 tree void_ftype_v1ti_long_pvoid
17270 = build_function_type_list (void_type_node,
17271 V1TI_type_node, long_integer_type_node,
17272 pvoid_type_node, NULL_TREE);
17273 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17274 VSX_BUILTIN_LXVD2X_V1TI);
17275 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17276 VSX_BUILTIN_STXVD2X_V1TI);
17277 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17278 NULL_TREE, NULL_TREE);
17279 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17280 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17281 intTI_type_node,
17282 integer_type_node, NULL_TREE);
17283 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17284 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17285 integer_type_node, NULL_TREE);
17286 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17287 }
17288 }
17291 static void
17292 htm_init_builtins (void)
17293 {
17294 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17295 const struct builtin_description *d;
17296 size_t i;
17298 d = bdesc_htm;
17299 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17300 {
17301 tree op[MAX_HTM_OPERANDS], type;
17302 HOST_WIDE_INT mask = d->mask;
17303 unsigned attr = rs6000_builtin_info[d->code].attr;
17304 bool void_func = (attr & RS6000_BTC_VOID);
17305 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17306 int nopnds = 0;
17307 tree gpr_type_node;
17308 tree rettype;
17309 tree argtype;
17311 if (TARGET_32BIT && TARGET_POWERPC64)
17312 gpr_type_node = long_long_unsigned_type_node;
17313 else
17314 gpr_type_node = long_unsigned_type_node;
17316 if (attr & RS6000_BTC_SPR)
17318 rettype = gpr_type_node;
17319 argtype = gpr_type_node;
17321 else if (d->code == HTM_BUILTIN_TABORTDC
17322 || d->code == HTM_BUILTIN_TABORTDCI)
17324 rettype = unsigned_type_node;
17325 argtype = gpr_type_node;
17327 else
17329 rettype = unsigned_type_node;
17330 argtype = unsigned_type_node;
17333 if ((mask & builtin_mask) != mask)
17335 if (TARGET_DEBUG_BUILTIN)
17336 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17337 continue;
17340 if (d->name == 0)
17342 if (TARGET_DEBUG_BUILTIN)
17343 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17344 (long unsigned) i);
17345 continue;
17348 op[nopnds++] = (void_func) ? void_type_node : rettype;
17350 if (attr_args == RS6000_BTC_UNARY)
17351 op[nopnds++] = argtype;
17352 else if (attr_args == RS6000_BTC_BINARY)
17353 {
17354 op[nopnds++] = argtype;
17355 op[nopnds++] = argtype;
17356 }
17357 else if (attr_args == RS6000_BTC_TERNARY)
17358 {
17359 op[nopnds++] = argtype;
17360 op[nopnds++] = argtype;
17361 op[nopnds++] = argtype;
17362 }
17364 switch (nopnds)
17365 {
17366 case 1:
17367 type = build_function_type_list (op[0], NULL_TREE);
17368 break;
17369 case 2:
17370 type = build_function_type_list (op[0], op[1], NULL_TREE);
17371 break;
17372 case 3:
17373 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17374 break;
17375 case 4:
17376 type = build_function_type_list (op[0], op[1], op[2], op[3],
17377 NULL_TREE);
17378 break;
17379 default:
17380 gcc_unreachable ();
17381 }
17383 def_builtin (d->name, type, d->code);
17384 }
17385 }
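/* Hedged usage sketch for the HTM builtins registered above
   (illustrative only; requires -mhtm):

       if (__builtin_tbegin (0))
         {
           ... transactional body ...
           __builtin_tend (0);
         }  */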
17387 /* Hash function for builtin functions with up to 3 arguments and a return
17388 type. */
17389 hashval_t
17390 builtin_hasher::hash (builtin_hash_struct *bh)
17391 {
17392 unsigned ret = 0;
17393 int i;
17395 for (i = 0; i < 4; i++)
17396 {
17397 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17398 ret = (ret * 2) + bh->uns_p[i];
17399 }
17401 return ret;
17402 }
17404 /* Compare builtin hash entries H1 and H2 for equivalence. */
17405 bool
17406 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17407 {
17408 return ((p1->mode[0] == p2->mode[0])
17409 && (p1->mode[1] == p2->mode[1])
17410 && (p1->mode[2] == p2->mode[2])
17411 && (p1->mode[3] == p2->mode[3])
17412 && (p1->uns_p[0] == p2->uns_p[0])
17413 && (p1->uns_p[1] == p2->uns_p[1])
17414 && (p1->uns_p[2] == p2->uns_p[2])
17415 && (p1->uns_p[3] == p2->uns_p[3]));
17418 /* Map types for builtin functions with an explicit return type and up to 3
17419 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
17420 of the argument. */
17421 static tree
17422 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17423 machine_mode mode_arg1, machine_mode mode_arg2,
17424 enum rs6000_builtins builtin, const char *name)
17425 {
17426 struct builtin_hash_struct h;
17427 struct builtin_hash_struct *h2;
17428 int num_args = 3;
17429 int i;
17430 tree ret_type = NULL_TREE;
17431 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17433 /* Create builtin_hash_table. */
17434 if (builtin_hash_table == NULL)
17435 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17437 h.type = NULL_TREE;
17438 h.mode[0] = mode_ret;
17439 h.mode[1] = mode_arg0;
17440 h.mode[2] = mode_arg1;
17441 h.mode[3] = mode_arg2;
17442 h.uns_p[0] = 0;
17443 h.uns_p[1] = 0;
17444 h.uns_p[2] = 0;
17445 h.uns_p[3] = 0;
17447 /* If the builtin is a type that produces unsigned results or takes unsigned
17448 arguments, and it is returned as a decl for the vectorizer (such as
17449 widening multiplies, permute), make sure the arguments and return value
17450 are type correct. */
17451 switch (builtin)
17452 {
17453 /* unsigned 1 argument functions. */
17454 case CRYPTO_BUILTIN_VSBOX:
17455 case P8V_BUILTIN_VGBBD:
17456 case MISC_BUILTIN_CDTBCD:
17457 case MISC_BUILTIN_CBCDTD:
17458 h.uns_p[0] = 1;
17459 h.uns_p[1] = 1;
17460 break;
17462 /* unsigned 2 argument functions. */
17463 case ALTIVEC_BUILTIN_VMULEUB_UNS:
17464 case ALTIVEC_BUILTIN_VMULEUH_UNS:
17465 case ALTIVEC_BUILTIN_VMULOUB_UNS:
17466 case ALTIVEC_BUILTIN_VMULOUH_UNS:
17467 case CRYPTO_BUILTIN_VCIPHER:
17468 case CRYPTO_BUILTIN_VCIPHERLAST:
17469 case CRYPTO_BUILTIN_VNCIPHER:
17470 case CRYPTO_BUILTIN_VNCIPHERLAST:
17471 case CRYPTO_BUILTIN_VPMSUMB:
17472 case CRYPTO_BUILTIN_VPMSUMH:
17473 case CRYPTO_BUILTIN_VPMSUMW:
17474 case CRYPTO_BUILTIN_VPMSUMD:
17475 case CRYPTO_BUILTIN_VPMSUM:
17476 case MISC_BUILTIN_ADDG6S:
17477 case MISC_BUILTIN_DIVWEU:
17478 case MISC_BUILTIN_DIVWEUO:
17479 case MISC_BUILTIN_DIVDEU:
17480 case MISC_BUILTIN_DIVDEUO:
17481 h.uns_p[0] = 1;
17482 h.uns_p[1] = 1;
17483 h.uns_p[2] = 1;
17484 break;
17486 /* unsigned 3 argument functions. */
17487 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17488 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17489 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17490 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17491 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17492 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17493 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17494 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17495 case VSX_BUILTIN_VPERM_16QI_UNS:
17496 case VSX_BUILTIN_VPERM_8HI_UNS:
17497 case VSX_BUILTIN_VPERM_4SI_UNS:
17498 case VSX_BUILTIN_VPERM_2DI_UNS:
17499 case VSX_BUILTIN_XXSEL_16QI_UNS:
17500 case VSX_BUILTIN_XXSEL_8HI_UNS:
17501 case VSX_BUILTIN_XXSEL_4SI_UNS:
17502 case VSX_BUILTIN_XXSEL_2DI_UNS:
17503 case CRYPTO_BUILTIN_VPERMXOR:
17504 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17505 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17506 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17507 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17508 case CRYPTO_BUILTIN_VSHASIGMAW:
17509 case CRYPTO_BUILTIN_VSHASIGMAD:
17510 case CRYPTO_BUILTIN_VSHASIGMA:
17511 h.uns_p[0] = 1;
17512 h.uns_p[1] = 1;
17513 h.uns_p[2] = 1;
17514 h.uns_p[3] = 1;
17515 break;
17517 /* signed permute functions with unsigned char mask. */
17518 case ALTIVEC_BUILTIN_VPERM_16QI:
17519 case ALTIVEC_BUILTIN_VPERM_8HI:
17520 case ALTIVEC_BUILTIN_VPERM_4SI:
17521 case ALTIVEC_BUILTIN_VPERM_4SF:
17522 case ALTIVEC_BUILTIN_VPERM_2DI:
17523 case ALTIVEC_BUILTIN_VPERM_2DF:
17524 case VSX_BUILTIN_VPERM_16QI:
17525 case VSX_BUILTIN_VPERM_8HI:
17526 case VSX_BUILTIN_VPERM_4SI:
17527 case VSX_BUILTIN_VPERM_4SF:
17528 case VSX_BUILTIN_VPERM_2DI:
17529 case VSX_BUILTIN_VPERM_2DF:
17530 h.uns_p[3] = 1;
17531 break;
17533 /* unsigned args, signed return. */
17534 case VSX_BUILTIN_XVCVUXDDP_UNS:
17535 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17536 h.uns_p[1] = 1;
17537 break;
17539 /* signed args, unsigned return. */
17540 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17541 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17542 case MISC_BUILTIN_UNPACK_TD:
17543 case MISC_BUILTIN_UNPACK_V1TI:
17544 h.uns_p[0] = 1;
17545 break;
17547 /* unsigned arguments for 128-bit pack instructions. */
17548 case MISC_BUILTIN_PACK_TD:
17549 case MISC_BUILTIN_PACK_V1TI:
17550 h.uns_p[1] = 1;
17551 h.uns_p[2] = 1;
17552 break;
17554 default:
17555 break;
17556 }
17558 /* Figure out how many args are present. */
17559 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17560 num_args--;
17562 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17563 if (!ret_type && h.uns_p[0])
17564 ret_type = builtin_mode_to_type[h.mode[0]][0];
17566 if (!ret_type)
17567 fatal_error (input_location,
17568 "internal error: builtin function %s had an unexpected "
17569 "return type %s", name, GET_MODE_NAME (h.mode[0]));
17571 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17572 arg_type[i] = NULL_TREE;
17574 for (i = 0; i < num_args; i++)
17575 {
17576 int m = (int) h.mode[i+1];
17577 int uns_p = h.uns_p[i+1];
17579 arg_type[i] = builtin_mode_to_type[m][uns_p];
17580 if (!arg_type[i] && uns_p)
17581 arg_type[i] = builtin_mode_to_type[m][0];
17583 if (!arg_type[i])
17584 fatal_error (input_location,
17585 "internal error: builtin function %s, argument %d "
17586 "had unexpected argument type %s", name, i,
17587 GET_MODE_NAME (m));
17588 }
17590 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17591 if (*found == NULL)
17592 {
17593 h2 = ggc_alloc<builtin_hash_struct> ();
17594 *h2 = h;
17595 *found = h2;
17597 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17598 arg_type[2], NULL_TREE);
17599 }
17601 return (*found)->type;
17602 }
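/* Worked example: the __builtin_recipdiv registration earlier passes
   (DFmode, DFmode, DFmode, VOIDmode), so num_args resolves to 2 and
   the cached tree is the C signature double (double, double).  */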
17604 static void
17605 rs6000_common_init_builtins (void)
17606 {
17607 const struct builtin_description *d;
17608 size_t i;
17610 tree opaque_ftype_opaque = NULL_TREE;
17611 tree opaque_ftype_opaque_opaque = NULL_TREE;
17612 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17613 tree v2si_ftype = NULL_TREE;
17614 tree v2si_ftype_qi = NULL_TREE;
17615 tree v2si_ftype_v2si_qi = NULL_TREE;
17616 tree v2si_ftype_int_qi = NULL_TREE;
17617 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17619 if (!TARGET_PAIRED_FLOAT)
17620 {
17621 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
17622 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
17623 }
17625 /* Paired and SPE builtins are only available if you build a compiler with
17626 the appropriate options, so only create those builtins with the
17627 appropriate compiler option. Create Altivec and VSX builtins on machines
17628 with at least the general purpose extensions (970 and newer) to allow the
17629 use of the target attribute.  */
17631 if (TARGET_EXTRA_BUILTINS)
17632 builtin_mask |= RS6000_BTM_COMMON;
17634 /* Add the ternary operators. */
17635 d = bdesc_3arg;
17636 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17637 {
17638 tree type;
17639 HOST_WIDE_INT mask = d->mask;
17641 if ((mask & builtin_mask) != mask)
17643 if (TARGET_DEBUG_BUILTIN)
17644 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17645 continue;
17648 if (rs6000_overloaded_builtin_p (d->code))
17649 {
17650 if (! (type = opaque_ftype_opaque_opaque_opaque))
17651 type = opaque_ftype_opaque_opaque_opaque
17652 = build_function_type_list (opaque_V4SI_type_node,
17653 opaque_V4SI_type_node,
17654 opaque_V4SI_type_node,
17655 opaque_V4SI_type_node,
17656 NULL_TREE);
17658 else
17660 enum insn_code icode = d->icode;
17661 if (d->name == 0)
17663 if (TARGET_DEBUG_BUILTIN)
17664 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17665 (long unsigned)i);
17667 continue;
17670 if (icode == CODE_FOR_nothing)
17672 if (TARGET_DEBUG_BUILTIN)
17673 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17674 d->name);
17676 continue;
17679 type = builtin_function_type (insn_data[icode].operand[0].mode,
17680 insn_data[icode].operand[1].mode,
17681 insn_data[icode].operand[2].mode,
17682 insn_data[icode].operand[3].mode,
17683 d->code, d->name);
17686 def_builtin (d->name, type, d->code);
17689 /* Add the binary operators. */
17690 d = bdesc_2arg;
17691 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17693 machine_mode mode0, mode1, mode2;
17694 tree type;
17695 HOST_WIDE_INT mask = d->mask;
17697 if ((mask & builtin_mask) != mask)
17699 if (TARGET_DEBUG_BUILTIN)
17700 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17701 continue;
17704 if (rs6000_overloaded_builtin_p (d->code))
17706 if (! (type = opaque_ftype_opaque_opaque))
17707 type = opaque_ftype_opaque_opaque
17708 = build_function_type_list (opaque_V4SI_type_node,
17709 opaque_V4SI_type_node,
17710 opaque_V4SI_type_node,
17711 NULL_TREE);
17713 else
17715 enum insn_code icode = d->icode;
17716 if (d->name == 0)
17718 if (TARGET_DEBUG_BUILTIN)
17719 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17720 (long unsigned)i);
17722 continue;
17725 if (icode == CODE_FOR_nothing)
17727 if (TARGET_DEBUG_BUILTIN)
17728 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17729 d->name);
17731 continue;
17734 mode0 = insn_data[icode].operand[0].mode;
17735 mode1 = insn_data[icode].operand[1].mode;
17736 mode2 = insn_data[icode].operand[2].mode;
17738 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
17740 if (! (type = v2si_ftype_v2si_qi))
17741 type = v2si_ftype_v2si_qi
17742 = build_function_type_list (opaque_V2SI_type_node,
17743 opaque_V2SI_type_node,
17744 char_type_node,
17745 NULL_TREE);
17748 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
17749 && mode2 == QImode)
17751 if (! (type = v2si_ftype_int_qi))
17752 type = v2si_ftype_int_qi
17753 = build_function_type_list (opaque_V2SI_type_node,
17754 integer_type_node,
17755 char_type_node,
17756 NULL_TREE);
17759 else
17760 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17761 d->code, d->name);
17764 def_builtin (d->name, type, d->code);
17767 /* Add the simple unary operators. */
17768 d = bdesc_1arg;
17769 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17771 machine_mode mode0, mode1;
17772 tree type;
17773 HOST_WIDE_INT mask = d->mask;
17775 if ((mask & builtin_mask) != mask)
17777 if (TARGET_DEBUG_BUILTIN)
17778 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17779 continue;
17782 if (rs6000_overloaded_builtin_p (d->code))
17784 if (! (type = opaque_ftype_opaque))
17785 type = opaque_ftype_opaque
17786 = build_function_type_list (opaque_V4SI_type_node,
17787 opaque_V4SI_type_node,
17788 NULL_TREE);
17790 else
17792 enum insn_code icode = d->icode;
17793 if (d->name == 0)
17795 if (TARGET_DEBUG_BUILTIN)
17796 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
17797 (long unsigned)i);
17799 continue;
17802 if (icode == CODE_FOR_nothing)
17804 if (TARGET_DEBUG_BUILTIN)
17805 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17806 d->name);
17808 continue;
17811 mode0 = insn_data[icode].operand[0].mode;
17812 mode1 = insn_data[icode].operand[1].mode;
17814 if (mode0 == V2SImode && mode1 == QImode)
17816 if (! (type = v2si_ftype_qi))
17817 type = v2si_ftype_qi
17818 = build_function_type_list (opaque_V2SI_type_node,
17819 char_type_node,
17820 NULL_TREE);
17823 else
17824 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17825 d->code, d->name);
17828 def_builtin (d->name, type, d->code);
17831 /* Add the simple no-argument operators. */
17832 d = bdesc_0arg;
17833 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17835 machine_mode mode0;
17836 tree type;
17837 HOST_WIDE_INT mask = d->mask;
17839 if ((mask & builtin_mask) != mask)
17841 if (TARGET_DEBUG_BUILTIN)
17842 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17843 continue;
17845 if (rs6000_overloaded_builtin_p (d->code))
17847 if (!opaque_ftype_opaque)
17848 opaque_ftype_opaque
17849 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17850 type = opaque_ftype_opaque;
17852 else
17854 enum insn_code icode = d->icode;
17855 if (d->name == 0)
17857 if (TARGET_DEBUG_BUILTIN)
17858 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17859 (long unsigned) i);
17860 continue;
17862 if (icode == CODE_FOR_nothing)
17864 if (TARGET_DEBUG_BUILTIN)
17865 fprintf (stderr,
17866 "rs6000_builtin, skip no-argument %s (no code)\n",
17867 d->name);
17868 continue;
17870 mode0 = insn_data[icode].operand[0].mode;
17871 if (mode0 == V2SImode)
17873 /* Code for SPE. */
17874 if (! (type = v2si_ftype))
17876 v2si_ftype
17877 = build_function_type_list (opaque_V2SI_type_node,
17878 NULL_TREE);
17879 type = v2si_ftype;
17882 else
17883 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17884 d->code, d->name);
17886 def_builtin (d->name, type, d->code);
17890 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17891 static void
17892 init_float128_ibm (machine_mode mode)
17894 if (!TARGET_XL_COMPAT)
17896 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17897 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17898 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17899 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17901 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
17903 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17904 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17905 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17906 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17907 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17908 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17909 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17911 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17912 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17913 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17914 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17915 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17916 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17917 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17918 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17921 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
17922 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17924 else
17926 set_optab_libfunc (add_optab, mode, "_xlqadd");
17927 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17928 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17929 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17932 /* Add various conversions for IFmode to use the traditional TFmode
17933 names. */
17934 if (mode == IFmode)
17936 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
17937 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
17938 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
17939 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
17940 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
17941 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
17943 if (TARGET_POWERPC64)
17945 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17946 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17947 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17948 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17953 /* Set up IEEE 128-bit floating point routines. Use different names if the
17954 arguments can be passed in a vector register. The historical PowerPC
17955 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17956 continue to use that if we aren't using vector registers to pass IEEE
17957 128-bit floating point. */
17959 static void
17960 init_float128_ieee (machine_mode mode)
17962 if (FLOAT128_VECTOR_P (mode))
17964 set_optab_libfunc (add_optab, mode, "__addkf3");
17965 set_optab_libfunc (sub_optab, mode, "__subkf3");
17966 set_optab_libfunc (neg_optab, mode, "__negkf2");
17967 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17968 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17969 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17970 set_optab_libfunc (abs_optab, mode, "__abskf2");
17972 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17973 set_optab_libfunc (ne_optab, mode, "__nekf2");
17974 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17975 set_optab_libfunc (ge_optab, mode, "__gekf2");
17976 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17977 set_optab_libfunc (le_optab, mode, "__lekf2");
17978 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17980 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17981 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17982 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17983 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17985 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
17986 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17987 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
17989 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
17990 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17991 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
17993 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
17994 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
17995 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
17996 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
17997 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
17998 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18000 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18001 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18002 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18003 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18005 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18006 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18007 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18008 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18010 if (TARGET_POWERPC64)
18012 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18013 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18014 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18015 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18019 else
18021 set_optab_libfunc (add_optab, mode, "_q_add");
18022 set_optab_libfunc (sub_optab, mode, "_q_sub");
18023 set_optab_libfunc (neg_optab, mode, "_q_neg");
18024 set_optab_libfunc (smul_optab, mode, "_q_mul");
18025 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18026 if (TARGET_PPC_GPOPT)
18027 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18029 set_optab_libfunc (eq_optab, mode, "_q_feq");
18030 set_optab_libfunc (ne_optab, mode, "_q_fne");
18031 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18032 set_optab_libfunc (ge_optab, mode, "_q_fge");
18033 set_optab_libfunc (lt_optab, mode, "_q_flt");
18034 set_optab_libfunc (le_optab, mode, "_q_fle");
18036 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18037 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18038 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18039 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18040 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18041 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18042 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18043 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18047 static void
18048 rs6000_init_libfuncs (void)
18050 /* __float128 support. */
18051 if (TARGET_FLOAT128)
18053 init_float128_ibm (IFmode);
18054 init_float128_ieee (KFmode);
18057 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18058 if (TARGET_LONG_DOUBLE_128)
18060 if (!TARGET_IEEEQUAD)
18061 init_float128_ibm (TFmode);
18063 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18064 else
18065 init_float128_ieee (TFmode);
18070 /* Expand a block clear operation, and return 1 if successful. Return 0
18071 if we should let the compiler generate normal code.
18073 operands[0] is the destination
18074 operands[1] is the length
18075 operands[3] is the alignment */
18077 int
18078 expand_block_clear (rtx operands[])
18080 rtx orig_dest = operands[0];
18081 rtx bytes_rtx = operands[1];
18082 rtx align_rtx = operands[3];
18083 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
18084 HOST_WIDE_INT align;
18085 HOST_WIDE_INT bytes;
18086 int offset;
18087 int clear_bytes;
18088 int clear_step;
18090 /* If this is not a fixed size clear, just call memset. */
18091 if (! constp)
18092 return 0;
18094 /* This must be a fixed size alignment */
18095 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
18096 align = INTVAL (align_rtx) * BITS_PER_UNIT;
18098 /* Anything to clear? */
18099 bytes = INTVAL (bytes_rtx);
18100 if (bytes <= 0)
18101 return 1;
18103 /* Use the builtin memset beyond a certain size, to avoid huge code bloat.
18104 When optimize_size, avoid any significant code bloat; calling
18105 memset is about 4 instructions, so allow for one instruction to
18106 load zero and three to do clearing. */
18107 if (TARGET_ALTIVEC && align >= 128)
18108 clear_step = 16;
18109 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
18110 clear_step = 8;
18111 else if (TARGET_SPE && align >= 64)
18112 clear_step = 8;
18113 else
18114 clear_step = 4;
18116 if (optimize_size && bytes > 3 * clear_step)
18117 return 0;
18118 if (! optimize_size && bytes > 8 * clear_step)
18119 return 0;
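/* Illustration (editorial): with TARGET_ALTIVEC and 128-bit alignment,
   clear_step is 16, so the limits above allow inline clearing of up to
   3*16 = 48 bytes at -Os and 8*16 = 128 bytes otherwise; with the
   fallback clear_step of 4 the limits drop to 12 and 32 bytes.  */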
18121 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
18123 machine_mode mode = BLKmode;
18124 rtx dest;
18126 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
18128 clear_bytes = 16;
18129 mode = V4SImode;
18131 else if (bytes >= 8 && TARGET_SPE && align >= 64)
18133 clear_bytes = 8;
18134 mode = V2SImode;
18136 else if (bytes >= 8 && TARGET_POWERPC64
18137 && (align >= 64 || !STRICT_ALIGNMENT))
18139 clear_bytes = 8;
18140 mode = DImode;
18141 if (offset == 0 && align < 64)
18143 rtx addr;
18145 /* If the address form is reg+offset with offset not a
18146 multiple of four, reload into reg indirect form here
18147 rather than waiting for reload. This way we get one
18148 reload, not one per store. */
18149 addr = XEXP (orig_dest, 0);
18150 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
18151 && GET_CODE (XEXP (addr, 1)) == CONST_INT
18152 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
18154 addr = copy_addr_to_reg (addr);
18155 orig_dest = replace_equiv_address (orig_dest, addr);
18159 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
18160 { /* move 4 bytes */
18161 clear_bytes = 4;
18162 mode = SImode;
18164 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
18165 { /* move 2 bytes */
18166 clear_bytes = 2;
18167 mode = HImode;
18169 else /* move 1 byte at a time */
18171 clear_bytes = 1;
18172 mode = QImode;
18175 dest = adjust_address (orig_dest, mode, offset);
18177 emit_move_insn (dest, CONST0_RTX (mode));
18180 return 1;
18184 /* Expand a block move operation, and return 1 if successful. Return 0
18185 if we should let the compiler generate normal code.
18187 operands[0] is the destination
18188 operands[1] is the source
18189 operands[2] is the length
18190 operands[3] is the alignment */
18192 #define MAX_MOVE_REG 4
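/* Example (editorial): a 32-byte copy whose source and destination are
   both 128-bit aligned on an AltiVec target is emitted as two V4SImode
   load/store pairs; without that alignment but with TARGET_STRING, the
   same copy would instead use a single 8-register string move
   (gen_movmemsi_8reg).  */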
18194 int
18195 expand_block_move (rtx operands[])
18197 rtx orig_dest = operands[0];
18198 rtx orig_src = operands[1];
18199 rtx bytes_rtx = operands[2];
18200 rtx align_rtx = operands[3];
18201 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
18202 int align;
18203 int bytes;
18204 int offset;
18205 int move_bytes;
18206 rtx stores[MAX_MOVE_REG];
18207 int num_reg = 0;
18209 /* If this is not a fixed size move, just call memcpy */
18210 if (! constp)
18211 return 0;
18213 /* This must be a fixed size alignment */
18214 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
18215 align = INTVAL (align_rtx) * BITS_PER_UNIT;
18217 /* Anything to move? */
18218 bytes = INTVAL (bytes_rtx);
18219 if (bytes <= 0)
18220 return 1;
18222 if (bytes > rs6000_block_move_inline_limit)
18223 return 0;
18225 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
18227 union {
18228 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
18229 rtx (*mov) (rtx, rtx);
18230 } gen_func;
18231 machine_mode mode = BLKmode;
18232 rtx src, dest;
18234 /* Altivec first, since it will be faster than a string move
18235 when it applies, and usually not significantly larger. */
18236 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
18238 move_bytes = 16;
18239 mode = V4SImode;
18240 gen_func.mov = gen_movv4si;
18242 else if (TARGET_SPE && bytes >= 8 && align >= 64)
18244 move_bytes = 8;
18245 mode = V2SImode;
18246 gen_func.mov = gen_movv2si;
18248 else if (TARGET_STRING
18249 && bytes > 24 /* move up to 32 bytes at a time */
18250 && ! fixed_regs[5]
18251 && ! fixed_regs[6]
18252 && ! fixed_regs[7]
18253 && ! fixed_regs[8]
18254 && ! fixed_regs[9]
18255 && ! fixed_regs[10]
18256 && ! fixed_regs[11]
18257 && ! fixed_regs[12])
18259 move_bytes = (bytes > 32) ? 32 : bytes;
18260 gen_func.movmemsi = gen_movmemsi_8reg;
18262 else if (TARGET_STRING
18263 && bytes > 16 /* move up to 24 bytes at a time */
18264 && ! fixed_regs[5]
18265 && ! fixed_regs[6]
18266 && ! fixed_regs[7]
18267 && ! fixed_regs[8]
18268 && ! fixed_regs[9]
18269 && ! fixed_regs[10])
18271 move_bytes = (bytes > 24) ? 24 : bytes;
18272 gen_func.movmemsi = gen_movmemsi_6reg;
18274 else if (TARGET_STRING
18275 && bytes > 8 /* move up to 16 bytes at a time */
18276 && ! fixed_regs[5]
18277 && ! fixed_regs[6]
18278 && ! fixed_regs[7]
18279 && ! fixed_regs[8])
18281 move_bytes = (bytes > 16) ? 16 : bytes;
18282 gen_func.movmemsi = gen_movmemsi_4reg;
18284 else if (bytes >= 8 && TARGET_POWERPC64
18285 && (align >= 64 || !STRICT_ALIGNMENT))
18287 move_bytes = 8;
18288 mode = DImode;
18289 gen_func.mov = gen_movdi;
18290 if (offset == 0 && align < 64)
18292 rtx addr;
18294 /* If the address form is reg+offset with offset not a
18295 multiple of four, reload into reg indirect form here
18296 rather than waiting for reload. This way we get one
18297 reload, not one per load and/or store. */
18298 addr = XEXP (orig_dest, 0);
18299 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
18300 && GET_CODE (XEXP (addr, 1)) == CONST_INT
18301 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
18303 addr = copy_addr_to_reg (addr);
18304 orig_dest = replace_equiv_address (orig_dest, addr);
18306 addr = XEXP (orig_src, 0);
18307 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
18308 && GET_CODE (XEXP (addr, 1)) == CONST_INT
18309 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
18311 addr = copy_addr_to_reg (addr);
18312 orig_src = replace_equiv_address (orig_src, addr);
18316 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
18317 { /* move up to 8 bytes at a time */
18318 move_bytes = (bytes > 8) ? 8 : bytes;
18319 gen_func.movmemsi = gen_movmemsi_2reg;
18321 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
18322 { /* move 4 bytes */
18323 move_bytes = 4;
18324 mode = SImode;
18325 gen_func.mov = gen_movsi;
18327 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
18328 { /* move 2 bytes */
18329 move_bytes = 2;
18330 mode = HImode;
18331 gen_func.mov = gen_movhi;
18333 else if (TARGET_STRING && bytes > 1)
18334 { /* move up to 4 bytes at a time */
18335 move_bytes = (bytes > 4) ? 4 : bytes;
18336 gen_func.movmemsi = gen_movmemsi_1reg;
18338 else /* move 1 byte at a time */
18340 move_bytes = 1;
18341 mode = QImode;
18342 gen_func.mov = gen_movqi;
18345 src = adjust_address (orig_src, mode, offset);
18346 dest = adjust_address (orig_dest, mode, offset);
18348 if (mode != BLKmode)
18350 rtx tmp_reg = gen_reg_rtx (mode);
18352 emit_insn ((*gen_func.mov) (tmp_reg, src));
18353 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
18356 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
18358 int i;
18359 for (i = 0; i < num_reg; i++)
18360 emit_insn (stores[i]);
18361 num_reg = 0;
18364 if (mode == BLKmode)
18366 /* Move the address into scratch registers. The movmemsi
18367 patterns require zero offset. */
18368 if (!REG_P (XEXP (src, 0)))
18370 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
18371 src = replace_equiv_address (src, src_reg);
18373 set_mem_size (src, move_bytes);
18375 if (!REG_P (XEXP (dest, 0)))
18377 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
18378 dest = replace_equiv_address (dest, dest_reg);
18380 set_mem_size (dest, move_bytes);
18382 emit_insn ((*gen_func.movmemsi) (dest, src,
18383 GEN_INT (move_bytes & 31),
18384 align_rtx));
18388 return 1;
18392 /* Return a string to perform a load_multiple operation.
18393 operands[0] is the vector.
18394 operands[1] is the source address.
18395 operands[2] is the first destination register. */
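/* Worked example (editorial): loading 3 words into r3..r5 from the
   address held in r5 hits the i == words-1 case below: we emit
   "lswi r3,r5,8" for the first two words and then "lwz r5,8(r5)" so the
   register overlapping the address is loaded last.  */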
18397 const char *
18398 rs6000_output_load_multiple (rtx operands[3])
18400 /* We have to handle the case where the pseudo used to contain the address
18401 is assigned to one of the output registers. */
18402 int i, j;
18403 int words = XVECLEN (operands[0], 0);
18404 rtx xop[10];
18406 if (XVECLEN (operands[0], 0) == 1)
18407 return "lwz %2,0(%1)";
18409 for (i = 0; i < words; i++)
18410 if (refers_to_regno_p (REGNO (operands[2]) + i, operands[1]))
18412 if (i == words-1)
18414 xop[0] = GEN_INT (4 * (words-1));
18415 xop[1] = operands[1];
18416 xop[2] = operands[2];
18417 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
18418 return "";
18420 else if (i == 0)
18422 xop[0] = GEN_INT (4 * (words-1));
18423 xop[1] = operands[1];
18424 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
18425 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
18426 return "";
18428 else
18430 for (j = 0; j < words; j++)
18431 if (j != i)
18433 xop[0] = GEN_INT (j * 4);
18434 xop[1] = operands[1];
18435 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
18436 output_asm_insn ("lwz %2,%0(%1)", xop);
18438 xop[0] = GEN_INT (i * 4);
18439 xop[1] = operands[1];
18440 output_asm_insn ("lwz %1,%0(%1)", xop);
18441 return "";
18445 return "lswi %2,%1,%N0";
18449 /* A validation routine: say whether CODE, a condition code, and MODE
18450 match. The other alternatives either don't make sense or should
18451 never be generated. */
18453 void
18454 validate_condition_mode (enum rtx_code code, machine_mode mode)
18456 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18457 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18458 && GET_MODE_CLASS (mode) == MODE_CC);
18460 /* These don't make sense. */
18461 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18462 || mode != CCUNSmode);
18464 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18465 || mode == CCUNSmode);
18467 gcc_assert (mode == CCFPmode
18468 || (code != ORDERED && code != UNORDERED
18469 && code != UNEQ && code != LTGT
18470 && code != UNGT && code != UNLT
18471 && code != UNGE && code != UNLE));
18473 /* These should never be generated except for
18474 flag_finite_math_only. */
18475 gcc_assert (mode != CCFPmode
18476 || flag_finite_math_only
18477 || (code != LE && code != GE
18478 && code != UNEQ && code != LTGT
18479 && code != UNGT && code != UNLT));
18481 /* These are invalid; the information is not there. */
18482 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18486 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18487 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18488 not zero, store there the bit offset (counted from the right) where
18489 the single stretch of 1 bits begins; and similarly for B, the bit
18490 offset where it ends. */
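/* Worked example (editorial): in DImode, MASK 0xff0 has its single
   stretch of ones in bits 4..11, so *E is set to 4 and *B to 11.  A
   wrap-around mask such as 0xff000000000000ff is also accepted (it
   yields *E = 56, *B = 7), since rotate-and-mask insns allow it.  */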
18492 bool
18493 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18495 unsigned HOST_WIDE_INT val = INTVAL (mask);
18496 unsigned HOST_WIDE_INT bit;
18497 int nb, ne;
18498 int n = GET_MODE_PRECISION (mode);
18500 if (mode != DImode && mode != SImode)
18501 return false;
18503 if (INTVAL (mask) >= 0)
18505 bit = val & -val;
18506 ne = exact_log2 (bit);
18507 nb = exact_log2 (val + bit);
18509 else if (val + 1 == 0)
18511 nb = n;
18512 ne = 0;
18514 else if (val & 1)
18516 val = ~val;
18517 bit = val & -val;
18518 nb = exact_log2 (bit);
18519 ne = exact_log2 (val + bit);
18521 else
18523 bit = val & -val;
18524 ne = exact_log2 (bit);
18525 if (val + bit == 0)
18526 nb = n;
18527 else
18528 nb = 0;
18531 nb--;
18533 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18534 return false;
18536 if (b)
18537 *b = nb;
18538 if (e)
18539 *e = ne;
18541 return true;
18544 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18545 or rldicr instruction, to implement an AND with it in mode MODE. */
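/* Examples (editorial): in DImode, 0xff is handled by rldicl (ne == 0),
   0xfff0000000000000 by rldicr (nb == 63), and 0xff0 by rlwinm (nb = 11,
   ne = 4, no wrap); the wrap-around mask 0xff000000000000ff is rejected
   because no single one of those insns can implement it.  */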
18547 bool
18548 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18550 int nb, ne;
18552 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18553 return false;
18555 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18556 does not wrap. */
18557 if (mode == DImode)
18558 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18560 /* For SImode, rlwinm can do everything. */
18561 if (mode == SImode)
18562 return (nb < 32 && ne < 32);
18564 return false;
18567 /* Return the instruction template for an AND with mask in mode MODE, with
18568 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18570 const char *
18571 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18573 int nb, ne;
18575 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18576 gcc_unreachable ();
18578 if (mode == DImode && ne == 0)
18580 operands[3] = GEN_INT (63 - nb);
18581 if (dot)
18582 return "rldicl. %0,%1,0,%3";
18583 return "rldicl %0,%1,0,%3";
18586 if (mode == DImode && nb == 63)
18588 operands[3] = GEN_INT (63 - ne);
18589 if (dot)
18590 return "rldicr. %0,%1,0,%3";
18591 return "rldicr %0,%1,0,%3";
18594 if (nb < 32 && ne < 32)
18596 operands[3] = GEN_INT (31 - nb);
18597 operands[4] = GEN_INT (31 - ne);
18598 if (dot)
18599 return "rlwinm. %0,%1,0,%3,%4";
18600 return "rlwinm %0,%1,0,%3,%4";
18603 gcc_unreachable ();
18606 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18607 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18608 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
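/* Example (editorial): in SImode, (x << 4) & 0xff0 is a single rlwinm
   (sh = 4 equals ne = 4), while (x << 8) & 0xff0 is rejected below,
   since an ASHIFT with ne < sh asks for mask bits the shift has already
   cleared.  */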
18610 bool
18611 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18613 int nb, ne;
18615 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18616 return false;
18618 int n = GET_MODE_PRECISION (mode);
18619 int sh = -1;
18621 if (CONST_INT_P (XEXP (shift, 1)))
18623 sh = INTVAL (XEXP (shift, 1));
18624 if (sh < 0 || sh >= n)
18625 return false;
18628 rtx_code code = GET_CODE (shift);
18630 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18631 if (sh == 0)
18632 code = ROTATE;
18634 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18635 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18636 code = ASHIFT;
18637 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18639 code = LSHIFTRT;
18640 sh = n - sh;
18643 /* DImode rotates need rld*. */
18644 if (mode == DImode && code == ROTATE)
18645 return (nb == 63 || ne == 0 || ne == sh);
18647 /* SImode rotates need rlw*. */
18648 if (mode == SImode && code == ROTATE)
18649 return (nb < 32 && ne < 32 && sh < 32);
18651 /* Wrap-around masks are only okay for rotates. */
18652 if (ne > nb)
18653 return false;
18655 /* Variable shifts are only okay for rotates. */
18656 if (sh < 0)
18657 return false;
18659 /* Don't allow ASHIFT if the mask is wrong for that. */
18660 if (code == ASHIFT && ne < sh)
18661 return false;
18663 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18664 if the mask is wrong for that. */
18665 if (nb < 32 && ne < 32 && sh < 32
18666 && !(code == LSHIFTRT && nb >= 32 - sh))
18667 return true;
18669 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18670 if the mask is wrong for that. */
18671 if (code == LSHIFTRT)
18672 sh = 64 - sh;
18673 if (nb == 63 || ne == 0 || ne == sh)
18674 return !(code == LSHIFTRT && nb >= sh);
18676 return false;
18679 /* Return the instruction template for a shift with mask in mode MODE, with
18680 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18682 const char *
18683 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18685 int nb, ne;
18687 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18688 gcc_unreachable ();
18690 if (mode == DImode && ne == 0)
18692 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18693 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18694 operands[3] = GEN_INT (63 - nb);
18695 if (dot)
18696 return "rld%I2cl. %0,%1,%2,%3";
18697 return "rld%I2cl %0,%1,%2,%3";
18700 if (mode == DImode && nb == 63)
18702 operands[3] = GEN_INT (63 - ne);
18703 if (dot)
18704 return "rld%I2cr. %0,%1,%2,%3";
18705 return "rld%I2cr %0,%1,%2,%3";
18708 if (mode == DImode
18709 && GET_CODE (operands[4]) != LSHIFTRT
18710 && CONST_INT_P (operands[2])
18711 && ne == INTVAL (operands[2]))
18713 operands[3] = GEN_INT (63 - nb);
18714 if (dot)
18715 return "rld%I2c. %0,%1,%2,%3";
18716 return "rld%I2c %0,%1,%2,%3";
18719 if (nb < 32 && ne < 32)
18721 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18722 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18723 operands[3] = GEN_INT (31 - nb);
18724 operands[4] = GEN_INT (31 - ne);
18725 /* This insn can also be a 64-bit rotate with mask that really makes
18726 it just a shift right (with mask); the %h below are to adjust for
18727 that situation (shift count is >= 32 in that case). */
18728 if (dot)
18729 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18730 return "rlw%I2nm %0,%1,%h2,%3,%4";
18733 gcc_unreachable ();
18736 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18737 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18738 ASHIFT, or LSHIFTRT) in mode MODE. */
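/* Example (editorial): in SImode, (y & ~0xff0) | ((x << 4) & 0xff0) can
   be a single rlwimi (mask 0xff0 gives nb = 11, ne = 4 = sh); the same
   mask with sh = 8 fails the ASHIFT check below.  */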
18740 bool
18741 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18743 int nb, ne;
18745 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18746 return false;
18748 int n = GET_MODE_PRECISION (mode);
18750 int sh = INTVAL (XEXP (shift, 1));
18751 if (sh < 0 || sh >= n)
18752 return false;
18754 rtx_code code = GET_CODE (shift);
18756 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18757 if (sh == 0)
18758 code = ROTATE;
18760 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18761 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18762 code = ASHIFT;
18763 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18765 code = LSHIFTRT;
18766 sh = n - sh;
18769 /* DImode rotates need rldimi. */
18770 if (mode == DImode && code == ROTATE)
18771 return (ne == sh);
18773 /* SImode rotates need rlwimi. */
18774 if (mode == SImode && code == ROTATE)
18775 return (nb < 32 && ne < 32 && sh < 32);
18777 /* Wrap-around masks are only okay for rotates. */
18778 if (ne > nb)
18779 return false;
18781 /* Don't allow ASHIFT if the mask is wrong for that. */
18782 if (code == ASHIFT && ne < sh)
18783 return false;
18785 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18786 if the mask is wrong for that. */
18787 if (nb < 32 && ne < 32 && sh < 32
18788 && !(code == LSHIFTRT && nb >= 32 - sh))
18789 return true;
18791 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18792 if the mask is wrong for that. */
18793 if (code == LSHIFTRT)
18794 sh = 64 - sh;
18795 if (ne == sh)
18796 return !(code == LSHIFTRT && nb >= sh);
18798 return false;
18801 /* Return the instruction template for an insert with mask in mode MODE, with
18802 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18804 const char *
18805 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18807 int nb, ne;
18809 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18810 gcc_unreachable ();
18812 /* Prefer rldimi because rlwimi is cracked (split into multiple internal operations). */
18813 if (TARGET_POWERPC64
18814 && (!dot || mode == DImode)
18815 && GET_CODE (operands[4]) != LSHIFTRT
18816 && ne == INTVAL (operands[2]))
18818 operands[3] = GEN_INT (63 - nb);
18819 if (dot)
18820 return "rldimi. %0,%1,%2,%3";
18821 return "rldimi %0,%1,%2,%3";
18824 if (nb < 32 && ne < 32)
18826 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18827 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18828 operands[3] = GEN_INT (31 - nb);
18829 operands[4] = GEN_INT (31 - ne);
18830 if (dot)
18831 return "rlwimi. %0,%1,%2,%3,%4";
18832 return "rlwimi %0,%1,%2,%3,%4";
18835 gcc_unreachable ();
18838 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18839 using two machine instructions. */
18841 bool
18842 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18844 /* There are two kinds of AND we can handle with two insns:
18845 1) those we can do with two rl* insn;
18846 2) ori[s];xori[s].
18848 We do not handle that last case yet. */
18850 /* If there is just one stretch of ones, we can do it. */
18851 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18852 return true;
18854 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18855 one insn, we can do the whole thing with two. */
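/* Worked example (editorial): VAL = 0xf0f has a hole at bits 4..7.
   Below, bit1 = 0x001 (lowest set bit), bit2 = 0x010 (bottom of the
   hole), and bit3 = 0x100 (first one above the hole), so val + bit3 -
   bit2 = 0xfff fills the hole; that is a single stretch of ones, so
   the AND can be done with two insns.  */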
18856 unsigned HOST_WIDE_INT val = INTVAL (c);
18857 unsigned HOST_WIDE_INT bit1 = val & -val;
18858 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18859 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18860 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18861 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18864 /* Emit a potentially record-form instruction, setting DST from SRC.
18865 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18866 signed comparison of DST with zero. If DOT is 1, the generated RTL
18867 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18868 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18869 a separate COMPARE. */
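/* Illustration (editorial): with DOT == 1 we emit
   (parallel [(set CCREG (compare SRC 0)) (clobber DST)]),
   and with DOT == 2
   (parallel [(set CCREG (compare SRC 0)) (set DST SRC)]),
   matching the record-form (dot) instruction patterns.  */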
18871 static void
18872 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18874 if (dot == 0)
18876 emit_move_insn (dst, src);
18877 return;
18880 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18882 emit_move_insn (dst, src);
18883 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18884 return;
18887 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18888 if (dot == 1)
18890 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18891 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18893 else
18895 rtx set = gen_rtx_SET (dst, src);
18896 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18900 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18901 If EXPAND is true, split rotate-and-mask instructions we generate to
18902 their constituent parts as well (this is used during expand); if DOT
18903 is 1, make the last insn a record-form instruction clobbering the
18904 destination GPR and setting the CC reg (from operands[3]); if 2, set
18905 that GPR as well as the CC reg. */
18907 void
18908 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18910 gcc_assert (!(expand && dot));
18912 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18914 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18915 shift right. This generates better code than doing the masks without
18916 shifts, or shifting first right and then left. */
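/* Worked example (editorial): 0x1ff00000000 (ones in bits 32..40) is a
   single stretch but not a one-insn AND mask; with shift = 63 - 40 = 23
   we emit x << 23, an AND with 0xff80000000000000 (a valid rldicr
   mask), and a logical shift right by 23.  */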
18917 int nb, ne;
18918 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18920 gcc_assert (mode == DImode);
18922 int shift = 63 - nb;
18923 if (expand)
18925 rtx tmp1 = gen_reg_rtx (DImode);
18926 rtx tmp2 = gen_reg_rtx (DImode);
18927 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18928 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18929 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18931 else
18933 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18934 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18935 emit_move_insn (operands[0], tmp);
18936 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18937 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18939 return;
18942 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18943 that does the rest. */
18944 unsigned HOST_WIDE_INT bit1 = val & -val;
18945 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18946 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18947 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18949 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18950 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18952 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18954 /* Two "no-rotate"-and-mask instructions, for SImode. */
18955 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18957 gcc_assert (mode == SImode);
18959 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18960 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18961 emit_move_insn (reg, tmp);
18962 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18963 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18964 return;
18967 gcc_assert (mode == DImode);
18969 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18970 insns; we have to do the first in SImode, because it wraps. */
18971 if (mask2 <= 0xffffffff
18972 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18974 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18975 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18976 GEN_INT (mask1));
18977 rtx reg_low = gen_lowpart (SImode, reg);
18978 emit_move_insn (reg_low, tmp);
18979 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18980 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18981 return;
18984 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18985 at the top end), rotate back and clear the other hole. */
18986 int right = exact_log2 (bit3);
18987 int left = 64 - right;
18989 /* Rotate the mask too. */
18990 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
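/* Continuing with a worked example (editorial): VAL = 0xff0fffffff
   (hole at bits 28..31) gives mask2 = 0xffffffffff, too wide for the
   SImode case above; with right = 32 and left = 32 we rotate left 32 so
   the hole lands in bits 60..63, AND with the rotated mask1
   0x0fffffffffffffff (an rldicl mask), rotate back, and AND with mask2
   (also an rldicl mask).  */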
18992 if (expand)
18994 rtx tmp1 = gen_reg_rtx (DImode);
18995 rtx tmp2 = gen_reg_rtx (DImode);
18996 rtx tmp3 = gen_reg_rtx (DImode);
18997 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18998 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18999 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19000 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19002 else
19004 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19005 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19006 emit_move_insn (operands[0], tmp);
19007 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19008 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19009 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19013 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
19014 for lfq and stfq insns, provided the registers are hard registers. */
19016 int
19017 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19019 /* We might have been passed a SUBREG. */
19020 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19021 return 0;
19023 /* We might have been passed non floating point registers. */
19024 if (!FP_REGNO_P (REGNO (reg1))
19025 || !FP_REGNO_P (REGNO (reg2)))
19026 return 0;
19028 return (REGNO (reg1) == REGNO (reg2) - 1);
19031 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
19032 addr1 and addr2 must be in consecutive memory locations
19033 (addr2 == addr1 + 8). */
19035 int
19036 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19038 rtx addr1, addr2;
19039 unsigned int reg1, reg2;
19040 int offset1, offset2;
19042 /* The mems cannot be volatile. */
19043 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19044 return 0;
19046 addr1 = XEXP (mem1, 0);
19047 addr2 = XEXP (mem2, 0);
19049 /* Extract an offset (if used) from the first addr. */
19050 if (GET_CODE (addr1) == PLUS)
19052 /* If not a REG, return zero. */
19053 if (GET_CODE (XEXP (addr1, 0)) != REG)
19054 return 0;
19055 else
19057 reg1 = REGNO (XEXP (addr1, 0));
19058 /* The offset must be constant! */
19059 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19060 return 0;
19061 offset1 = INTVAL (XEXP (addr1, 1));
19064 else if (GET_CODE (addr1) != REG)
19065 return 0;
19066 else
19068 reg1 = REGNO (addr1);
19069 /* This was a simple (mem (reg)) expression. Offset is 0. */
19070 offset1 = 0;
19073 /* And now for the second addr. */
19074 if (GET_CODE (addr2) == PLUS)
19076 /* If not a REG, return zero. */
19077 if (GET_CODE (XEXP (addr2, 0)) != REG)
19078 return 0;
19079 else
19081 reg2 = REGNO (XEXP (addr2, 0));
19082 /* The offset must be constant. */
19083 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19084 return 0;
19085 offset2 = INTVAL (XEXP (addr2, 1));
19088 else if (GET_CODE (addr2) != REG)
19089 return 0;
19090 else
19092 reg2 = REGNO (addr2);
19093 /* This was a simple (mem (reg)) expression. Offset is 0. */
19094 offset2 = 0;
19097 /* Both of these must have the same base register. */
19098 if (reg1 != reg2)
19099 return 0;
19101 /* The offset for the second addr must be 8 more than the first addr. */
19102 if (offset2 != offset1 + 8)
19103 return 0;
19105 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19106 instructions. */
19107 return 1;
19111 rtx
19112 rs6000_secondary_memory_needed_rtx (machine_mode mode)
19114 static bool eliminated = false;
19115 rtx ret;
19117 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
19118 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19119 else
19121 rtx mem = cfun->machine->sdmode_stack_slot;
19122 gcc_assert (mem != NULL_RTX);
19124 if (!eliminated)
19126 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
19127 cfun->machine->sdmode_stack_slot = mem;
19128 eliminated = true;
19130 ret = mem;
19133 if (TARGET_DEBUG_ADDR)
19135 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
19136 GET_MODE_NAME (mode));
19137 if (!ret)
19138 fprintf (stderr, "\tNULL_RTX\n");
19139 else
19140 debug_rtx (ret);
19143 return ret;
19146 /* Return the mode to be used for memory when a secondary memory
19147 location is needed. For SDmode values we need to use DDmode; in
19148 all other cases we can use the same mode. */
19149 machine_mode
19150 rs6000_secondary_memory_needed_mode (machine_mode mode)
19152 if (lra_in_progress && mode == SDmode)
19153 return DDmode;
19154 return mode;
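/* For example (editorial): during LRA, an SDmode value spilled through
   memory gets a DDmode stack slot, since SDmode values are kept in
   floating point registers in DDmode format.  */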
19157 static tree
19158 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
19160 /* Don't walk into types. */
19161 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
19163 *walk_subtrees = 0;
19164 return NULL_TREE;
19167 switch (TREE_CODE (*tp))
19169 case VAR_DECL:
19170 case PARM_DECL:
19171 case FIELD_DECL:
19172 case RESULT_DECL:
19173 case SSA_NAME:
19174 case REAL_CST:
19175 case MEM_REF:
19176 case VIEW_CONVERT_EXPR:
19177 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
19178 return *tp;
19179 break;
19180 default:
19181 break;
19184 return NULL_TREE;
19187 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19188 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19189 only work on the traditional altivec registers, note if an altivec register
19190 was chosen. */
19192 static enum rs6000_reg_type
19193 register_to_reg_type (rtx reg, bool *is_altivec)
19195 HOST_WIDE_INT regno;
19196 enum reg_class rclass;
19198 if (GET_CODE (reg) == SUBREG)
19199 reg = SUBREG_REG (reg);
19201 if (!REG_P (reg))
19202 return NO_REG_TYPE;
19204 regno = REGNO (reg);
19205 if (regno >= FIRST_PSEUDO_REGISTER)
19207 if (!lra_in_progress && !reload_in_progress && !reload_completed)
19208 return PSEUDO_REG_TYPE;
19210 regno = true_regnum (reg);
19211 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19212 return PSEUDO_REG_TYPE;
19215 gcc_assert (regno >= 0);
19217 if (is_altivec && ALTIVEC_REGNO_P (regno))
19218 *is_altivec = true;
19220 rclass = rs6000_regno_regclass[regno];
19221 return reg_class_to_reg_type[(int)rclass];
19224 /* Helper function to return the cost of adding a TOC entry address. */
19226 static inline int
19227 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19229 int ret;
19231 if (TARGET_CMODEL != CMODEL_SMALL)
19232 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19234 else
19235 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19237 return ret;
19240 /* Helper function for rs6000_secondary_reload to determine whether the memory
19241 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19242 needs reloading. Return negative if the memory is not handled by the memory
19243 helper functions (so a different reload method should be tried), 0 if no
19244 additional instructions are needed, and positive to give the extra cost of
19245 handling the memory. */
19247 static int
19248 rs6000_secondary_reload_memory (rtx addr,
19249 enum reg_class rclass,
19250 machine_mode mode)
19252 int extra_cost = 0;
19253 rtx reg, and_arg, plus_arg0, plus_arg1;
19254 addr_mask_type addr_mask;
19255 const char *type = NULL;
19256 const char *fail_msg = NULL;
19258 if (GPR_REG_CLASS_P (rclass))
19259 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19261 else if (rclass == FLOAT_REGS)
19262 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19264 else if (rclass == ALTIVEC_REGS)
19265 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19267 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19268 else if (rclass == VSX_REGS)
19269 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19270 & ~RELOAD_REG_AND_M16);
19272 /* If the register allocator hasn't made up its mind yet on the register
19273 class to use, settle on sensible defaults. */
19274 else if (rclass == NO_REGS)
19276 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19277 & ~RELOAD_REG_AND_M16);
19279 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19280 addr_mask &= ~(RELOAD_REG_INDEXED
19281 | RELOAD_REG_PRE_INCDEC
19282 | RELOAD_REG_PRE_MODIFY);
19285 else
19286 addr_mask = 0;
19288 /* If the register isn't valid in this register class, just return now. */
19289 if ((addr_mask & RELOAD_REG_VALID) == 0)
19291 if (TARGET_DEBUG_ADDR)
19293 fprintf (stderr,
19294 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19295 "not valid in class\n",
19296 GET_MODE_NAME (mode), reg_class_names[rclass]);
19297 debug_rtx (addr);
19300 return -1;
19303 switch (GET_CODE (addr))
19305 /* Does the register class support auto-update forms for this mode? We
19306 don't need a scratch register, since PowerPC only supports
19307 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19308 case PRE_INC:
19309 case PRE_DEC:
19310 reg = XEXP (addr, 0);
19311 if (!base_reg_operand (addr, GET_MODE (reg)))
19313 fail_msg = "no base register #1";
19314 extra_cost = -1;
19317 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19319 extra_cost = 1;
19320 type = "update";
19322 break;
19324 case PRE_MODIFY:
19325 reg = XEXP (addr, 0);
19326 plus_arg1 = XEXP (addr, 1);
19327 if (!base_reg_operand (reg, GET_MODE (reg))
19328 || GET_CODE (plus_arg1) != PLUS
19329 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19331 fail_msg = "bad PRE_MODIFY";
19332 extra_cost = -1;
19335 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19337 extra_cost = 1;
19338 type = "update";
19340 break;
19342 /* Do we need to simulate AND -16 to clear the bottom address bits used
19343 in VMX load/stores? Only allow the AND for vector sizes. */
19344 case AND:
19345 and_arg = XEXP (addr, 0);
19346 if (GET_MODE_SIZE (mode) != 16
19347 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19348 || INTVAL (XEXP (addr, 1)) != -16)
19350 fail_msg = "bad Altivec AND #1";
19351 extra_cost = -1;
19354 if (rclass != ALTIVEC_REGS)
19356 if (legitimate_indirect_address_p (and_arg, false))
19357 extra_cost = 1;
19359 else if (legitimate_indexed_address_p (and_arg, false))
19360 extra_cost = 2;
19362 else
19364 fail_msg = "bad Altivec AND #2";
19365 extra_cost = -1;
19368 type = "and";
19370 break;
19372 /* If this is an indirect address, make sure it is a base register. */
19373 case REG:
19374 case SUBREG:
19375 if (!legitimate_indirect_address_p (addr, false))
19377 extra_cost = 1;
19378 type = "move";
19380 break;
19382 /* If this is an indexed address, make sure the register class can handle
19383 indexed addresses for this mode. */
19384 case PLUS:
19385 plus_arg0 = XEXP (addr, 0);
19386 plus_arg1 = XEXP (addr, 1);
19388 /* (plus (plus (reg) (constant)) (constant)) is generated during
19389 push_reload processing, so handle it now. */
19390 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19392 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19394 extra_cost = 1;
19395 type = "offset";
19399 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19400 push_reload processing, so handle it now. */
19401 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19403 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19405 extra_cost = 1;
19406 type = "indexed #2";
19410 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19412 fail_msg = "no base register #2";
19413 extra_cost = -1;
19416 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19418 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19419 || !legitimate_indexed_address_p (addr, false))
19421 extra_cost = 1;
19422 type = "indexed";
19426 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19427 && CONST_INT_P (plus_arg1))
19429 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19431 extra_cost = 1;
19432 type = "vector d-form offset";
19436 /* Make sure the register class can handle offset addresses. */
19437 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19439 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19441 extra_cost = 1;
19442 type = "offset #2";
19446 else
19448 fail_msg = "bad PLUS";
19449 extra_cost = -1;
19452 break;
19454 case LO_SUM:
19455 /* Quad offsets are restricted and can't handle normal addresses. */
19456 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19458 extra_cost = -1;
19459 type = "vector d-form lo_sum";
19462 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19464 fail_msg = "bad LO_SUM";
19465 extra_cost = -1;
19468 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19470 extra_cost = 1;
19471 type = "lo_sum";
19473 break;
19475 /* Static addresses need to create a TOC entry. */
19476 case CONST:
19477 case SYMBOL_REF:
19478 case LABEL_REF:
19479 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19481 extra_cost = -1;
19482 type = "vector d-form lo_sum #2";
19485 else
19487 type = "address";
19488 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19490 break;
19492 /* TOC references look like offsetable memory. */
19493 case UNSPEC:
19494 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19496 fail_msg = "bad UNSPEC";
19497 extra_cost = -1;
19500 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19502 extra_cost = -1;
19503 type = "vector d-form lo_sum #3";
19506 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19508 extra_cost = 1;
19509 type = "toc reference";
19511 break;
19513 default:
19515 fail_msg = "bad address";
19516 extra_cost = -1;
19520 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19522 if (extra_cost < 0)
19523 fprintf (stderr,
19524 "rs6000_secondary_reload_memory error: mode = %s, "
19525 "class = %s, addr_mask = '%s', %s\n",
19526 GET_MODE_NAME (mode),
19527 reg_class_names[rclass],
19528 rs6000_debug_addr_mask (addr_mask, false),
19529 (fail_msg != NULL) ? fail_msg : "<bad address>");
19531 else
19532 fprintf (stderr,
19533 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19534 "addr_mask = '%s', extra cost = %d, %s\n",
19535 GET_MODE_NAME (mode),
19536 reg_class_names[rclass],
19537 rs6000_debug_addr_mask (addr_mask, false),
19538 extra_cost,
19539 (type) ? type : "<none>");
19541 debug_rtx (addr);
19544 return extra_cost;
19547 /* Helper function for rs6000_secondary_reload to return true if a move to a
19548 different register class is really a simple move. */
19550 static bool
19551 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19552 enum rs6000_reg_type from_type,
19553 machine_mode mode)
19555 int size;
19557 /* Add support for various direct moves available. In this function, we only
19558 look at cases where we don't need any extra registers, and one or more
19559 simple move insns are issued. At present, 32-bit integers are not allowed
19560 in FPR/VSX registers. Single precision binary floating point is not a
19561 simple move because we need to convert to the single precision memory layout.
19562 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19563 need special direct move handling, which we do not support yet. */
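/* Example (editorial, assuming a power8-class target): with
   TARGET_DIRECT_MOVE in 64-bit mode, a DImode move between a GPR and a
   VSX register is a single mtvsrd/mfvsrd, hence a simple move; SFmode
   is excluded above because the value must be converted between the
   scalar memory format and the register format.  */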
19564 size = GET_MODE_SIZE (mode);
19565 if (TARGET_DIRECT_MOVE
19566 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
19567 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19568 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19569 return true;
19571 else if (TARGET_DIRECT_MOVE_128 && size == 16 && mode != TDmode
19572 && ((to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19573 || (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)))
19574 return true;
19576 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19577 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19578 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19579 return true;
19581 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19582 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19583 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19584 return true;
19586 return false;
19589 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19590 special direct moves that involve allocating an extra register; return true
19591 and store the insn code and extra cost of the helper in SRI if there is
19592 such a helper function, or false if not. */
19594 static bool
19595 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19596 enum rs6000_reg_type from_type,
19597 machine_mode mode,
19598 secondary_reload_info *sri,
19599 bool altivec_p)
19601 bool ret = false;
19602 enum insn_code icode = CODE_FOR_nothing;
19603 int cost = 0;
19604 int size = GET_MODE_SIZE (mode);
19606 if (TARGET_POWERPC64 && size == 16)
19608 /* Handle moving 128-bit values from GPRs to VSX registers on
19609 ISA 2.07 (power8, power9) when running in 64-bit mode using
19610 XXPERMDI to glue the two 64-bit values back together. */
19611 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19613 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19614 icode = reg_addr[mode].reload_vsx_gpr;
19617 /* Handle moving 128-bit values from VSX registers to GPRs on
19618 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19619 bottom 64-bit value. */
19620 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19622 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19623 icode = reg_addr[mode].reload_gpr_vsx;
19627 else if (TARGET_POWERPC64 && mode == SFmode)
19629 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19631 cost = 3; /* xscvdpspn, mfvsrd, and. */
19632 icode = reg_addr[mode].reload_gpr_vsx;
19635 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19637 cost = 2; /* mtvsrz, xscvspdpn. */
19638 icode = reg_addr[mode].reload_vsx_gpr;
19642 else if (!TARGET_POWERPC64 && size == 8)
19644 /* Handle moving 64-bit values from GPRs to floating point registers on
19645 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19646 32-bit values back together. Altivec register classes must be handled
19647 specially since a different instruction is used, and the secondary
19648 reload support requires a single instruction class in the scratch
19649 register constraint. However, right now TFmode is not allowed in
19650 Altivec registers, so the pattern will never match. */
19651 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19653 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19654 icode = reg_addr[mode].reload_fpr_gpr;
19658 if (icode != CODE_FOR_nothing)
19660 ret = true;
19661 if (sri)
19663 sri->icode = icode;
19664 sri->extra_cost = cost;
19668 return ret;
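/* As a rough sketch of the cost accounting above, a TImode move from a GPR
   pair into a VSX register on a 64-bit power8 expands along the lines of

   mtvsrd vs0,r5
   mtvsrd vs1,r6
   xxpermdi vs34,vs0,vs1,0

   (the register numbers and permute immediate are purely illustrative),
   which is why the extra cost is 3. */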
19671 /* Return whether a move between two register classes can be done either
19672 directly (simple move) or via a pattern that uses a single extra temporary
19673 (using ISA 2.07's direct move in this case). */
19675 static bool
19676 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19677 enum rs6000_reg_type from_type,
19678 machine_mode mode,
19679 secondary_reload_info *sri,
19680 bool altivec_p)
19682 /* Fall back to load/store reloads if either type is not a register. */
19683 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19684 return false;
19686 /* If we haven't allocated registers yet, assume the move can be done for the
19687 standard register types. */
19688 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19689 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19690 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19691 return true;
19693 /* A move within the same set of registers is a simple move for
19694 non-specialized registers. */
19695 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19696 return true;
19698 /* Check whether a simple move can be done directly. */
19699 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19701 if (sri)
19703 sri->icode = CODE_FOR_nothing;
19704 sri->extra_cost = 0;
19706 return true;
19709 /* Now check if we can do it in a few steps. */
19710 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19711 altivec_p);
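/* Note that SRI may be a null pointer here: rs6000_secondary_memory_needed
   below calls this function purely as a predicate, with no
   secondary_reload_info to fill in. */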
19714 /* Inform reload about cases where moving X with a mode MODE to a register in
19715 RCLASS requires an extra scratch or immediate register. Return the class
19716 needed for the immediate register.
19718 For VSX and Altivec, we may need a register to convert sp+offset into
19719 reg+sp.
19721 For misaligned 64-bit gpr loads and stores we need a register to
19722 convert an offset address to indirect. */
19724 static reg_class_t
19725 rs6000_secondary_reload (bool in_p,
19726 rtx x,
19727 reg_class_t rclass_i,
19728 machine_mode mode,
19729 secondary_reload_info *sri)
19731 enum reg_class rclass = (enum reg_class) rclass_i;
19732 reg_class_t ret = ALL_REGS;
19733 enum insn_code icode;
19734 bool default_p = false;
19735 bool done_p = false;
19737 /* Allow subreg of memory before/during reload. */
19738 bool memory_p = (MEM_P (x)
19739 || (!reload_completed && GET_CODE (x) == SUBREG
19740 && MEM_P (SUBREG_REG (x))));
19742 sri->icode = CODE_FOR_nothing;
19743 sri->t_icode = CODE_FOR_nothing;
19744 sri->extra_cost = 0;
19745 icode = ((in_p)
19746 ? reg_addr[mode].reload_load
19747 : reg_addr[mode].reload_store);
19749 if (REG_P (x) || register_operand (x, mode))
19751 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19752 bool altivec_p = (rclass == ALTIVEC_REGS);
19753 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19755 if (!in_p)
19757 enum rs6000_reg_type exchange = to_type;
19758 to_type = from_type;
19759 from_type = exchange;
19762 /* Can we do a direct move of some sort? */
19763 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19764 altivec_p))
19766 icode = (enum insn_code)sri->icode;
19767 default_p = false;
19768 done_p = true;
19769 ret = NO_REGS;
19773 /* Make sure 0.0 is not reloaded or forced into memory. */
19774 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19776 ret = NO_REGS;
19777 default_p = false;
19778 done_p = true;
19781 /* If this is a scalar floating point value and we want to load it into the
19782 traditional Altivec registers, do the move via a traditional floating
19783 point register, unless we have D-form addressing. Also make sure that
19784 non-zero constants use an FPR. */
19785 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19786 && !mode_supports_vmx_dform (mode)
19787 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19788 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19790 ret = FLOAT_REGS;
19791 default_p = false;
19792 done_p = true;
19795 /* Handle reload of load/stores if we have reload helper functions. */
19796 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19798 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19799 mode);
19801 if (extra_cost >= 0)
19803 done_p = true;
19804 ret = NO_REGS;
19805 if (extra_cost > 0)
19807 sri->extra_cost = extra_cost;
19808 sri->icode = icode;
19813 /* Handle unaligned loads and stores of integer registers. */
19814 if (!done_p && TARGET_POWERPC64
19815 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19816 && memory_p
19817 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19819 rtx addr = XEXP (x, 0);
19820 rtx off = address_offset (addr);
19822 if (off != NULL_RTX)
19824 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19825 unsigned HOST_WIDE_INT offset = INTVAL (off);
19827 /* We need a secondary reload when our legitimate_address_p
19828 says the address is good (as otherwise the entire address
19829 will be reloaded), and the offset is not a multiple of
19830 four or we have an address wrap. Address wrap will only
19831 occur for LO_SUMs since legitimate_offset_address_p
19832 rejects addresses for 16-byte mems that will wrap. */
19833 if (GET_CODE (addr) == LO_SUM
19834 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19835 && ((offset & 3) != 0
19836 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19837 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19838 && (offset & 3) != 0))
19840 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19841 if (in_p)
19842 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19843 : CODE_FOR_reload_di_load);
19844 else
19845 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19846 : CODE_FOR_reload_di_store);
19847 sri->extra_cost = 2;
19848 ret = NO_REGS;
19849 done_p = true;
19851 else
19852 default_p = true;
19854 else
19855 default_p = true;
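/* For example, "ld r3,6(r4)" cannot be encoded, since ld and std are
   DS-form instructions whose displacement must be a multiple of 4; the
   reload_di_load/reload_di_store patterns selected above move the whole
   offset address into the scratch register so that an indirect access can
   be used instead. */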
19858 if (!done_p && !TARGET_POWERPC64
19859 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19860 && memory_p
19861 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19863 rtx addr = XEXP (x, 0);
19864 rtx off = address_offset (addr);
19866 if (off != NULL_RTX)
19868 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19869 unsigned HOST_WIDE_INT offset = INTVAL (off);
19871 /* We need a secondary reload when our legitimate_address_p
19872 says the address is good (as otherwise the entire address
19873 will be reloaded), and we have a wrap.
19875 legitimate_lo_sum_address_p allows LO_SUM addresses to
19876 have any offset so test for wrap in the low 16 bits.
19878 legitimate_offset_address_p checks for the range
19879 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19880 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19881 [0x7ff4,0x7fff] respectively, so test for the
19882 intersection of these ranges, [0x7ffc,0x7fff] and
19883 [0x7ff4,0x7ff7] respectively.
19885 Note that the address we see here may have been
19886 manipulated by legitimize_reload_address. */
19887 if (GET_CODE (addr) == LO_SUM
19888 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19889 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19891 if (in_p)
19892 sri->icode = CODE_FOR_reload_si_load;
19893 else
19894 sri->icode = CODE_FOR_reload_si_store;
19895 sri->extra_cost = 2;
19896 ret = NO_REGS;
19897 done_p = true;
19899 else
19900 default_p = true;
19902 else
19903 default_p = true;
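/* Concretely: with 32-bit registers, a DImode access at offset 0x7ffc
   would need its second word at 0x8000, which no longer fits in a signed
   16-bit displacement; that address wrap is what the test above catches. */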
19906 if (!done_p)
19907 default_p = true;
19909 if (default_p)
19910 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19912 gcc_assert (ret != ALL_REGS);
19914 if (TARGET_DEBUG_ADDR)
19916 fprintf (stderr,
19917 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19918 "mode = %s",
19919 reg_class_names[ret],
19920 in_p ? "true" : "false",
19921 reg_class_names[rclass],
19922 GET_MODE_NAME (mode));
19924 if (reload_completed)
19925 fputs (", after reload", stderr);
19927 if (!done_p)
19928 fputs (", done_p not set", stderr);
19930 if (default_p)
19931 fputs (", default secondary reload", stderr);
19933 if (sri->icode != CODE_FOR_nothing)
19934 fprintf (stderr, ", reload func = %s, extra cost = %d",
19935 insn_data[sri->icode].name, sri->extra_cost);
19937 else if (sri->extra_cost > 0)
19938 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19940 fputs ("\n", stderr);
19941 debug_rtx (x);
19944 return ret;
19947 /* Better tracing for rs6000_secondary_reload_inner. */
19949 static void
19950 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19951 bool store_p)
19953 rtx set, clobber;
19955 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19957 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19958 store_p ? "store" : "load");
19960 if (store_p)
19961 set = gen_rtx_SET (mem, reg);
19962 else
19963 set = gen_rtx_SET (reg, mem);
19965 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19966 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19969 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19970 ATTRIBUTE_NORETURN;
19972 static void
19973 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19974 bool store_p)
19976 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19977 gcc_unreachable ();
19980 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19981 reload helper functions. These were identified in
19982 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19983 reload, it calls the insns:
19984 reload_<RELOAD:mode>_<P:mptrsize>_store
19985 reload_<RELOAD:mode>_<P:mptrsize>_load
19987 which in turn calls this function, to do whatever is necessary to create
19988 valid addresses. */
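/* For instance, a V4SImode reload on a 64-bit target would presumably be
   routed through patterns named reload_v4si_di_store and
   reload_v4si_di_load under the naming scheme above. */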
19990 void
19991 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19993 int regno = true_regnum (reg);
19994 machine_mode mode = GET_MODE (reg);
19995 addr_mask_type addr_mask;
19996 rtx addr;
19997 rtx new_addr;
19998 rtx op_reg, op0, op1;
19999 rtx and_op;
20000 rtx cc_clobber;
20001 rtvec rv;
20003 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20004 || !base_reg_operand (scratch, GET_MODE (scratch)))
20005 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20007 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20008 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20010 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20011 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20013 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20014 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20016 else
20017 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20019 /* Make sure the mode is valid in this register class. */
20020 if ((addr_mask & RELOAD_REG_VALID) == 0)
20021 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20023 if (TARGET_DEBUG_ADDR)
20024 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20026 new_addr = addr = XEXP (mem, 0);
20027 switch (GET_CODE (addr))
20029 /* Does the register class support auto update forms for this mode? If
20030 not, do the update now. We don't need a scratch register, since the
20031 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20032 case PRE_INC:
20033 case PRE_DEC:
20034 op_reg = XEXP (addr, 0);
20035 if (!base_reg_operand (op_reg, Pmode))
20036 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20038 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20040 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
20041 new_addr = op_reg;
20043 break;
20045 case PRE_MODIFY:
20046 op0 = XEXP (addr, 0);
20047 op1 = XEXP (addr, 1);
20048 if (!base_reg_operand (op0, Pmode)
20049 || GET_CODE (op1) != PLUS
20050 || !rtx_equal_p (op0, XEXP (op1, 0)))
20051 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20053 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20055 emit_insn (gen_rtx_SET (op0, op1));
20056 new_addr = reg;
20058 break;
20060 /* Do we need to simulate AND -16 to clear the bottom address bits used
20061 in VMX load/stores? */
20062 case AND:
20063 op0 = XEXP (addr, 0);
20064 op1 = XEXP (addr, 1);
20065 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20067 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20068 op_reg = op0;
20070 else if (GET_CODE (op0) == PLUS)
20072 emit_insn (gen_rtx_SET (scratch, op0));
20073 op_reg = scratch;
20076 else
20077 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20079 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20080 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20081 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20082 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20083 new_addr = scratch;
20085 break;
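/* For example, an Altivec-style address such as
   (and (plus (reg 3) (reg 9)) (const_int -16)) is re-materialized here
   when the register class lacks the implicit masking of lvx/stvx: the
   PLUS is first copied into the scratch register if needed, and then an
   explicit AND of -16 is emitted into the scratch; the CC clobber is
   required because the AND patterns may set CR0. */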
20087 /* If this is an indirect address, make sure it is a base register. */
20088 case REG:
20089 case SUBREG:
20090 if (!base_reg_operand (addr, GET_MODE (addr)))
20092 emit_insn (gen_rtx_SET (scratch, addr));
20093 new_addr = scratch;
20095 break;
20097 /* If this is an indexed address, make sure the register class can handle
20098 indexed addresses for this mode. */
20099 case PLUS:
20100 op0 = XEXP (addr, 0);
20101 op1 = XEXP (addr, 1);
20102 if (!base_reg_operand (op0, Pmode))
20103 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20105 else if (int_reg_operand (op1, Pmode))
20107 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20109 emit_insn (gen_rtx_SET (scratch, addr));
20110 new_addr = scratch;
20114 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20116 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20117 || !quad_address_p (addr, mode, false))
20119 emit_insn (gen_rtx_SET (scratch, addr));
20120 new_addr = scratch;
20124 /* Make sure the register class can handle offset addresses. */
20125 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20127 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20129 emit_insn (gen_rtx_SET (scratch, addr));
20130 new_addr = scratch;
20134 else
20135 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20137 break;
20139 case LO_SUM:
20140 op0 = XEXP (addr, 0);
20141 op1 = XEXP (addr, 1);
20142 if (!base_reg_operand (op0, Pmode))
20143 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20145 else if (int_reg_operand (op1, Pmode))
20147 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20149 emit_insn (gen_rtx_SET (scratch, addr));
20150 new_addr = scratch;
20154 /* Quad offsets are restricted and can't handle normal addresses. */
20155 else if (mode_supports_vsx_dform_quad (mode))
20157 emit_insn (gen_rtx_SET (scratch, addr));
20158 new_addr = scratch;
20161 /* Make sure the register class can handle offset addresses. */
20162 else if (legitimate_lo_sum_address_p (mode, addr, false))
20164 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20166 emit_insn (gen_rtx_SET (scratch, addr));
20167 new_addr = scratch;
20171 else
20172 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20174 break;
20176 case SYMBOL_REF:
20177 case CONST:
20178 case LABEL_REF:
20179 rs6000_emit_move (scratch, addr, Pmode);
20180 new_addr = scratch;
20181 break;
20183 default:
20184 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20187 /* Adjust the address if it changed. */
20188 if (addr != new_addr)
20190 mem = replace_equiv_address_nv (mem, new_addr);
20191 if (TARGET_DEBUG_ADDR)
20192 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20195 /* Now create the move. */
20196 if (store_p)
20197 emit_insn (gen_rtx_SET (mem, reg));
20198 else
20199 emit_insn (gen_rtx_SET (reg, mem));
20201 return;
20204 /* Convert reloads involving 64-bit gprs and misaligned offset
20205 addressing, or multiple 32-bit gprs and offsets that are too large,
20206 to use indirect addressing. */
20208 void
20209 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20211 int regno = true_regnum (reg);
20212 enum reg_class rclass;
20213 rtx addr;
20214 rtx scratch_or_premodify = scratch;
20216 if (TARGET_DEBUG_ADDR)
20218 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20219 store_p ? "store" : "load");
20220 fprintf (stderr, "reg:\n");
20221 debug_rtx (reg);
20222 fprintf (stderr, "mem:\n");
20223 debug_rtx (mem);
20224 fprintf (stderr, "scratch:\n");
20225 debug_rtx (scratch);
20228 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20229 gcc_assert (GET_CODE (mem) == MEM);
20230 rclass = REGNO_REG_CLASS (regno);
20231 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20232 addr = XEXP (mem, 0);
20234 if (GET_CODE (addr) == PRE_MODIFY)
20236 gcc_assert (REG_P (XEXP (addr, 0))
20237 && GET_CODE (XEXP (addr, 1)) == PLUS
20238 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20239 scratch_or_premodify = XEXP (addr, 0);
20240 if (!HARD_REGISTER_P (scratch_or_premodify))
20241 /* If we have a pseudo here then reload will have arranged
20242 to have it replaced, but only in the original insn.
20243 Use the replacement here too. */
20244 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20246 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20247 expressions from the original insn, without unsharing them.
20248 Any RTL that points into the original insn will of course
20249 have register replacements applied. That is why we don't
20250 need to look for replacements under the PLUS. */
20251 addr = XEXP (addr, 1);
20253 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20255 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20257 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20259 /* Now create the move. */
20260 if (store_p)
20261 emit_insn (gen_rtx_SET (mem, reg));
20262 else
20263 emit_insn (gen_rtx_SET (reg, mem));
20265 return;
20268 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
20269 this function has any SDmode references. If we are on a power7 or later, we
20270 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
20271 can load/store the value. */
20273 static void
20274 rs6000_alloc_sdmode_stack_slot (void)
20276 tree t;
20277 basic_block bb;
20278 gimple_stmt_iterator gsi;
20280 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
20281 /* We use a different approach for dealing with the secondary
20282 memory in LRA. */
20283 if (ira_use_lra_p)
20284 return;
20286 if (TARGET_NO_SDMODE_STACK)
20287 return;
20289 FOR_EACH_BB_FN (bb, cfun)
20290 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
20292 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
20293 if (ret)
20295 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
20296 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
20297 SDmode, 0);
20298 return;
20302 /* Check for any SDmode parameters of the function. */
20303 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
20305 if (TREE_TYPE (t) == error_mark_node)
20306 continue;
20308 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
20309 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
20311 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
20312 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
20313 SDmode, 0);
20314 return;
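/* Roughly speaking, the slot above is DDmode (8 bytes) even though SDmode
   values are only 4 bytes because, without the power7 load/store forms,
   the FPRs can only move decimal float values 8 bytes at a time; the
   4-byte SDmode view is then carved out of the 8-byte slot with
   adjust_address_nv. */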
20319 static void
20320 rs6000_instantiate_decls (void)
20322 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
20323 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
20326 /* Given an rtx X being reloaded into a reg required to be
20327 in class CLASS, return the class of reg to actually use.
20328 In general this is just CLASS; but on some machines
20329 in some cases it is preferable to use a more restrictive class.
20331 On the RS/6000, we have to return NO_REGS when we want to reload a
20332 floating-point CONST_DOUBLE to force it to be copied to memory.
20334 We also don't want to reload integer values into floating-point
20335 registers if we can at all help it. In fact, this can
20336 cause reload to die, if it tries to generate a reload of CTR
20337 into a FP register and discovers it doesn't have the memory location
20338 required.
20340 ??? Would it be a good idea to have reload do the converse, that is
20341 try to reload floating modes into FP registers if possible?
20342 */
20344 static enum reg_class
20345 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20347 machine_mode mode = GET_MODE (x);
20348 bool is_constant = CONSTANT_P (x);
20350 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20351 reload class for it. */
20352 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20353 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20354 return NO_REGS;
20356 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20357 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20358 return NO_REGS;
20360 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20361 the reloading of address expressions using PLUS into floating point
20362 registers. */
20363 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20365 if (is_constant)
20367 /* Zero is always allowed in all VSX registers. */
20368 if (x == CONST0_RTX (mode))
20369 return rclass;
20371 /* If this is a vector constant that can be formed with a few Altivec
20372 instructions, we want altivec registers. */
20373 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20374 return ALTIVEC_REGS;
20376 /* Force constant to memory. */
20377 return NO_REGS;
20380 /* D-form addressing can easily reload the value. */
20381 if (mode_supports_vmx_dform (mode)
20382 || mode_supports_vsx_dform_quad (mode))
20383 return rclass;
20385 /* If this is a scalar floating point value and we don't have D-form
20386 addressing, prefer the traditional floating point registers so that we
20387 can use D-form (register+offset) addressing. */
20388 if (GET_MODE_SIZE (mode) < 16 && rclass == VSX_REGS)
20389 return FLOAT_REGS;
20391 /* Prefer the Altivec registers if Altivec is handling the vector
20392 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20393 loads. */
20394 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20395 || mode == V1TImode)
20396 return ALTIVEC_REGS;
20398 return rclass;
20401 if (is_constant || GET_CODE (x) == PLUS)
20403 if (reg_class_subset_p (GENERAL_REGS, rclass))
20404 return GENERAL_REGS;
20405 if (reg_class_subset_p (BASE_REGS, rclass))
20406 return BASE_REGS;
20407 return NO_REGS;
20410 /* If we haven't picked a register class, and the type is a vector or
20411 floating point type, prefer to use the VSX, FPR, or Altivec register
20412 classes. */
20413 if (rclass == NO_REGS)
20415 if (TARGET_VSX && VECTOR_MEM_VSX_OR_P8_VECTOR_P (mode))
20416 return VSX_REGS;
20418 if (TARGET_ALTIVEC && VECTOR_MEM_ALTIVEC_P (mode))
20419 return ALTIVEC_REGS;
20421 if (DECIMAL_FLOAT_MODE_P (mode))
20422 return TARGET_DFP ? FLOAT_REGS : NO_REGS;
20424 if (TARGET_FPRS && TARGET_HARD_FLOAT && FLOAT_MODE_P (mode)
20425 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) != 0)
20426 return FLOAT_REGS;
20429 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20430 return GENERAL_REGS;
20432 return rclass;
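/* For example, a DFmode pseudo asked for VSX_REGS on a target without
   D-form VSX addressing is steered to FLOAT_REGS above, so that spills
   and reloads can use ordinary lfd/stfd reg+offset addressing rather
   than an indexed-only form. */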
20435 /* Debug version of rs6000_preferred_reload_class. */
20436 static enum reg_class
20437 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20439 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20441 fprintf (stderr,
20442 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20443 "mode = %s, x:\n",
20444 reg_class_names[ret], reg_class_names[rclass],
20445 GET_MODE_NAME (GET_MODE (x)));
20446 debug_rtx (x);
20448 return ret;
20451 /* If we are copying between FP or AltiVec registers and anything else, we need
20452 a memory location. The exception is when we are targeting ppc64 and the
20453 move instructions between fprs and gprs are available. Also, under VSX, you
20454 can copy vector registers from the FP register set to the Altivec register
20455 set and vice versa. */
20457 static bool
20458 rs6000_secondary_memory_needed (enum reg_class from_class,
20459 enum reg_class to_class,
20460 machine_mode mode)
20462 enum rs6000_reg_type from_type, to_type;
20463 bool altivec_p = ((from_class == ALTIVEC_REGS)
20464 || (to_class == ALTIVEC_REGS));
20466 /* If a simple/direct move is available, we don't need secondary memory. */
20467 from_type = reg_class_to_reg_type[(int)from_class];
20468 to_type = reg_class_to_reg_type[(int)to_class];
20470 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20471 (secondary_reload_info *)0, altivec_p))
20472 return false;
20474 /* If we have a floating point or vector register class, we need to use
20475 memory to transfer the data. */
20476 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20477 return true;
20479 return false;
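/* E.g. a DFmode copy between FLOAT_REGS and GENERAL_REGS on a target
   without direct moves has no single-instruction path and must be staged
   through a stack slot, while the same copy on power8 is caught by the
   direct-move test above and needs no memory. */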
20482 /* Debug version of rs6000_secondary_memory_needed. */
20483 static bool
20484 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
20485 enum reg_class to_class,
20486 machine_mode mode)
20488 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
20490 fprintf (stderr,
20491 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20492 "to_class = %s, mode = %s\n",
20493 ret ? "true" : "false",
20494 reg_class_names[from_class],
20495 reg_class_names[to_class],
20496 GET_MODE_NAME (mode));
20498 return ret;
20501 /* Return the register class of a scratch register needed to copy IN into
20502 or out of a register in RCLASS in MODE. If it can be done directly,
20503 NO_REGS is returned. */
20505 static enum reg_class
20506 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20507 rtx in)
20509 int regno;
20511 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20512 #if TARGET_MACHO
20513 && MACHOPIC_INDIRECT
20514 #endif
20515 ))
20517 /* We cannot copy a symbolic operand directly into anything
20518 other than BASE_REGS for TARGET_ELF. So indicate that a
20519 register from BASE_REGS is needed as an intermediate
20520 register.
20522 On Darwin, pic addresses require a load from memory, which
20523 needs a base register. */
20524 if (rclass != BASE_REGS
20525 && (GET_CODE (in) == SYMBOL_REF
20526 || GET_CODE (in) == HIGH
20527 || GET_CODE (in) == LABEL_REF
20528 || GET_CODE (in) == CONST))
20529 return BASE_REGS;
20532 if (GET_CODE (in) == REG)
20534 regno = REGNO (in);
20535 if (regno >= FIRST_PSEUDO_REGISTER)
20537 regno = true_regnum (in);
20538 if (regno >= FIRST_PSEUDO_REGISTER)
20539 regno = -1;
20542 else if (GET_CODE (in) == SUBREG)
20544 regno = true_regnum (in);
20545 if (regno >= FIRST_PSEUDO_REGISTER)
20546 regno = -1;
20548 else
20549 regno = -1;
20551 /* If we have VSX register moves, prefer moving scalar values between
20552 Altivec registers and GPR by going via an FPR (and then via memory)
20553 instead of reloading the secondary memory address for Altivec moves. */
20554 if (TARGET_VSX
20555 && GET_MODE_SIZE (mode) < 16
20556 && !mode_supports_vmx_dform (mode)
20557 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20558 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20559 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20560 && (regno >= 0 && INT_REGNO_P (regno)))))
20561 return FLOAT_REGS;
20563 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20564 into anything. */
20565 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20566 || (regno >= 0 && INT_REGNO_P (regno)))
20567 return NO_REGS;
20569 /* Constants, memory, and VSX registers can go into VSX registers (both the
20570 traditional floating point and the altivec registers). */
20571 if (rclass == VSX_REGS
20572 && (regno == -1 || VSX_REGNO_P (regno)))
20573 return NO_REGS;
20575 /* Constants, memory, and FP registers can go into FP registers. */
20576 if ((regno == -1 || FP_REGNO_P (regno))
20577 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20578 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20580 /* Memory, and AltiVec registers can go into AltiVec registers. */
20581 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20582 && rclass == ALTIVEC_REGS)
20583 return NO_REGS;
20585 /* We can copy among the CR registers. */
20586 if ((rclass == CR_REGS || rclass == CR0_REGS)
20587 && regno >= 0 && CR_REGNO_P (regno))
20588 return NO_REGS;
20590 /* Otherwise, we need GENERAL_REGS. */
20591 return GENERAL_REGS;
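/* So, for example, copying an SFmode value held in an Altivec register
   into a GPR without D-form VMX addressing yields FLOAT_REGS above: the
   copy bounces through an FPR, whose memory accesses are cheaper than
   reloading an Altivec-style address. */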
20594 /* Debug version of rs6000_secondary_reload_class. */
20595 static enum reg_class
20596 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20597 machine_mode mode, rtx in)
20599 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20600 fprintf (stderr,
20601 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20602 "mode = %s, input rtx:\n",
20603 reg_class_names[ret], reg_class_names[rclass],
20604 GET_MODE_NAME (mode));
20605 debug_rtx (in);
20607 return ret;
20610 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
20612 static bool
20613 rs6000_cannot_change_mode_class (machine_mode from,
20614 machine_mode to,
20615 enum reg_class rclass)
20617 unsigned from_size = GET_MODE_SIZE (from);
20618 unsigned to_size = GET_MODE_SIZE (to);
20620 if (from_size != to_size)
20622 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20624 if (reg_classes_intersect_p (xclass, rclass))
20626 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
20627 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
20628 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20629 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20631 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20632 single register under VSX because the scalar part of the register
20633 is in the upper 64-bits, and not the lower 64-bits. Types like
20634 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20635 IEEE floating point can't overlap, and neither can small
20636 values. */
20638 if (to_float128_vector_p && from_float128_vector_p)
20639 return false;
20641 else if (to_float128_vector_p || from_float128_vector_p)
20642 return true;
20644 /* TDmode in floating-mode registers must always go into a register
20645 pair with the most significant word in the even-numbered register
20646 to match ISA requirements. In little-endian mode, this does not
20647 match subreg numbering, so we cannot allow subregs. */
20648 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20649 return true;
20651 if (from_size < 8 || to_size < 8)
20652 return true;
20654 if (from_size == 8 && (8 * to_nregs) != to_size)
20655 return true;
20657 if (to_size == 8 && (8 * from_nregs) != from_size)
20658 return true;
20660 return false;
20662 else
20663 return false;
20666 if (TARGET_E500_DOUBLE
20667 && ((((to) == DFmode) + ((from) == DFmode)) == 1
20668 || (((to) == TFmode) + ((from) == TFmode)) == 1
20669 || (((to) == IFmode) + ((from) == IFmode)) == 1
20670 || (((to) == KFmode) + ((from) == KFmode)) == 1
20671 || (((to) == DDmode) + ((from) == DDmode)) == 1
20672 || (((to) == TDmode) + ((from) == TDmode)) == 1
20673 || (((to) == DImode) + ((from) == DImode)) == 1))
20674 return true;
20676 /* Since the VSX register set includes traditional floating point registers
20677 and altivec registers, just check for the size being different instead of
20678 trying to check whether the modes are vector modes. Otherwise it won't
20679 allow say DF and DI to change classes. For types like TFmode and TDmode
20680 that take 2 64-bit registers, rather than a single 128-bit register, don't
20681 allow subregs of those types to other 128 bit types. */
20682 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20684 unsigned num_regs = (from_size + 15) / 16;
20685 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
20686 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
20687 return true;
20689 return (from_size != 8 && from_size != 16);
20692 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20693 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20694 return true;
20696 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
20697 && reg_classes_intersect_p (GENERAL_REGS, rclass))
20698 return true;
20700 return false;
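/* A concrete case the size checks above reject: (subreg:DI (reg:KF ...))
   when KFmode occupies a single VSX register, since the 64-bit scalar
   part of such a register is its upper half and a DImode subreg would
   name the wrong bits. */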
20703 /* Debug version of rs6000_cannot_change_mode_class. */
20704 static bool
20705 rs6000_debug_cannot_change_mode_class (machine_mode from,
20706 machine_mode to,
20707 enum reg_class rclass)
20709 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
20711 fprintf (stderr,
20712 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20713 "to = %s, rclass = %s\n",
20714 ret ? "true" : "false",
20715 GET_MODE_NAME (from), GET_MODE_NAME (to),
20716 reg_class_names[rclass]);
20718 return ret;
20721 /* Return a string to do a move operation of 128 bits of data. */
20723 const char *
20724 rs6000_output_move_128bit (rtx operands[])
20726 rtx dest = operands[0];
20727 rtx src = operands[1];
20728 machine_mode mode = GET_MODE (dest);
20729 int dest_regno;
20730 int src_regno;
20731 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20732 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20734 if (REG_P (dest))
20736 dest_regno = REGNO (dest);
20737 dest_gpr_p = INT_REGNO_P (dest_regno);
20738 dest_fp_p = FP_REGNO_P (dest_regno);
20739 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20740 dest_vsx_p = dest_fp_p | dest_vmx_p;
20742 else
20744 dest_regno = -1;
20745 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20748 if (REG_P (src))
20750 src_regno = REGNO (src);
20751 src_gpr_p = INT_REGNO_P (src_regno);
20752 src_fp_p = FP_REGNO_P (src_regno);
20753 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20754 src_vsx_p = src_fp_p | src_vmx_p;
20756 else
20758 src_regno = -1;
20759 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20762 /* Register moves. */
20763 if (dest_regno >= 0 && src_regno >= 0)
20765 if (dest_gpr_p)
20767 if (src_gpr_p)
20768 return "#";
20770 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20771 return (WORDS_BIG_ENDIAN
20772 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20773 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20775 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20776 return "#";
20779 else if (TARGET_VSX && dest_vsx_p)
20781 if (src_vsx_p)
20782 return "xxlor %x0,%x1,%x1";
20784 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20785 return (WORDS_BIG_ENDIAN
20786 ? "mtvsrdd %x0,%1,%L1"
20787 : "mtvsrdd %x0,%L1,%1");
20789 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20790 return "#";
20793 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20794 return "vor %0,%1,%1";
20796 else if (dest_fp_p && src_fp_p)
20797 return "#";
20800 /* Loads. */
20801 else if (dest_regno >= 0 && MEM_P (src))
20803 if (dest_gpr_p)
20805 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20806 return "lq %0,%1";
20807 else
20808 return "#";
20811 else if (TARGET_ALTIVEC && dest_vmx_p
20812 && altivec_indexed_or_indirect_operand (src, mode))
20813 return "lvx %0,%y1";
20815 else if (TARGET_VSX && dest_vsx_p)
20817 if (mode_supports_vsx_dform_quad (mode)
20818 && quad_address_p (XEXP (src, 0), mode, true))
20819 return "lxv %x0,%1";
20821 else if (TARGET_P9_VECTOR)
20822 return "lxvx %x0,%y1";
20824 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20825 return "lxvw4x %x0,%y1";
20827 else
20828 return "lxvd2x %x0,%y1";
20831 else if (TARGET_ALTIVEC && dest_vmx_p)
20832 return "lvx %0,%y1";
20834 else if (dest_fp_p)
20835 return "#";
20838 /* Stores. */
20839 else if (src_regno >= 0 && MEM_P (dest))
20841 if (src_gpr_p)
20843 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20844 return "stq %1,%0";
20845 else
20846 return "#";
20849 else if (TARGET_ALTIVEC && src_vmx_p
20850 && altivec_indexed_or_indirect_operand (src, mode))
20851 return "stvx %1,%y0";
20853 else if (TARGET_VSX && src_vsx_p)
20855 if (mode_supports_vsx_dform_quad (mode)
20856 && quad_address_p (XEXP (dest, 0), mode, true))
20857 return "stxv %x1,%0";
20859 else if (TARGET_P9_VECTOR)
20860 return "stxvx %x1,%y0";
20862 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20863 return "stxvw4x %x1,%y0";
20865 else
20866 return "stxvd2x %x1,%y0";
20869 else if (TARGET_ALTIVEC && src_vmx_p)
20870 return "stvx %1,%y0";
20872 else if (src_fp_p)
20873 return "#";
20876 /* Constants. */
20877 else if (dest_regno >= 0
20878 && (GET_CODE (src) == CONST_INT
20879 || GET_CODE (src) == CONST_WIDE_INT
20880 || GET_CODE (src) == CONST_DOUBLE
20881 || GET_CODE (src) == CONST_VECTOR))
20883 if (dest_gpr_p)
20884 return "#";
20886 else if ((dest_vmx_p && TARGET_ALTIVEC)
20887 || (dest_vsx_p && TARGET_VSX))
20888 return output_vec_const_move (operands);
20891 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20894 /* Validate a 128-bit move. */
20895 bool
20896 rs6000_move_128bit_ok_p (rtx operands[])
20898 machine_mode mode = GET_MODE (operands[0]);
20899 return (gpc_reg_operand (operands[0], mode)
20900 || gpc_reg_operand (operands[1], mode));
20903 /* Return true if a 128-bit move needs to be split. */
20904 bool
20905 rs6000_split_128bit_ok_p (rtx operands[])
20907 if (!reload_completed)
20908 return false;
20910 if (!gpr_or_gpr_p (operands[0], operands[1]))
20911 return false;
20913 if (quad_load_store_p (operands[0], operands[1]))
20914 return false;
20916 return true;
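/* The "#" strings returned above tell final that the insn must be split;
   rs6000_split_128bit_ok_p is the condition the splitters check, so for
   example a TImode GPR-to-GPR copy that is not a quad lq/stq becomes two
   64-bit (or four 32-bit) word moves after reload. */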
20920 /* Given a comparison operation, return the bit number in CCR to test. We
20921 know this is a valid comparison.
20923 SCC_P is 1 if this is for an scc. That means that %D will have been
20924 used instead of %C, so the bits will be in different places.
20926 Return -1 if OP isn't a valid comparison for some reason. */
20928 int
20929 ccr_bit (rtx op, int scc_p)
20931 enum rtx_code code = GET_CODE (op);
20932 machine_mode cc_mode;
20933 int cc_regnum;
20934 int base_bit;
20935 rtx reg;
20937 if (!COMPARISON_P (op))
20938 return -1;
20940 reg = XEXP (op, 0);
20942 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20944 cc_mode = GET_MODE (reg);
20945 cc_regnum = REGNO (reg);
20946 base_bit = 4 * (cc_regnum - CR0_REGNO);
20948 validate_condition_mode (code, cc_mode);
20950 /* When generating a sCOND operation, only positive conditions are
20951 allowed. */
20952 gcc_assert (!scc_p
20953 || code == EQ || code == GT || code == LT || code == UNORDERED
20954 || code == GTU || code == LTU);
20956 switch (code)
20958 case NE:
20959 return scc_p ? base_bit + 3 : base_bit + 2;
20960 case EQ:
20961 return base_bit + 2;
20962 case GT: case GTU: case UNLE:
20963 return base_bit + 1;
20964 case LT: case LTU: case UNGE:
20965 return base_bit;
20966 case ORDERED: case UNORDERED:
20967 return base_bit + 3;
20969 case GE: case GEU:
20970 /* If scc, we will have done a cror to put the bit in the
20971 unordered position. So test that bit. For integer, this is ! LT
20972 unless this is an scc insn. */
20973 return scc_p ? base_bit + 3 : base_bit;
20975 case LE: case LEU:
20976 return scc_p ? base_bit + 3 : base_bit + 1;
20978 default:
20979 gcc_unreachable ();
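/* Worked example: for (eq (reg:CC 70) (const_int 0)), register 70 is CR
   field 2 (CR0_REGNO being 68), so base_bit is 4 * 2 = 8 and ccr_bit
   returns 8 + 2 = 10 for the EQ bit. */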
20983 /* Return the GOT register. */
20985 rtx
20986 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20988 /* The second flow pass currently (June 1999) can't update
20989 regs_ever_live without disturbing other parts of the compiler, so
20990 update it here to make the prolog/epilogue code happy. */
20991 if (!can_create_pseudo_p ()
20992 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20993 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20995 crtl->uses_pic_offset_table = 1;
20997 return pic_offset_table_rtx;
21000 static rs6000_stack_t stack_info;
21002 /* Function to init struct machine_function.
21003 This will be called, via a pointer variable,
21004 from push_function_context. */
21006 static struct machine_function *
21007 rs6000_init_machine_status (void)
21009 stack_info.reload_completed = 0;
21010 return ggc_cleared_alloc<machine_function> ();
21013 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21015 /* Write out a function code label. */
21017 void
21018 rs6000_output_function_entry (FILE *file, const char *fname)
21020 if (fname[0] != '.')
21022 switch (DEFAULT_ABI)
21024 default:
21025 gcc_unreachable ();
21027 case ABI_AIX:
21028 if (DOT_SYMBOLS)
21029 putc ('.', file);
21030 else
21031 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
21032 break;
21034 case ABI_ELFv2:
21035 case ABI_V4:
21036 case ABI_DARWIN:
21037 break;
21041 RS6000_OUTPUT_BASENAME (file, fname);
21044 /* Print an operand. Recognize special options, documented below. */
21046 #if TARGET_ELF
21047 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21048 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21049 #else
21050 #define SMALL_DATA_RELOC "sda21"
21051 #define SMALL_DATA_REG 0
21052 #endif
21054 void
21055 print_operand (FILE *file, rtx x, int code)
21057 int i;
21058 unsigned HOST_WIDE_INT uval;
21060 switch (code)
21062 /* %a is output_address. */
21064 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21065 output_operand. */
21067 case 'D':
21068 /* Like 'J' but get to the GT bit only. */
21069 gcc_assert (REG_P (x));
21071 /* Bit 1 is GT bit. */
21072 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21074 /* Add one for shift count in rlinm for scc. */
21075 fprintf (file, "%d", i + 1);
21076 return;
21078 case 'e':
21079 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21080 if (! INT_P (x))
21082 output_operand_lossage ("invalid %%e value");
21083 return;
21086 uval = INTVAL (x);
21087 if ((uval & 0xffff) == 0 && uval != 0)
21088 putc ('s', file);
21089 return;
21091 case 'E':
21092 /* X is a CR register. Print the number of the EQ bit of the CR. */
21093 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21094 output_operand_lossage ("invalid %%E value");
21095 else
21096 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21097 return;
21099 case 'f':
21100 /* X is a CR register. Print the shift count needed to move it
21101 to the high-order four bits. */
21102 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21103 output_operand_lossage ("invalid %%f value");
21104 else
21105 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21106 return;
21108 case 'F':
21109 /* Similar, but print the count for the rotate in the opposite
21110 direction. */
21111 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21112 output_operand_lossage ("invalid %%F value");
21113 else
21114 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21115 return;
21117 case 'G':
21118 /* X is a constant integer. If it is negative, print "m",
21119 otherwise print "z". This is to make an aze or ame insn. */
21120 if (GET_CODE (x) != CONST_INT)
21121 output_operand_lossage ("invalid %%G value");
21122 else if (INTVAL (x) >= 0)
21123 putc ('z', file);
21124 else
21125 putc ('m', file);
21126 return;
21128 case 'h':
21129 /* If constant, output low-order five bits. Otherwise, write
21130 normally. */
21131 if (INT_P (x))
21132 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21133 else
21134 print_operand (file, x, 0);
21135 return;
21137 case 'H':
21138 /* If constant, output low-order six bits. Otherwise, write
21139 normally. */
21140 if (INT_P (x))
21141 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21142 else
21143 print_operand (file, x, 0);
21144 return;
21146 case 'I':
21147 /* Print `i' if this is a constant, else nothing. */
21148 if (INT_P (x))
21149 putc ('i', file);
21150 return;
21152 case 'j':
21153 /* Write the bit number in CCR for jump. */
21154 i = ccr_bit (x, 0);
21155 if (i == -1)
21156 output_operand_lossage ("invalid %%j code");
21157 else
21158 fprintf (file, "%d", i);
21159 return;
21161 case 'J':
21162 /* Similar, but add one for shift count in rlinm for scc and pass
21163 scc flag to `ccr_bit'. */
21164 i = ccr_bit (x, 1);
21165 if (i == -1)
21166 output_operand_lossage ("invalid %%J code");
21167 else
21168 /* If we want bit 31, write a shift count of zero, not 32. */
21169 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21170 return;
21172 case 'k':
21173 /* X must be a constant. Write the 1's complement of the
21174 constant. */
21175 if (! INT_P (x))
21176 output_operand_lossage ("invalid %%k value");
21177 else
21178 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21179 return;
21181 case 'K':
21182 /* X must be a symbolic constant on ELF. Write an
21183 expression suitable for an 'addi' that adds in the low 16
21184 bits of the MEM. */
21185 if (GET_CODE (x) == CONST)
21187 if (GET_CODE (XEXP (x, 0)) != PLUS
21188 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21189 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21190 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21191 output_operand_lossage ("invalid %%K value");
21193 print_operand_address (file, x);
21194 fputs ("@l", file);
21195 return;
21197 /* %l is output_asm_label. */
21199 case 'L':
21200 /* Write second word of DImode or DFmode reference. Works on register
21201 or non-indexed memory only. */
21202 if (REG_P (x))
21203 fputs (reg_names[REGNO (x) + 1], file);
21204 else if (MEM_P (x))
21206 machine_mode mode = GET_MODE (x);
21207 /* Handle possible auto-increment. Since it is pre-increment and
21208 we have already done it, we can just use an offset of word. */
21209 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21210 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21211 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21212 UNITS_PER_WORD));
21213 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21214 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21215 UNITS_PER_WORD));
21216 else
21217 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21218 UNITS_PER_WORD),
21219 0));
21221 if (small_data_operand (x, GET_MODE (x)))
21222 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21223 reg_names[SMALL_DATA_REG]);
21225 return;
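/* For instance, with a DImode value in registers 5 and 6, "%L1" prints
   the name of register 6, giving access to the second word of the
   pair. */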
21227 case 'N':
21228 /* Write the number of elements in the vector times 4. */
21229 if (GET_CODE (x) != PARALLEL)
21230 output_operand_lossage ("invalid %%N value");
21231 else
21232 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21233 return;
21235 case 'O':
21236 /* Similar, but subtract 1 first. */
21237 if (GET_CODE (x) != PARALLEL)
21238 output_operand_lossage ("invalid %%O value");
21239 else
21240 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21241 return;
21243 case 'p':
21244 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21245 if (! INT_P (x)
21246 || INTVAL (x) < 0
21247 || (i = exact_log2 (INTVAL (x))) < 0)
21248 output_operand_lossage ("invalid %%p value");
21249 else
21250 fprintf (file, "%d", i);
21251 return;
21253 case 'P':
21254 /* The operand must be an indirect memory reference. The result
21255 is the register name. */
21256 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21257 || REGNO (XEXP (x, 0)) >= 32)
21258 output_operand_lossage ("invalid %%P value");
21259 else
21260 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21261 return;
21263 case 'q':
21264 /* This outputs the logical code corresponding to a boolean
21265 expression. The expression may have one or both operands
21266 negated (if one, only the first one). For condition register
21267 logical operations, it will also treat the negated
21268 CR codes as NOTs, but not handle NOTs of them. */
21270 const char *const *t = 0;
21271 const char *s;
21272 enum rtx_code code = GET_CODE (x);
21273 static const char * const tbl[3][3] = {
21274 { "and", "andc", "nor" },
21275 { "or", "orc", "nand" },
21276 { "xor", "eqv", "xor" } };
21278 if (code == AND)
21279 t = tbl[0];
21280 else if (code == IOR)
21281 t = tbl[1];
21282 else if (code == XOR)
21283 t = tbl[2];
21284 else
21285 output_operand_lossage ("invalid %%q value");
21287 if (GET_CODE (XEXP (x, 0)) != NOT)
21288 s = t[0];
21289 else
21291 if (GET_CODE (XEXP (x, 1)) == NOT)
21292 s = t[2];
21293 else
21294 s = t[1];
21297 fputs (s, file);
21299 return;
21301 case 'Q':
21302 if (! TARGET_MFCRF)
21303 return;
21304 fputc (',', file);
21305 /* FALLTHRU */
21307 case 'R':
21308 /* X is a CR register. Print the mask for `mtcrf'. */
21309 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21310 output_operand_lossage ("invalid %%R value");
21311 else
21312 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21313 return;
21315 case 's':
21316 /* Low 5 bits of 32 - value */
21317 if (! INT_P (x))
21318 output_operand_lossage ("invalid %%s value");
21319 else
21320 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21321 return;
21323 case 't':
21324 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21325 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21327 /* Bit 3 is OV bit. */
21328 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21330 /* If we want bit 31, write a shift count of zero, not 32. */
21331 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21332 return;
21334 case 'T':
21335 /* Print the symbolic name of a branch target register. */
21336 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21337 && REGNO (x) != CTR_REGNO))
21338 output_operand_lossage ("invalid %%T value");
21339 else if (REGNO (x) == LR_REGNO)
21340 fputs ("lr", file);
21341 else
21342 fputs ("ctr", file);
21343 return;
21345 case 'u':
21346 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21347 for use in unsigned operand. */
21348 if (! INT_P (x))
21350 output_operand_lossage ("invalid %%u value");
21351 return;
21354 uval = INTVAL (x);
21355 if ((uval & 0xffff) == 0)
21356 uval >>= 16;
21358 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21359 return;
21361 case 'v':
21362 /* High-order 16 bits of constant for use in signed operand. */
21363 if (! INT_P (x))
21364 output_operand_lossage ("invalid %%v value");
21365 else
21366 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21367 (INTVAL (x) >> 16) & 0xffff);
21368 return;
21370 case 'U':
21371 /* Print `u' if this has an auto-increment or auto-decrement. */
21372 if (MEM_P (x)
21373 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21374 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21375 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21376 putc ('u', file);
21377 return;
21379 case 'V':
21380 /* Print the trap code for this operand. */
21381 switch (GET_CODE (x))
21383 case EQ:
21384 fputs ("eq", file); /* 4 */
21385 break;
21386 case NE:
21387 fputs ("ne", file); /* 24 */
21388 break;
21389 case LT:
21390 fputs ("lt", file); /* 16 */
21391 break;
21392 case LE:
21393 fputs ("le", file); /* 20 */
21394 break;
21395 case GT:
21396 fputs ("gt", file); /* 8 */
21397 break;
21398 case GE:
21399 fputs ("ge", file); /* 12 */
21400 break;
21401 case LTU:
21402 fputs ("llt", file); /* 2 */
21403 break;
21404 case LEU:
21405 fputs ("lle", file); /* 6 */
21406 break;
21407 case GTU:
21408 fputs ("lgt", file); /* 1 */
21409 break;
21410 case GEU:
21411 fputs ("lge", file); /* 5 */
21412 break;
21413 default:
21414 gcc_unreachable ();
21416 break;
21418 case 'w':
21419 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21420 normally. */
21421 if (INT_P (x))
21422 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21423 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21424 else
21425 print_operand (file, x, 0);
21426 return;
21428 case 'x':
21429 /* X is a FPR or Altivec register used in a VSX context. */
21430 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21431 output_operand_lossage ("invalid %%x value");
21432 else
21434 int reg = REGNO (x);
21435 int vsx_reg = (FP_REGNO_P (reg)
21436 ? reg - 32
21437 : reg - FIRST_ALTIVEC_REGNO + 32);
21439 #ifdef TARGET_REGNAMES
21440 if (TARGET_REGNAMES)
21441 fprintf (file, "%%vs%d", vsx_reg);
21442 else
21443 #endif
21444 fprintf (file, "%d", vsx_reg);
21446 return;
21448 case 'X':
21449 if (MEM_P (x)
21450 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21451 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21452 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21453 putc ('x', file);
21454 return;
21456 case 'Y':
21457 /* Like 'L', for third word of TImode/PTImode */
21458 if (REG_P (x))
21459 fputs (reg_names[REGNO (x) + 2], file);
21460 else if (MEM_P (x))
21462 machine_mode mode = GET_MODE (x);
21463 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21464 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21465 output_address (mode, plus_constant (Pmode,
21466 XEXP (XEXP (x, 0), 0), 8));
21467 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21468 output_address (mode, plus_constant (Pmode,
21469 XEXP (XEXP (x, 0), 0), 8));
21470 else
21471 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21472 if (small_data_operand (x, GET_MODE (x)))
21473 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21474 reg_names[SMALL_DATA_REG]);
21476 return;
21478 case 'z':
21479 /* X is a SYMBOL_REF. Write out the name preceded by a
21480 period and without any trailing data in brackets. Used for function
21481 names. If we are configured for System V (or the embedded ABI) on
21482 the PowerPC, do not emit the period, since those systems do not use
21483 TOCs and the like. */
21484 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21486 /* For macho, check to see if we need a stub. */
21487 if (TARGET_MACHO)
21489 const char *name = XSTR (x, 0);
21490 #if TARGET_MACHO
21491 if (darwin_emit_branch_islands
21492 && MACHOPIC_INDIRECT
21493 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21494 name = machopic_indirection_name (x, /*stub_p=*/true);
21495 #endif
21496 assemble_name (file, name);
21498 else if (!DOT_SYMBOLS)
21499 assemble_name (file, XSTR (x, 0));
21500 else
21501 rs6000_output_function_entry (file, XSTR (x, 0));
21502 return;
21504 case 'Z':
21505 /* Like 'L', for last word of TImode/PTImode. */
21506 if (REG_P (x))
21507 fputs (reg_names[REGNO (x) + 3], file);
21508 else if (MEM_P (x))
21510 machine_mode mode = GET_MODE (x);
21511 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21512 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21513 output_address (mode, plus_constant (Pmode,
21514 XEXP (XEXP (x, 0), 0), 12));
21515 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21516 output_address (mode, plus_constant (Pmode,
21517 XEXP (XEXP (x, 0), 0), 12));
21518 else
21519 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21520 if (small_data_operand (x, GET_MODE (x)))
21521 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21522 reg_names[SMALL_DATA_REG]);
21524 return;
21526 /* Print AltiVec or SPE memory operand. */
21527 case 'y':
21529 rtx tmp;
21531 gcc_assert (MEM_P (x));
21533 tmp = XEXP (x, 0);
21535 /* Ugly hack because %y is overloaded. */
21536 if ((TARGET_SPE || TARGET_E500_DOUBLE)
21537 && (GET_MODE_SIZE (GET_MODE (x)) == 8
21538 || FLOAT128_2REG_P (GET_MODE (x))
21539 || GET_MODE (x) == TImode
21540 || GET_MODE (x) == PTImode))
21542 /* Handle [reg]. */
21543 if (REG_P (tmp))
21545 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
21546 break;
21548 /* Handle [reg+UIMM]. */
21549 else if (GET_CODE (tmp) == PLUS &&
21550 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
21552 int x;
21554 gcc_assert (REG_P (XEXP (tmp, 0)));
21556 x = INTVAL (XEXP (tmp, 1));
21557 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
21558 break;
21561 /* Fall through. Must be [reg+reg]. */
21563 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21564 && GET_CODE (tmp) == AND
21565 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21566 && INTVAL (XEXP (tmp, 1)) == -16)
21567 tmp = XEXP (tmp, 0);
21568 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21569 && GET_CODE (tmp) == PRE_MODIFY)
21570 tmp = XEXP (tmp, 1);
21571 if (REG_P (tmp))
21572 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21573 else
21575 if (GET_CODE (tmp) != PLUS
21576 || !REG_P (XEXP (tmp, 0))
21577 || !REG_P (XEXP (tmp, 1)))
21579 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21580 break;
21583 if (REGNO (XEXP (tmp, 0)) == 0)
21584 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21585 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21586 else
21587 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21588 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21590 break;
21593 case 0:
21594 if (REG_P (x))
21595 fprintf (file, "%s", reg_names[REGNO (x)]);
21596 else if (MEM_P (x))
21598 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21599 know the width from the mode. */
21600 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21601 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21602 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21603 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21604 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21605 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21606 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21607 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21608 else
21609 output_address (GET_MODE (x), XEXP (x, 0));
21611 else
21613 if (toc_relative_expr_p (x, false))
21614 /* This hack along with a corresponding hack in
21615 rs6000_output_addr_const_extra arranges to output addends
21616 where the assembler expects to find them, e.g.
21617 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21618 without this hack would be output as "x@toc+4". We
21619 want "x+4@toc". */
21620 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
21621 else
21622 output_addr_const (file, x);
21624 return;
21626 case '&':
21627 if (const char *name = get_some_local_dynamic_name ())
21628 assemble_name (file, name);
21629 else
21630 output_operand_lossage ("'%%&' used without any "
21631 "local dynamic TLS references");
21632 return;
21634 default:
21635 output_operand_lossage ("invalid %%xn code");
21639 /* Print the address of an operand. */
21641 void
21642 print_operand_address (FILE *file, rtx x)
21644 if (REG_P (x))
21645 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21646 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21647 || GET_CODE (x) == LABEL_REF)
21649 output_addr_const (file, x);
21650 if (small_data_operand (x, GET_MODE (x)))
21651 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21652 reg_names[SMALL_DATA_REG]);
21653 else
21654 gcc_assert (!TARGET_TOC);
21656 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21657 && REG_P (XEXP (x, 1)))
21659 if (REGNO (XEXP (x, 0)) == 0)
21660 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21661 reg_names[ REGNO (XEXP (x, 0)) ]);
21662 else
21663 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21664 reg_names[ REGNO (XEXP (x, 1)) ]);
21666 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21667 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21668 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21669 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21670 #if TARGET_MACHO
21671 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21672 && CONSTANT_P (XEXP (x, 1)))
21674 fprintf (file, "lo16(");
21675 output_addr_const (file, XEXP (x, 1));
21676 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21678 #endif
21679 #if TARGET_ELF
21680 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21681 && CONSTANT_P (XEXP (x, 1)))
21683 output_addr_const (file, XEXP (x, 1));
21684 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21686 #endif
21687 else if (toc_relative_expr_p (x, false))
21689 /* This hack along with a corresponding hack in
21690 rs6000_output_addr_const_extra arranges to output addends
21691 where the assembler expects to find them, e.g.
21692 (lo_sum (reg 9)
21693 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21694 without this hack would be output as "x@toc+8@l(9)". We
21695 want "x+8@toc@l(9)". */
21696 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
21697 if (GET_CODE (x) == LO_SUM)
21698 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21699 else
21700 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
21702 else
21703 gcc_unreachable ();
21706 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
21708 static bool
21709 rs6000_output_addr_const_extra (FILE *file, rtx x)
21711 if (GET_CODE (x) == UNSPEC)
21712 switch (XINT (x, 1))
21714 case UNSPEC_TOCREL:
21715 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21716 && REG_P (XVECEXP (x, 0, 1))
21717 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21718 output_addr_const (file, XVECEXP (x, 0, 0));
21719 if (x == tocrel_base && tocrel_offset != const0_rtx)
21721 if (INTVAL (tocrel_offset) >= 0)
21722 fprintf (file, "+");
21723 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
21725 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21727 putc ('-', file);
21728 assemble_name (file, toc_label_name);
21729 need_toc_init = 1;
21731 else if (TARGET_ELF)
21732 fputs ("@toc", file);
21733 return true;
21735 #if TARGET_MACHO
21736 case UNSPEC_MACHOPIC_OFFSET:
21737 output_addr_const (file, XVECEXP (x, 0, 0));
21738 putc ('-', file);
21739 machopic_output_function_base_name (file);
21740 return true;
21741 #endif
21743 return false;
21746 /* Target hook for assembling integer objects. The PowerPC version has
21747 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21748 is defined. It also needs to handle DI-mode objects on 64-bit
21749 targets. */
21751 static bool
21752 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21754 #ifdef RELOCATABLE_NEEDS_FIXUP
21755 /* Special handling for SI values. */
21756 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21758 static int recurse = 0;
21760 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21761 the .fixup section. Since the TOC section is already relocated, we
21762 don't need to mark it here. We used to skip the text section, but it
21763 should never be valid for relocated addresses to be placed in the text
21764 section. */
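/* An illustrative sketch of the assembly emitted below (the label and
   symbol names are made up for the example):
	.LCP0:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP0
		.previous  */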
21765 if (DEFAULT_ABI == ABI_V4
21766 && (TARGET_RELOCATABLE || flag_pic > 1)
21767 && in_section != toc_section
21768 && !recurse
21769 && !CONST_SCALAR_INT_P (x)
21770 && CONSTANT_P (x))
21772 char buf[256];
21774 recurse = 1;
21775 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21776 fixuplabelno++;
21777 ASM_OUTPUT_LABEL (asm_out_file, buf);
21778 fprintf (asm_out_file, "\t.long\t(");
21779 output_addr_const (asm_out_file, x);
21780 fprintf (asm_out_file, ")@fixup\n");
21781 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21782 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21783 fprintf (asm_out_file, "\t.long\t");
21784 assemble_name (asm_out_file, buf);
21785 fprintf (asm_out_file, "\n\t.previous\n");
21786 recurse = 0;
21787 return true;
21789 /* Remove initial .'s to turn a -mcall-aixdesc function
21790 address into the address of the descriptor, not the function
21791 itself. */
21792 else if (GET_CODE (x) == SYMBOL_REF
21793 && XSTR (x, 0)[0] == '.'
21794 && DEFAULT_ABI == ABI_AIX)
21796 const char *name = XSTR (x, 0);
21797 while (*name == '.')
21798 name++;
21800 fprintf (asm_out_file, "\t.long\t%s\n", name);
21801 return true;
21804 #endif /* RELOCATABLE_NEEDS_FIXUP */
21805 return default_assemble_integer (x, size, aligned_p);
21808 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21809 /* Emit an assembler directive to set symbol visibility for DECL to
21810 VISIBILITY_TYPE. */
21812 static void
21813 rs6000_assemble_visibility (tree decl, int vis)
21815 if (TARGET_XCOFF)
21816 return;
21818 /* Functions need to have their entry point symbol visibility set as
21819 well as their descriptor symbol visibility. */
21820 if (DEFAULT_ABI == ABI_AIX
21821 && DOT_SYMBOLS
21822 && TREE_CODE (decl) == FUNCTION_DECL)
21824 static const char * const visibility_types[] = {
21825 NULL, "internal", "hidden", "protected"
21828 const char *name, *type;
21830 name = ((* targetm.strip_name_encoding)
21831 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21832 type = visibility_types[vis];
21834 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21835 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21837 else
21838 default_assemble_visibility (decl, vis);
21840 #endif
21842 enum rtx_code
21843 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21845 /* Reversal of FP compares needs care -- an ordered compare
21846 becomes an unordered compare and vice versa. */
21847 if (mode == CCFPmode
21848 && (!flag_finite_math_only
21849 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21850 || code == UNEQ || code == LTGT))
21851 return reverse_condition_maybe_unordered (code);
21852 else
21853 return reverse_condition (code);
21856 /* Generate a compare for CODE. Return a brand-new rtx that
21857 represents the result of the compare. */
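/* As an illustration (exact registers depend on allocation, and the
   mnemonics assume output without -mregnames): a signed DImode
   (gt r3 r4) gets CCmode and typically emits "cmpd 0,3,4", an unsigned
   (gtu ...) gets CCUNSmode and "cmpld 0,3,4", and a DFmode compare
   gets CCFPmode and "fcmpu 0,1,2".  */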
21859 static rtx
21860 rs6000_generate_compare (rtx cmp, machine_mode mode)
21862 machine_mode comp_mode;
21863 rtx compare_result;
21864 enum rtx_code code = GET_CODE (cmp);
21865 rtx op0 = XEXP (cmp, 0);
21866 rtx op1 = XEXP (cmp, 1);
21868 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21869 comp_mode = CCmode;
21870 else if (FLOAT_MODE_P (mode))
21871 comp_mode = CCFPmode;
21872 else if (code == GTU || code == LTU
21873 || code == GEU || code == LEU)
21874 comp_mode = CCUNSmode;
21875 else if ((code == EQ || code == NE)
21876 && unsigned_reg_p (op0)
21877 && (unsigned_reg_p (op1)
21878 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21879 /* These are unsigned values; perhaps there will be a later
21880 ordering compare that can be shared with this one. */
21881 comp_mode = CCUNSmode;
21882 else
21883 comp_mode = CCmode;
21885 /* If we have an unsigned compare, make sure we don't have a signed value as
21886 an immediate. */
21887 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21888 && INTVAL (op1) < 0)
21890 op0 = copy_rtx_if_shared (op0);
21891 op1 = force_reg (GET_MODE (op0), op1);
21892 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21895 /* First, the compare. */
21896 compare_result = gen_reg_rtx (comp_mode);
21898 /* E500 FP compare instructions on the GPRs. Yuck! */
21899 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
21900 && FLOAT_MODE_P (mode))
21902 rtx cmp, or_result, compare_result2;
21903 machine_mode op_mode = GET_MODE (op0);
21904 bool reverse_p;
21906 if (op_mode == VOIDmode)
21907 op_mode = GET_MODE (op1);
21909 /* First reverse the condition codes that aren't directly supported. */
21910 switch (code)
21912 case NE:
21913 case UNLT:
21914 case UNLE:
21915 case UNGT:
21916 case UNGE:
21917 code = reverse_condition_maybe_unordered (code);
21918 reverse_p = true;
21919 break;
21921 case EQ:
21922 case LT:
21923 case LE:
21924 case GT:
21925 case GE:
21926 reverse_p = false;
21927 break;
21929 default:
21930 gcc_unreachable ();
21933 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
21934 This explains the following mess. */
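/* For example, GE is handled below as a GT compare plus a separate EQ
   compare whose results are ORed together, and NE is first reversed to
   EQ with reverse_p set, so the final test emitted is only ever an EQ
   or NE on the GT bit.  */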
21936 switch (code)
21938 case EQ:
21939 switch (op_mode)
21941 case SFmode:
21942 cmp = (flag_finite_math_only && !flag_trapping_math)
21943 ? gen_tstsfeq_gpr (compare_result, op0, op1)
21944 : gen_cmpsfeq_gpr (compare_result, op0, op1);
21945 break;
21947 case DFmode:
21948 cmp = (flag_finite_math_only && !flag_trapping_math)
21949 ? gen_tstdfeq_gpr (compare_result, op0, op1)
21950 : gen_cmpdfeq_gpr (compare_result, op0, op1);
21951 break;
21953 case TFmode:
21954 case IFmode:
21955 case KFmode:
21956 cmp = (flag_finite_math_only && !flag_trapping_math)
21957 ? gen_tsttfeq_gpr (compare_result, op0, op1)
21958 : gen_cmptfeq_gpr (compare_result, op0, op1);
21959 break;
21961 default:
21962 gcc_unreachable ();
21964 break;
21966 case GT:
21967 case GE:
21968 switch (op_mode)
21970 case SFmode:
21971 cmp = (flag_finite_math_only && !flag_trapping_math)
21972 ? gen_tstsfgt_gpr (compare_result, op0, op1)
21973 : gen_cmpsfgt_gpr (compare_result, op0, op1);
21974 break;
21976 case DFmode:
21977 cmp = (flag_finite_math_only && !flag_trapping_math)
21978 ? gen_tstdfgt_gpr (compare_result, op0, op1)
21979 : gen_cmpdfgt_gpr (compare_result, op0, op1);
21980 break;
21982 case TFmode:
21983 case IFmode:
21984 case KFmode:
21985 cmp = (flag_finite_math_only && !flag_trapping_math)
21986 ? gen_tsttfgt_gpr (compare_result, op0, op1)
21987 : gen_cmptfgt_gpr (compare_result, op0, op1);
21988 break;
21990 default:
21991 gcc_unreachable ();
21993 break;
21995 case LT:
21996 case LE:
21997 switch (op_mode)
21999 case SFmode:
22000 cmp = (flag_finite_math_only && !flag_trapping_math)
22001 ? gen_tstsflt_gpr (compare_result, op0, op1)
22002 : gen_cmpsflt_gpr (compare_result, op0, op1);
22003 break;
22005 case DFmode:
22006 cmp = (flag_finite_math_only && !flag_trapping_math)
22007 ? gen_tstdflt_gpr (compare_result, op0, op1)
22008 : gen_cmpdflt_gpr (compare_result, op0, op1);
22009 break;
22011 case TFmode:
22012 case IFmode:
22013 case KFmode:
22014 cmp = (flag_finite_math_only && !flag_trapping_math)
22015 ? gen_tsttflt_gpr (compare_result, op0, op1)
22016 : gen_cmptflt_gpr (compare_result, op0, op1);
22017 break;
22019 default:
22020 gcc_unreachable ();
22022 break;
22024 default:
22025 gcc_unreachable ();
22028 /* Synthesize LE and GE from LT/GT || EQ. */
22029 if (code == LE || code == GE)
22031 emit_insn (cmp);
22033 compare_result2 = gen_reg_rtx (CCFPmode);
22035 /* Do the EQ. */
22036 switch (op_mode)
22038 case SFmode:
22039 cmp = (flag_finite_math_only && !flag_trapping_math)
22040 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
22041 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
22042 break;
22044 case DFmode:
22045 cmp = (flag_finite_math_only && !flag_trapping_math)
22046 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
22047 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
22048 break;
22050 case TFmode:
22051 case IFmode:
22052 case KFmode:
22053 cmp = (flag_finite_math_only && !flag_trapping_math)
22054 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
22055 : gen_cmptfeq_gpr (compare_result2, op0, op1);
22056 break;
22058 default:
22059 gcc_unreachable ();
22062 emit_insn (cmp);
22064 /* OR them together. */
22065 or_result = gen_reg_rtx (CCFPmode);
22066 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
22067 compare_result2);
22068 compare_result = or_result;
22071 code = reverse_p ? NE : EQ;
22073 emit_insn (cmp);
22076 /* IEEE 128-bit support in VSX registers when we do not have hardware
22077 support. */
22078 else if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
22080 rtx libfunc = NULL_RTX;
22081 bool check_nan = false;
22082 rtx dest;
22084 switch (code)
22086 case EQ:
22087 case NE:
22088 libfunc = optab_libfunc (eq_optab, mode);
22089 break;
22091 case GT:
22092 case GE:
22093 libfunc = optab_libfunc (ge_optab, mode);
22094 break;
22096 case LT:
22097 case LE:
22098 libfunc = optab_libfunc (le_optab, mode);
22099 break;
22101 case UNORDERED:
22102 case ORDERED:
22103 libfunc = optab_libfunc (unord_optab, mode);
22104 code = (code == UNORDERED) ? NE : EQ;
22105 break;
22107 case UNGE:
22108 case UNGT:
22109 check_nan = true;
22110 libfunc = optab_libfunc (ge_optab, mode);
22111 code = (code == UNGE) ? GE : GT;
22112 break;
22114 case UNLE:
22115 case UNLT:
22116 check_nan = true;
22117 libfunc = optab_libfunc (le_optab, mode);
22118 code = (code == UNLE) ? LE : LT;
22119 break;
22121 case UNEQ:
22122 case LTGT:
22123 check_nan = true;
22124 libfunc = optab_libfunc (eq_optab, mode);
22125 code = (code == UNEQ) ? EQ : NE;
22126 break;
22128 default:
22129 gcc_unreachable ();
22132 gcc_assert (libfunc);
22134 if (!check_nan)
22135 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22136 SImode, 2, op0, mode, op1, mode);
22138 /* The library signals an exception for signalling NaNs, so we need to
22139 handle isgreater, etc. by first checking isordered. */
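/* A sketch of the sequence generated below, assuming KFmode and the
   default libgcc names:
	dest = 1;
	if (__unordkf2 (op0, op1) != 0) goto join;
	dest = __gekf2 (op0, op1) >= 0;		(for UNGE)
     join:
	... compare dest against zero ...  */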
22140 else
22142 rtx ne_rtx, normal_dest, unord_dest;
22143 rtx unord_func = optab_libfunc (unord_optab, mode);
22144 rtx join_label = gen_label_rtx ();
22145 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
22146 rtx unord_cmp = gen_reg_rtx (comp_mode);
22149 /* Test for either value being a NaN. */
22150 gcc_assert (unord_func);
22151 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22152 SImode, 2, op0, mode, op1,
22153 mode);
22155 /* Set value (1) if either value is a NaN, and jump to the join
22156 label. */
22157 dest = gen_reg_rtx (SImode);
22158 emit_move_insn (dest, const1_rtx);
22159 emit_insn (gen_rtx_SET (unord_cmp,
22160 gen_rtx_COMPARE (comp_mode, unord_dest,
22161 const0_rtx)));
22163 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22164 emit_jump_insn (gen_rtx_SET (pc_rtx,
22165 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22166 join_ref,
22167 pc_rtx)));
22169 /* Do the normal comparison, knowing that the values are not
22170 NaNs. */
22171 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22172 SImode, 2, op0, mode, op1,
22173 mode);
22175 emit_insn (gen_cstoresi4 (dest,
22176 gen_rtx_fmt_ee (code, SImode, normal_dest,
22177 const0_rtx),
22178 normal_dest, const0_rtx));
22180 /* Join the NaN and non-NaN paths. Compare dest against 0. */
22181 emit_label (join_label);
22182 code = NE;
22185 emit_insn (gen_rtx_SET (compare_result,
22186 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22189 else
22191 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22192 CLOBBERs to match cmptf_internal2 pattern. */
22193 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22194 && FLOAT128_IBM_P (GET_MODE (op0))
22195 && TARGET_HARD_FLOAT && TARGET_FPRS)
22196 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22197 gen_rtvec (10,
22198 gen_rtx_SET (compare_result,
22199 gen_rtx_COMPARE (comp_mode, op0, op1)),
22200 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22201 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22202 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22203 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22204 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22205 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22206 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22207 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22208 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22209 else if (GET_CODE (op1) == UNSPEC
22210 && XINT (op1, 1) == UNSPEC_SP_TEST)
22212 rtx op1b = XVECEXP (op1, 0, 0);
22213 comp_mode = CCEQmode;
22214 compare_result = gen_reg_rtx (CCEQmode);
22215 if (TARGET_64BIT)
22216 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22217 else
22218 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22220 else
22221 emit_insn (gen_rtx_SET (compare_result,
22222 gen_rtx_COMPARE (comp_mode, op0, op1)));
22225 /* Some kinds of FP comparisons need an OR operation;
22226 under flag_finite_math_only we don't bother. */
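/* For example, (le x y) is computed below as the IOR of the LT and EQ
   bits of the CCFP result (a "cror" at the machine level), after which
   only a plain EQ test on the CCEQ register is needed.  */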
22227 if (FLOAT_MODE_P (mode)
22228 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22229 && !flag_finite_math_only
22230 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
22231 && (code == LE || code == GE
22232 || code == UNEQ || code == LTGT
22233 || code == UNGT || code == UNLT))
22235 enum rtx_code or1, or2;
22236 rtx or1_rtx, or2_rtx, compare2_rtx;
22237 rtx or_result = gen_reg_rtx (CCEQmode);
22239 switch (code)
22241 case LE: or1 = LT; or2 = EQ; break;
22242 case GE: or1 = GT; or2 = EQ; break;
22243 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22244 case LTGT: or1 = LT; or2 = GT; break;
22245 case UNGT: or1 = UNORDERED; or2 = GT; break;
22246 case UNLT: or1 = UNORDERED; or2 = LT; break;
22247 default: gcc_unreachable ();
22249 validate_condition_mode (or1, comp_mode);
22250 validate_condition_mode (or2, comp_mode);
22251 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22252 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22253 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22254 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22255 const_true_rtx);
22256 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22258 compare_result = or_result;
22259 code = EQ;
22262 validate_condition_mode (code, GET_MODE (compare_result));
22264 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22268 /* Return the diagnostic message string if the binary operation OP is
22269 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22271 static const char*
22272 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22273 const_tree type1,
22274 const_tree type2)
22276 machine_mode mode1 = TYPE_MODE (type1);
22277 machine_mode mode2 = TYPE_MODE (type2);
22279 /* For complex modes, use the inner type. */
22280 if (COMPLEX_MODE_P (mode1))
22281 mode1 = GET_MODE_INNER (mode1);
22283 if (COMPLEX_MODE_P (mode2))
22284 mode2 = GET_MODE_INNER (mode2);
22286 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22287 double to intermix unless -mfloat128-convert. */
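/* For example, given "__float128 f; __ibm128 g;", the expression
   "f + g" has mode1 == KFmode and mode2 == IFmode and is diagnosed
   below unless -mfloat128-convert is in effect.  */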
22288 if (mode1 == mode2)
22289 return NULL;
22291 if (!TARGET_FLOAT128_CVT)
22293 if ((mode1 == KFmode && mode2 == IFmode)
22294 || (mode1 == IFmode && mode2 == KFmode))
22295 return N_("__float128 and __ibm128 cannot be used in the same "
22296 "expression");
22298 if (TARGET_IEEEQUAD
22299 && ((mode1 == IFmode && mode2 == TFmode)
22300 || (mode1 == TFmode && mode2 == IFmode)))
22301 return N_("__ibm128 and long double cannot be used in the same "
22302 "expression");
22304 if (!TARGET_IEEEQUAD
22305 && ((mode1 == KFmode && mode2 == TFmode)
22306 || (mode1 == TFmode && mode2 == KFmode)))
22307 return N_("__float128 and long double cannot be used in the same "
22308 "expression");
22311 return NULL;
22315 /* Expand floating point conversion to/from __float128 and __ibm128. */
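/* For example, truncating __float128 to double uses the trunckfdf2_hw
   pattern (an ISA 3.0 convert instruction) when TARGET_FLOAT128_HW,
   and otherwise goes through trunc_optab to the libgcc routine
   (__trunckfdf2 with the default names).  */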
22317 void
22318 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22320 machine_mode dest_mode = GET_MODE (dest);
22321 machine_mode src_mode = GET_MODE (src);
22322 convert_optab cvt = unknown_optab;
22323 bool do_move = false;
22324 rtx libfunc = NULL_RTX;
22325 rtx dest2;
22326 typedef rtx (*rtx_2func_t) (rtx, rtx);
22327 rtx_2func_t hw_convert = (rtx_2func_t)0;
22328 size_t kf_or_tf;
22330 struct hw_conv_t {
22331 rtx_2func_t from_df;
22332 rtx_2func_t from_sf;
22333 rtx_2func_t from_si_sign;
22334 rtx_2func_t from_si_uns;
22335 rtx_2func_t from_di_sign;
22336 rtx_2func_t from_di_uns;
22337 rtx_2func_t to_df;
22338 rtx_2func_t to_sf;
22339 rtx_2func_t to_si_sign;
22340 rtx_2func_t to_si_uns;
22341 rtx_2func_t to_di_sign;
22342 rtx_2func_t to_di_uns;
22343 } hw_conversions[2] = {
22344 /* conversions to/from KFmode */
22346 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22347 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22348 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22349 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22350 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22351 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22352 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22353 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22354 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22355 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22356 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22357 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22360 /* conversions to/from TFmode */
22362 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22363 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22364 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22365 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22366 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22367 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22368 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22369 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22370 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22371 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22372 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22373 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22377 if (dest_mode == src_mode)
22378 gcc_unreachable ();
22380 /* Eliminate memory operations. */
22381 if (MEM_P (src))
22382 src = force_reg (src_mode, src);
22384 if (MEM_P (dest))
22386 rtx tmp = gen_reg_rtx (dest_mode);
22387 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22388 rs6000_emit_move (dest, tmp, dest_mode);
22389 return;
22392 /* Convert to IEEE 128-bit floating point. */
22393 if (FLOAT128_IEEE_P (dest_mode))
22395 if (dest_mode == KFmode)
22396 kf_or_tf = 0;
22397 else if (dest_mode == TFmode)
22398 kf_or_tf = 1;
22399 else
22400 gcc_unreachable ();
22402 switch (src_mode)
22404 case DFmode:
22405 cvt = sext_optab;
22406 hw_convert = hw_conversions[kf_or_tf].from_df;
22407 break;
22409 case SFmode:
22410 cvt = sext_optab;
22411 hw_convert = hw_conversions[kf_or_tf].from_sf;
22412 break;
22414 case KFmode:
22415 case IFmode:
22416 case TFmode:
22417 if (FLOAT128_IBM_P (src_mode))
22418 cvt = sext_optab;
22419 else
22420 do_move = true;
22421 break;
22423 case SImode:
22424 if (unsigned_p)
22426 cvt = ufloat_optab;
22427 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22429 else
22431 cvt = sfloat_optab;
22432 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22434 break;
22436 case DImode:
22437 if (unsigned_p)
22439 cvt = ufloat_optab;
22440 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22442 else
22444 cvt = sfloat_optab;
22445 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22447 break;
22449 default:
22450 gcc_unreachable ();
22454 /* Convert from IEEE 128-bit floating point. */
22455 else if (FLOAT128_IEEE_P (src_mode))
22457 if (src_mode == KFmode)
22458 kf_or_tf = 0;
22459 else if (src_mode == TFmode)
22460 kf_or_tf = 1;
22461 else
22462 gcc_unreachable ();
22464 switch (dest_mode)
22466 case DFmode:
22467 cvt = trunc_optab;
22468 hw_convert = hw_conversions[kf_or_tf].to_df;
22469 break;
22471 case SFmode:
22472 cvt = trunc_optab;
22473 hw_convert = hw_conversions[kf_or_tf].to_sf;
22474 break;
22476 case KFmode:
22477 case IFmode:
22478 case TFmode:
22479 if (FLOAT128_IBM_P (dest_mode))
22480 cvt = trunc_optab;
22481 else
22482 do_move = true;
22483 break;
22485 case SImode:
22486 if (unsigned_p)
22488 cvt = ufix_optab;
22489 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22491 else
22493 cvt = sfix_optab;
22494 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22496 break;
22498 case DImode:
22499 if (unsigned_p)
22501 cvt = ufix_optab;
22502 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22504 else
22506 cvt = sfix_optab;
22507 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22509 break;
22511 default:
22512 gcc_unreachable ();
22516 /* Both IBM format. */
22517 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22518 do_move = true;
22520 else
22521 gcc_unreachable ();
22523 /* Handle conversion between TFmode/KFmode. */
22524 if (do_move)
22525 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22527 /* Handle conversion if we have hardware support. */
22528 else if (TARGET_FLOAT128_HW && hw_convert)
22529 emit_insn ((hw_convert) (dest, src));
22531 /* Call an external function to do the conversion. */
22532 else if (cvt != unknown_optab)
22534 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22535 gcc_assert (libfunc != NULL_RTX);
22537 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode, 1, src,
22538 src_mode);
22540 gcc_assert (dest2 != NULL_RTX);
22541 if (!rtx_equal_p (dest, dest2))
22542 emit_move_insn (dest, dest2);
22545 else
22546 gcc_unreachable ();
22548 return;
22551 /* Split a conversion from __float128 to an integer type into separate insns.
22552 OPERANDS points to the destination, source, and V2DI temporary
22553 register. CODE is either FIX or UNSIGNED_FIX. */
22555 void
22556 convert_float128_to_int (rtx *operands, enum rtx_code code)
22558 rtx dest = operands[0];
22559 rtx src = operands[1];
22560 rtx tmp = operands[2];
22561 rtx cvt;
22562 rtvec cvt_vec;
22563 rtx cvt_unspec;
22564 rtvec move_vec;
22565 rtx move_unspec;
22567 if (GET_CODE (tmp) == SCRATCH)
22568 tmp = gen_reg_rtx (V2DImode);
22570 if (MEM_P (dest))
22571 dest = rs6000_address_for_fpconvert (dest);
22573 /* Generate the actual convert insn of the form:
22574 (set (tmp) (unspec:V2DI [(fix:SI (reg:KF))] UNSPEC_IEEE128_CONVERT)). */
22575 cvt = gen_rtx_fmt_e (code, GET_MODE (dest), src);
22576 cvt_vec = gen_rtvec (1, cvt);
22577 cvt_unspec = gen_rtx_UNSPEC (V2DImode, cvt_vec, UNSPEC_IEEE128_CONVERT);
22578 emit_insn (gen_rtx_SET (tmp, cvt_unspec));
22580 /* Generate the move insn of the form:
22581 (set (dest:SI) (unspec:SI [(tmp:V2DI)] UNSPEC_IEEE128_MOVE)). */
22582 move_vec = gen_rtvec (1, tmp);
22583 move_unspec = gen_rtx_UNSPEC (GET_MODE (dest), move_vec, UNSPEC_IEEE128_MOVE);
22584 emit_insn (gen_rtx_SET (dest, move_unspec));
22587 /* Split a conversion from an integer type to __float128 into separate insns.
22588 OPERANDS points to the destination, source, and V2DI temporary
22589 register. CODE is either FLOAT or UNSIGNED_FLOAT. */
22591 void
22592 convert_int_to_float128 (rtx *operands, enum rtx_code code)
22594 rtx dest = operands[0];
22595 rtx src = operands[1];
22596 rtx tmp = operands[2];
22597 rtx cvt;
22598 rtvec cvt_vec;
22599 rtx cvt_unspec;
22600 rtvec move_vec;
22601 rtx move_unspec;
22602 rtx unsigned_flag;
22604 if (GET_CODE (tmp) == SCRATCH)
22605 tmp = gen_reg_rtx (V2DImode);
22607 if (MEM_P (src))
22608 src = rs6000_address_for_fpconvert (src);
22610 /* Generate the move of the integer into the Altivec register of the form:
22611 (set (tmp:V2DI) (unspec:V2DI [(src:SI)
22612 (const_int 0)] UNSPEC_IEEE128_MOVE)).
22614 or:
22615 (set (tmp:V2DI) (unspec:V2DI [(src:DI)] UNSPEC_IEEE128_MOVE)). */
22617 if (GET_MODE (src) == SImode)
22619 unsigned_flag = (code == UNSIGNED_FLOAT) ? const1_rtx : const0_rtx;
22620 move_vec = gen_rtvec (2, src, unsigned_flag);
22622 else
22623 move_vec = gen_rtvec (1, src);
22625 move_unspec = gen_rtx_UNSPEC (V2DImode, move_vec, UNSPEC_IEEE128_MOVE);
22626 emit_insn (gen_rtx_SET (tmp, move_unspec));
22628 /* Generate the actual convert insn of the form:
22629 (set (dest:KF) (float:KF (unspec:DI [(tmp:V2DI)]
22630 UNSPEC_IEEE128_CONVERT))). */
22631 cvt_vec = gen_rtvec (1, tmp);
22632 cvt_unspec = gen_rtx_UNSPEC (DImode, cvt_vec, UNSPEC_IEEE128_CONVERT);
22633 cvt = gen_rtx_fmt_e (code, GET_MODE (dest), cvt_unspec);
22634 emit_insn (gen_rtx_SET (dest, cvt));
22638 /* Emit the RTL for an sISEL pattern. */
22640 void
22641 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
22643 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
22646 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22647 can be used as that dest register. Return the dest register. */
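/* For example, comparing R against the constant 5 emits
   scratch = R ^ 5 (a single xori), while a constant that is not a
   logical_operand, say -100000, is handled as scratch = R + 100000.  */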
22649 rtx
22650 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22652 if (op2 == const0_rtx)
22653 return op1;
22655 if (GET_CODE (scratch) == SCRATCH)
22656 scratch = gen_reg_rtx (mode);
22658 if (logical_operand (op2, mode))
22659 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22660 else
22661 emit_insn (gen_rtx_SET (scratch,
22662 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22664 return scratch;
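/* Emit code for a setcc-style operation: set OPERANDS[0] to 1 if the
   comparison OPERANDS[1] is true and to 0 otherwise.  */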
22667 void
22668 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22670 rtx condition_rtx;
22671 machine_mode op_mode;
22672 enum rtx_code cond_code;
22673 rtx result = operands[0];
22675 condition_rtx = rs6000_generate_compare (operands[1], mode);
22676 cond_code = GET_CODE (condition_rtx);
22678 if (FLOAT_MODE_P (mode)
22679 && !TARGET_FPRS && TARGET_HARD_FLOAT)
22681 rtx t;
22683 PUT_MODE (condition_rtx, SImode);
22684 t = XEXP (condition_rtx, 0);
22686 gcc_assert (cond_code == NE || cond_code == EQ);
22688 if (cond_code == NE)
22689 emit_insn (gen_e500_flip_gt_bit (t, t));
22691 emit_insn (gen_move_from_CR_gt_bit (result, t));
22692 return;
22695 if (cond_code == NE
22696 || cond_code == GE || cond_code == LE
22697 || cond_code == GEU || cond_code == LEU
22698 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22700 rtx not_result = gen_reg_rtx (CCEQmode);
22701 rtx not_op, rev_cond_rtx;
22702 machine_mode cc_mode;
22704 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22706 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22707 SImode, XEXP (condition_rtx, 0), const0_rtx);
22708 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22709 emit_insn (gen_rtx_SET (not_result, not_op));
22710 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22713 op_mode = GET_MODE (XEXP (operands[1], 0));
22714 if (op_mode == VOIDmode)
22715 op_mode = GET_MODE (XEXP (operands[1], 1));
22717 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22719 PUT_MODE (condition_rtx, DImode);
22720 convert_move (result, condition_rtx, 0);
22722 else
22724 PUT_MODE (condition_rtx, SImode);
22725 emit_insn (gen_rtx_SET (result, condition_rtx));
22729 /* Emit a conditional branch to the label in OPERANDS[3], testing the comparison in OPERANDS[0]. */
22731 void
22732 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22734 rtx condition_rtx, loc_ref;
22736 condition_rtx = rs6000_generate_compare (operands[0], mode);
22737 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22738 emit_jump_insn (gen_rtx_SET (pc_rtx,
22739 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22740 loc_ref, pc_rtx)));
22743 /* Return the string to output a conditional branch to LABEL, which is
22744 the operand template of the label, or NULL if the branch is really a
22745 conditional return.
22747 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22748 condition code register and its mode specifies what kind of
22749 comparison we made.
22751 REVERSED is nonzero if we should reverse the sense of the comparison.
22753 INSN is the insn. */
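/* Illustrative outputs (CR naming as printed without -mregnames):
   a likely-taken EQ branch on cr0 becomes "beq+ 0,.L5", a conditional
   return becomes "beqlr 0", and a too-distant branch is inverted and
   emitted as "bne 0,$+8" followed by "b .L5".  */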
22755 char *
22756 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22758 static char string[64];
22759 enum rtx_code code = GET_CODE (op);
22760 rtx cc_reg = XEXP (op, 0);
22761 machine_mode mode = GET_MODE (cc_reg);
22762 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22763 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22764 int really_reversed = reversed ^ need_longbranch;
22765 char *s = string;
22766 const char *ccode;
22767 const char *pred;
22768 rtx note;
22770 validate_condition_mode (code, mode);
22772 /* Work out which way this really branches. We could use
22773 reverse_condition_maybe_unordered here always but this
22774 makes the resulting assembler clearer. */
22775 if (really_reversed)
22777 /* Reversal of FP compares needs care -- an ordered compare
22778 becomes an unordered compare and vice versa. */
22779 if (mode == CCFPmode)
22780 code = reverse_condition_maybe_unordered (code);
22781 else
22782 code = reverse_condition (code);
22785 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
22787 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
22788 to the GT bit. */
22789 switch (code)
22791 case EQ:
22792 /* Opposite of GT. */
22793 code = GT;
22794 break;
22796 case NE:
22797 code = UNLE;
22798 break;
22800 default:
22801 gcc_unreachable ();
22805 switch (code)
22807 /* Not all of these are actually distinct opcodes, but
22808 we distinguish them for clarity of the resulting assembler. */
22809 case NE: case LTGT:
22810 ccode = "ne"; break;
22811 case EQ: case UNEQ:
22812 ccode = "eq"; break;
22813 case GE: case GEU:
22814 ccode = "ge"; break;
22815 case GT: case GTU: case UNGT:
22816 ccode = "gt"; break;
22817 case LE: case LEU:
22818 ccode = "le"; break;
22819 case LT: case LTU: case UNLT:
22820 ccode = "lt"; break;
22821 case UNORDERED: ccode = "un"; break;
22822 case ORDERED: ccode = "nu"; break;
22823 case UNGE: ccode = "nl"; break;
22824 case UNLE: ccode = "ng"; break;
22825 default:
22826 gcc_unreachable ();
22829 /* Maybe we have a guess as to how likely the branch is. */
22830 pred = "";
22831 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22832 if (note != NULL_RTX)
22834 /* PROB is the difference from 50%. */
22835 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
22837 /* Only hint for highly probable/improbable branches on newer cpus when
22838 we have real profile data, as static prediction overrides processor
22839 dynamic prediction. For older cpus we may as well always hint, but
22840 assume not taken for branches that are very close to 50% as a
22841 mispredicted taken branch is more expensive than a
22842 mispredicted not-taken branch. */
22843 if (rs6000_always_hint
22844 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22845 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22846 && br_prob_note_reliable_p (note)))
22848 if (abs (prob) > REG_BR_PROB_BASE / 20
22849 && ((prob > 0) ^ need_longbranch))
22850 pred = "+";
22851 else
22852 pred = "-";
22856 if (label == NULL)
22857 s += sprintf (s, "b%slr%s ", ccode, pred);
22858 else
22859 s += sprintf (s, "b%s%s ", ccode, pred);
22861 /* We need to escape any '%' characters in the reg_names string.
22862 Assume they'd only be the first character.... */
22863 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22864 *s++ = '%';
22865 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22867 if (label != NULL)
22869 /* If the branch distance was too far, we may have to use an
22870 unconditional branch to go the distance. */
22871 if (need_longbranch)
22872 s += sprintf (s, ",$+8\n\tb %s", label);
22873 else
22874 s += sprintf (s, ",%s", label);
22877 return string;
22880 /* Return the string to flip the GT bit on a CR. */
22881 char *
22882 output_e500_flip_gt_bit (rtx dst, rtx src)
22884 static char string[64];
22885 int a, b;
22887 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
22888 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
22890 /* GT bit. */
22891 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
22892 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
22894 sprintf (string, "crnot %d,%d", a, b);
22895 return string;
22898 /* Return insn for VSX or Altivec comparisons. */
22900 static rtx
22901 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22903 rtx mask;
22904 machine_mode mode = GET_MODE (op0);
22906 switch (code)
22908 default:
22909 break;
22911 case GE:
22912 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22913 return NULL_RTX;
22915 case EQ:
22916 case GT:
22917 case GTU:
22918 case ORDERED:
22919 case UNORDERED:
22920 case UNEQ:
22921 case LTGT:
22922 mask = gen_reg_rtx (mode);
22923 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22924 return mask;
22927 return NULL_RTX;
22930 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22931 DMODE is expected destination mode. This is a recursive function. */
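/* The recursion is shallow: e.g. (lt a b) is retried as (gt b a);
   (ne a b) recurses once to build (eq a b) and complements the mask;
   integer (ge a b) recurses for (gt a b) and (eq a b) and ORs the two
   masks together.  At most two levels are ever needed.  */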
22933 static rtx
22934 rs6000_emit_vector_compare (enum rtx_code rcode,
22935 rtx op0, rtx op1,
22936 machine_mode dmode)
22938 rtx mask;
22939 bool swap_operands = false;
22940 bool try_again = false;
22942 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22943 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22945 /* See if the comparison works as is. */
22946 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22947 if (mask)
22948 return mask;
22950 switch (rcode)
22952 case LT:
22953 rcode = GT;
22954 swap_operands = true;
22955 try_again = true;
22956 break;
22957 case LTU:
22958 rcode = GTU;
22959 swap_operands = true;
22960 try_again = true;
22961 break;
22962 case NE:
22963 case UNLE:
22964 case UNLT:
22965 case UNGE:
22966 case UNGT:
22967 /* Invert condition and try again.
22968 e.g., A != B becomes ~(A==B). */
22970 enum rtx_code rev_code;
22971 enum insn_code nor_code;
22972 rtx mask2;
22974 rev_code = reverse_condition_maybe_unordered (rcode);
22975 if (rev_code == UNKNOWN)
22976 return NULL_RTX;
22978 nor_code = optab_handler (one_cmpl_optab, dmode);
22979 if (nor_code == CODE_FOR_nothing)
22980 return NULL_RTX;
22982 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22983 if (!mask2)
22984 return NULL_RTX;
22986 mask = gen_reg_rtx (dmode);
22987 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22988 return mask;
22990 break;
22991 case GE:
22992 case GEU:
22993 case LE:
22994 case LEU:
22995 /* Try GT/GTU/LT/LTU OR EQ */
22997 rtx c_rtx, eq_rtx;
22998 enum insn_code ior_code;
22999 enum rtx_code new_code;
23001 switch (rcode)
23003 case GE:
23004 new_code = GT;
23005 break;
23007 case GEU:
23008 new_code = GTU;
23009 break;
23011 case LE:
23012 new_code = LT;
23013 break;
23015 case LEU:
23016 new_code = LTU;
23017 break;
23019 default:
23020 gcc_unreachable ();
23023 ior_code = optab_handler (ior_optab, dmode);
23024 if (ior_code == CODE_FOR_nothing)
23025 return NULL_RTX;
23027 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
23028 if (!c_rtx)
23029 return NULL_RTX;
23031 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
23032 if (!eq_rtx)
23033 return NULL_RTX;
23035 mask = gen_reg_rtx (dmode);
23036 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
23037 return mask;
23039 break;
23040 default:
23041 return NULL_RTX;
23044 if (try_again)
23046 if (swap_operands)
23047 std::swap (op0, op1);
23049 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
23050 if (mask)
23051 return mask;
23054 /* You only get two chances. */
23055 return NULL_RTX;
23058 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
23059 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
23060 operands for the relation operation COND. */
23062 int
23063 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
23064 rtx cond, rtx cc_op0, rtx cc_op1)
23066 machine_mode dest_mode = GET_MODE (dest);
23067 machine_mode mask_mode = GET_MODE (cc_op0);
23068 enum rtx_code rcode = GET_CODE (cond);
23069 machine_mode cc_mode = CCmode;
23070 rtx mask;
23071 rtx cond2;
23073 bool invert_move = false;
23075 if (VECTOR_UNIT_NONE_P (dest_mode))
23076 return 0;
23078 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
23079 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
23081 switch (rcode)
23083 /* Swap operands if we can, and fall back to doing the operation as
23084 specified, and doing a NOR to invert the test. */
23085 case NE:
23086 case UNLE:
23087 case UNLT:
23088 case UNGE:
23089 case UNGT:
23090 /* Invert condition and try again.
23091 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
23092 invert_move = true;
23093 rcode = reverse_condition_maybe_unordered (rcode);
23094 if (rcode == UNKNOWN)
23095 return 0;
23096 break;
23098 /* Mark unsigned tests with CCUNSmode. */
23099 case GTU:
23100 case GEU:
23101 case LTU:
23102 case LEU:
23103 cc_mode = CCUNSmode;
23104 break;
23106 default:
23107 break;
23110 /* Get the vector mask for the given relational operations. */
23111 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
23113 if (!mask)
23114 return 0;
23116 if (invert_move)
23118 std::swap (op_true, op_false);
23123 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
23124 CONST0_RTX (dest_mode));
23125 emit_insn (gen_rtx_SET (dest,
23126 gen_rtx_IF_THEN_ELSE (dest_mode,
23127 cond2,
23128 op_true,
23129 op_false)));
23130 return 1;
23133 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
23134 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
23135 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
23136 hardware has no such operation. */
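/* For example, "a > b ? a : b" on DFmode values maps to SMAX (a single
   xsmaxcdp), while "a > b ? b : a" flips max_p and maps to SMIN
   (xsmincdp).  */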
23138 static int
23139 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23141 enum rtx_code code = GET_CODE (op);
23142 rtx op0 = XEXP (op, 0);
23143 rtx op1 = XEXP (op, 1);
23144 machine_mode compare_mode = GET_MODE (op0);
23145 machine_mode result_mode = GET_MODE (dest);
23146 bool max_p = false;
23148 if (result_mode != compare_mode)
23149 return 0;
23151 if (code == GE || code == GT)
23152 max_p = true;
23153 else if (code == LE || code == LT)
23154 max_p = false;
23155 else
23156 return 0;
23158 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
23161 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
23162 max_p = !max_p;
23164 else
23165 return 0;
23167 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
23168 return 1;
23171 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
23172 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
23173 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
23174 zero/false. Return 0 if the hardware has no such operation. */
23176 static int
23177 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23179 enum rtx_code code = GET_CODE (op);
23180 rtx op0 = XEXP (op, 0);
23181 rtx op1 = XEXP (op, 1);
23182 machine_mode result_mode = GET_MODE (dest);
23183 rtx compare_rtx;
23184 rtx cmove_rtx;
23185 rtx clobber_rtx;
23187 if (!can_create_pseudo_p ())
23188 return 0;
23190 switch (code)
23192 case EQ:
23193 case GE:
23194 case GT:
23195 break;
23197 case NE:
23198 case LT:
23199 case LE:
23200 code = swap_condition (code);
23201 std::swap (op0, op1);
23202 break;
23204 default:
23205 return 0;
23208 /* Generate: [(parallel [(set (dest)
23209 (if_then_else (op (cmp1) (cmp2))
23210 (true)
23211 (false)))
23212 (clobber (scratch))])]. */
23214 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
23215 cmove_rtx = gen_rtx_SET (dest,
23216 gen_rtx_IF_THEN_ELSE (result_mode,
23217 compare_rtx,
23218 true_cond,
23219 false_cond));
23221 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
23222 emit_insn (gen_rtx_PARALLEL (VOIDmode,
23223 gen_rtvec (2, cmove_rtx, clobber_rtx)));
23225 return 1;
23228 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
23229 operands of the last comparison is nonzero/true, FALSE_COND if it
23230 is zero/false. Return 0 if the hardware has no such operation. */
23232 int
23233 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23235 enum rtx_code code = GET_CODE (op);
23236 rtx op0 = XEXP (op, 0);
23237 rtx op1 = XEXP (op, 1);
23238 machine_mode compare_mode = GET_MODE (op0);
23239 machine_mode result_mode = GET_MODE (dest);
23240 rtx temp;
23241 bool is_against_zero;
23243 /* These modes should always match. */
23244 if (GET_MODE (op1) != compare_mode
23245 /* In the isel case however, we can use a compare immediate, so
23246 op1 may be a small constant. */
23247 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23248 return 0;
23249 if (GET_MODE (true_cond) != result_mode)
23250 return 0;
23251 if (GET_MODE (false_cond) != result_mode)
23252 return 0;
23254 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23255 if (TARGET_P9_MINMAX
23256 && (compare_mode == SFmode || compare_mode == DFmode)
23257 && (result_mode == SFmode || result_mode == DFmode))
23259 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23260 return 1;
23262 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23263 return 1;
23266 /* Don't allow using floating point comparisons for integer results for
23267 now. */
23268 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23269 return 0;
23271 /* First, work out if the hardware can do this at all, or
23272 if it's too slow.... */
23273 if (!FLOAT_MODE_P (compare_mode))
23275 if (TARGET_ISEL)
23276 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23277 return 0;
23279 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
23280 && SCALAR_FLOAT_MODE_P (compare_mode))
23281 return 0;
23283 is_against_zero = op1 == CONST0_RTX (compare_mode);
23285 /* A floating-point subtract might overflow, underflow, or produce
23286 an inexact result, thus changing the floating-point flags, so it
23287 can't be generated if we care about that. It's safe if one side
23288 of the construct is zero, since then no subtract will be
23289 generated. */
23290 if (SCALAR_FLOAT_MODE_P (compare_mode)
23291 && flag_trapping_math && ! is_against_zero)
23292 return 0;
23294 /* Eliminate half of the comparisons by switching operands, this
23295 makes the remaining code simpler. */
23296 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23297 || code == LTGT || code == LT || code == UNLE)
23299 code = reverse_condition_maybe_unordered (code);
23300 temp = true_cond;
23301 true_cond = false_cond;
23302 false_cond = temp;
23305 /* UNEQ and LTGT take four instructions for a comparison with zero;
23306 it'll probably be faster to use a branch here too. */
23307 if (code == UNEQ && HONOR_NANS (compare_mode))
23308 return 0;
23310 /* We're going to try to implement comparisons by performing
23311 a subtract, then comparing against zero. Unfortunately,
23312 Inf - Inf is NaN which is not zero, and so if we don't
23313 know that the operand is finite and the comparison
23314 would treat EQ differently from UNORDERED, we can't do it. */
23315 if (HONOR_INFINITIES (compare_mode)
23316 && code != GT && code != UNGE
23317 && (GET_CODE (op1) != CONST_DOUBLE
23318 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23319 /* Constructs of the form (a OP b ? a : b) are safe. */
23320 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23321 || (! rtx_equal_p (op0, true_cond)
23322 && ! rtx_equal_p (op1, true_cond))))
23323 return 0;
23325 /* At this point we know we can use fsel. */
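/* fsel selects between its last two FP operands according to whether
   the first is >= 0.0, so every comparison below is massaged into a
   GE-against-zero test; e.g. (le x 0) is turned into (ge (neg x) 0).  */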
23327 /* Reduce the comparison to a comparison against zero. */
23328 if (! is_against_zero)
23330 temp = gen_reg_rtx (compare_mode);
23331 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23332 op0 = temp;
23333 op1 = CONST0_RTX (compare_mode);
23336 /* If we don't care about NaNs we can reduce some of the comparisons
23337 down to faster ones. */
23338 if (! HONOR_NANS (compare_mode))
23339 switch (code)
23341 case GT:
23342 code = LE;
23343 temp = true_cond;
23344 true_cond = false_cond;
23345 false_cond = temp;
23346 break;
23347 case UNGE:
23348 code = GE;
23349 break;
23350 case UNEQ:
23351 code = EQ;
23352 break;
23353 default:
23354 break;
23357 /* Now, reduce everything down to a GE. */
23358 switch (code)
23360 case GE:
23361 break;
23363 case LE:
23364 temp = gen_reg_rtx (compare_mode);
23365 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23366 op0 = temp;
23367 break;
23369 case ORDERED:
23370 temp = gen_reg_rtx (compare_mode);
23371 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23372 op0 = temp;
23373 break;
23375 case EQ:
23376 temp = gen_reg_rtx (compare_mode);
23377 emit_insn (gen_rtx_SET (temp,
23378 gen_rtx_NEG (compare_mode,
23379 gen_rtx_ABS (compare_mode, op0))));
23380 op0 = temp;
23381 break;
23383 case UNGE:
23384 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23385 temp = gen_reg_rtx (result_mode);
23386 emit_insn (gen_rtx_SET (temp,
23387 gen_rtx_IF_THEN_ELSE (result_mode,
23388 gen_rtx_GE (VOIDmode,
23389 op0, op1),
23390 true_cond, false_cond)));
23391 false_cond = true_cond;
23392 true_cond = temp;
23394 temp = gen_reg_rtx (compare_mode);
23395 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23396 op0 = temp;
23397 break;
23399 case GT:
23400 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23401 temp = gen_reg_rtx (result_mode);
23402 emit_insn (gen_rtx_SET (temp,
23403 gen_rtx_IF_THEN_ELSE (result_mode,
23404 gen_rtx_GE (VOIDmode,
23405 op0, op1),
23406 true_cond, false_cond)));
23407 true_cond = false_cond;
23408 false_cond = temp;
23410 temp = gen_reg_rtx (compare_mode);
23411 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23412 op0 = temp;
23413 break;
23415 default:
23416 gcc_unreachable ();
23419 emit_insn (gen_rtx_SET (dest,
23420 gen_rtx_IF_THEN_ELSE (result_mode,
23421 gen_rtx_GE (VOIDmode,
23422 op0, op1),
23423 true_cond, false_cond)));
23424 return 1;
23427 /* Same as above, but for ints (isel). */
23429 static int
23430 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23432 rtx condition_rtx, cr;
23433 machine_mode mode = GET_MODE (dest);
23434 enum rtx_code cond_code;
23435 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23436 bool signedp;
23438 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23439 return 0;
23441 /* We still have to do the compare, because isel doesn't do a
23442 compare, it just looks at the CRx bits set by a previous compare
23443 instruction. */
23444 condition_rtx = rs6000_generate_compare (op, mode);
23445 cond_code = GET_CODE (condition_rtx);
23446 cr = XEXP (condition_rtx, 0);
23447 signedp = GET_MODE (cr) == CCmode;
23449 isel_func = (mode == SImode
23450 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23451 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23453 switch (cond_code)
23455 case LT: case GT: case LTU: case GTU: case EQ:
23456 /* isel handles these directly. */
23457 break;
23459 default:
23460 /* We need to swap the sense of the comparison. */
23462 std::swap (false_cond, true_cond);
23463 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23465 break;
23468 false_cond = force_reg (mode, false_cond);
23469 if (true_cond != const0_rtx)
23470 true_cond = force_reg (mode, true_cond);
23472 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23474 return 1;
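/* Output the assembler template for an isel insn described by OPERANDS,
   reversing the condition (and swapping the two source operands) when it
   is one that isel cannot test directly.  */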
23477 const char *
23478 output_isel (rtx *operands)
23480 enum rtx_code code;
23482 code = GET_CODE (operands[1]);
23484 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
23486 gcc_assert (GET_CODE (operands[2]) == REG
23487 && GET_CODE (operands[3]) == REG);
23488 PUT_CODE (operands[1], reverse_condition (code));
23489 return "isel %0,%3,%2,%j1";
23492 return "isel %0,%2,%3,%j1";
23495 void
23496 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23498 machine_mode mode = GET_MODE (op0);
23499 enum rtx_code c;
23500 rtx target;
23502 /* VSX/altivec have direct min/max insns. */
23503 if ((code == SMAX || code == SMIN)
23504 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23505 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23507 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23508 return;
23511 if (code == SMAX || code == SMIN)
23512 c = GE;
23513 else
23514 c = GEU;
23516 if (code == SMAX || code == UMAX)
23517 target = emit_conditional_move (dest, c, op0, op1, mode,
23518 op0, op1, mode, 0);
23519 else
23520 target = emit_conditional_move (dest, c, op0, op1, mode,
23521 op1, op0, mode, 0);
23522 gcc_assert (target);
23523 if (target != dest)
23524 emit_move_insn (dest, target);
23527 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23528 the value to come from memory or to be already loaded into a GPR. */
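/* In other words: fetch the high-order doubleword of the 128-bit value
   into a GPR (an mfvsrd when it lives in a VSX register, a DImode load
   when it is in memory) and shift it right by 63 bits.  */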
23530 void
23531 rs6000_split_signbit (rtx dest, rtx src)
23533 machine_mode d_mode = GET_MODE (dest);
23534 machine_mode s_mode = GET_MODE (src);
23535 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23536 rtx shift_reg = dest_di;
23538 gcc_assert (REG_P (dest));
23539 gcc_assert (REG_P (src) || MEM_P (src));
23540 gcc_assert (s_mode == KFmode || s_mode == TFmode);
23542 if (MEM_P (src))
23544 rtx mem = (WORDS_BIG_ENDIAN
23545 ? adjust_address (src, DImode, 0)
23546 : adjust_address (src, DImode, 8));
23547 emit_insn (gen_rtx_SET (dest_di, mem));
23550 else
23552 unsigned int r = REGNO (src);
23554 /* If this is a VSX register, generate the special mfvsrd instruction
23555 to get it in a GPR. Until we support SF and DF modes, that will
23556 always be true. */
23557 gcc_assert (VSX_REGNO_P (r));
23559 if (s_mode == KFmode)
23560 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23561 else
23562 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23565 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23566 return;
23569 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23570 COND is true. Mark the jump as unlikely to be taken. */
23572 static void
23573 emit_unlikely_jump (rtx cond, rtx label)
23575 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
23576 rtx x;
23578 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23579 x = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23580 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
23583 /* A subroutine of the atomic operation splitters. Emit a load-locked
23584 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23585 the zero_extend operation. */
23587 static void
23588 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23590 rtx (*fn) (rtx, rtx) = NULL;
23592 switch (mode)
23594 case QImode:
23595 fn = gen_load_lockedqi;
23596 break;
23597 case HImode:
23598 fn = gen_load_lockedhi;
23599 break;
23600 case SImode:
23601 if (GET_MODE (mem) == QImode)
23602 fn = gen_load_lockedqi_si;
23603 else if (GET_MODE (mem) == HImode)
23604 fn = gen_load_lockedhi_si;
23605 else
23606 fn = gen_load_lockedsi;
23607 break;
23608 case DImode:
23609 fn = gen_load_lockeddi;
23610 break;
23611 case TImode:
23612 fn = gen_load_lockedti;
23613 break;
23614 default:
23615 gcc_unreachable ();
23617 emit_insn (fn (reg, mem));
23620 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23621 instruction in MODE. */
23623 static void
23624 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23626 rtx (*fn) (rtx, rtx, rtx) = NULL;
23628 switch (mode)
23630 case QImode:
23631 fn = gen_store_conditionalqi;
23632 break;
23633 case HImode:
23634 fn = gen_store_conditionalhi;
23635 break;
23636 case SImode:
23637 fn = gen_store_conditionalsi;
23638 break;
23639 case DImode:
23640 fn = gen_store_conditionaldi;
23641 break;
23642 case TImode:
23643 fn = gen_store_conditionalti;
23644 break;
23645 default:
23646 gcc_unreachable ();
23649 /* Emit sync before stwcx. to address PPC405 Erratum 77. */
23650 if (PPC405_ERRATUM77)
23651 emit_insn (gen_hwsync ());
23653 emit_insn (fn (res, mem, val));
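/* For reference, the two helpers above select these PowerPC
   load-locked/store-conditional pairs (the sub-word and quad-word forms
   exist only at newer ISA levels and are gated elsewhere):

	QImode	lbarx / stbcx.
	HImode	lharx / sthcx.
	SImode	lwarx / stwcx.
	DImode	ldarx / stdcx.
	TImode	lqarx / stqcx.  */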
23656 /* Expand barriers before and after a load_locked/store_cond sequence. */
23658 static rtx
23659 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23661 rtx addr = XEXP (mem, 0);
23662 int strict_p = (reload_in_progress || reload_completed);
23664 if (!legitimate_indirect_address_p (addr, strict_p)
23665 && !legitimate_indexed_address_p (addr, strict_p))
23667 addr = force_reg (Pmode, addr);
23668 mem = replace_equiv_address_nv (mem, addr);
23671 switch (model)
23673 case MEMMODEL_RELAXED:
23674 case MEMMODEL_CONSUME:
23675 case MEMMODEL_ACQUIRE:
23676 break;
23677 case MEMMODEL_RELEASE:
23678 case MEMMODEL_ACQ_REL:
23679 emit_insn (gen_lwsync ());
23680 break;
23681 case MEMMODEL_SEQ_CST:
23682 emit_insn (gen_hwsync ());
23683 break;
23684 default:
23685 gcc_unreachable ();
23687 return mem;
23690 static void
23691 rs6000_post_atomic_barrier (enum memmodel model)
23693 switch (model)
23695 case MEMMODEL_RELAXED:
23696 case MEMMODEL_CONSUME:
23697 case MEMMODEL_RELEASE:
23698 break;
23699 case MEMMODEL_ACQUIRE:
23700 case MEMMODEL_ACQ_REL:
23701 case MEMMODEL_SEQ_CST:
23702 emit_insn (gen_isync ());
23703 break;
23704 default:
23705 gcc_unreachable ();
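/* Taken together, the pre- and post-barrier helpers above implement this
   mapping from memory models to fences around the load-locked /
   store-conditional sequence:

	relaxed/consume:  nothing before, nothing after
	acquire:          nothing before, isync after
	release:          lwsync before, nothing after
	acq_rel:          lwsync before, isync after
	seq_cst:          hwsync before, isync after  */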
23709 /* A subroutine of the various atomic expanders. For sub-word operations,
23710 we must adjust things to operate on SImode. Given the original MEM,
23711 return a new aligned MEM. Also build and return the quantities by
23712 which to shift and mask. */
23714 static rtx
23715 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23717 rtx addr, align, shift, mask, mem;
23718 HOST_WIDE_INT shift_mask;
23719 machine_mode mode = GET_MODE (orig_mem);
23721 /* For smaller modes, we have to implement this via SImode. */
23722 shift_mask = (mode == QImode ? 0x18 : 0x10);
23724 addr = XEXP (orig_mem, 0);
23725 addr = force_reg (GET_MODE (addr), addr);
23727 /* Generate a new aligned MEM containing the subword. We
23728 do not want any of the existing MEM_ATTR data, as we're now
23729 accessing memory outside the original object. */
23730 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23731 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23732 mem = gen_rtx_MEM (SImode, align);
23733 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23734 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23735 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23737 /* Shift amount for subword relative to aligned word. */
23738 shift = gen_reg_rtx (SImode);
23739 addr = gen_lowpart (SImode, addr);
23740 rtx tmp = gen_reg_rtx (SImode);
23741 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23742 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23743 if (BYTES_BIG_ENDIAN)
23744 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23745 shift, 1, OPTAB_LIB_WIDEN);
23746 *pshift = shift;
23748 /* Mask for insertion. */
23749 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23750 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23751 *pmask = mask;
23753 return mem;
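/* A worked example of the arithmetic above: an HImode operand at
   address 0x1002 on a little-endian target gives

	align = 0x1002 & -4            = 0x1000
	shift = (0x1002 << 3) & 0x10   = 16
	mask  = 0xffff << 16           = 0xffff0000

   i.e. the halfword occupies the upper half of the aligned word.  On a
   big-endian target the shift is XORed with the shift mask, yielding 0
   here.  */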
23756 /* A subroutine of the various atomic expanders. For sub-word operands,
23757 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23759 static rtx
23760 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23762 rtx x;
23764 x = gen_reg_rtx (SImode);
23765 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23766 gen_rtx_NOT (SImode, mask),
23767 oldval)));
23769 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23771 return x;
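/* In other words, the result is (OLDVAL & ~MASK) | NEWVAL; the callers
   have already shifted NEWVAL into position and zero-extended it, so
   NEWVAL itself needs no masking.  */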
23774 /* A subroutine of the various atomic expanders. For sub-word operands,
23775 extract NARROW from WIDE via SHIFT. */
23777 static void
23778 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23780 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23781 wide, 1, OPTAB_LIB_WIDEN);
23782 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23785 /* Expand an atomic compare and swap operation. */
23787 void
23788 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23790 rtx boolval, retval, mem, oldval, newval, cond;
23791 rtx label1, label2, x, mask, shift;
23792 machine_mode mode, orig_mode;
23793 enum memmodel mod_s, mod_f;
23794 bool is_weak;
23796 boolval = operands[0];
23797 retval = operands[1];
23798 mem = operands[2];
23799 oldval = operands[3];
23800 newval = operands[4];
23801 is_weak = (INTVAL (operands[5]) != 0);
23802 mod_s = memmodel_base (INTVAL (operands[6]));
23803 mod_f = memmodel_base (INTVAL (operands[7]));
23804 orig_mode = mode = GET_MODE (mem);
23806 mask = shift = NULL_RTX;
23807 if (mode == QImode || mode == HImode)
23809 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23810 lwarx and shift/mask operations. With power8, we need to do the
23811 comparison in SImode, but the store is still done in QI/HImode. */
23812 oldval = convert_modes (SImode, mode, oldval, 1);
23814 if (!TARGET_SYNC_HI_QI)
23816 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23818 /* Shift and mask OLDVAL into position within the word. */
23819 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23820 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23822 /* Shift and mask NEWVAL into position within the word. */
23823 newval = convert_modes (SImode, mode, newval, 1);
23824 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23825 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23828 /* Prepare to adjust the return value. */
23829 retval = gen_reg_rtx (SImode);
23830 mode = SImode;
23832 else if (reg_overlap_mentioned_p (retval, oldval))
23833 oldval = copy_to_reg (oldval);
23835 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23836 oldval = copy_to_mode_reg (mode, oldval);
23838 if (reg_overlap_mentioned_p (retval, newval))
23839 newval = copy_to_reg (newval);
23841 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23843 label1 = NULL_RTX;
23844 if (!is_weak)
23846 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23847 emit_label (XEXP (label1, 0));
23849 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23851 emit_load_locked (mode, retval, mem);
23853 x = retval;
23854 if (mask)
23855 x = expand_simple_binop (SImode, AND, retval, mask,
23856 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23858 cond = gen_reg_rtx (CCmode);
23859 /* If we have TImode, synthesize a comparison. */
23860 if (mode != TImode)
23861 x = gen_rtx_COMPARE (CCmode, x, oldval);
23862 else
23864 rtx xor1_result = gen_reg_rtx (DImode);
23865 rtx xor2_result = gen_reg_rtx (DImode);
23866 rtx or_result = gen_reg_rtx (DImode);
23867 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23868 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23869 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23870 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23872 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23873 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23874 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23875 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23878 emit_insn (gen_rtx_SET (cond, x));
23880 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23881 emit_unlikely_jump (x, label2);
23883 x = newval;
23884 if (mask)
23885 x = rs6000_mask_atomic_subword (retval, newval, mask);
23887 emit_store_conditional (orig_mode, cond, mem, x);
23889 if (!is_weak)
23891 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23892 emit_unlikely_jump (x, label1);
23895 if (!is_mm_relaxed (mod_f))
23896 emit_label (XEXP (label2, 0));
23898 rs6000_post_atomic_barrier (mod_s);
23900 if (is_mm_relaxed (mod_f))
23901 emit_label (XEXP (label2, 0));
23903 if (shift)
23904 rs6000_finish_atomic_subword (operands[1], retval, shift);
23905 else if (mode != GET_MODE (operands[1]))
23906 convert_move (operands[1], retval, 1);
23908 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23909 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23910 emit_insn (gen_rtx_SET (boolval, x));
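/* A sketch of what the expander above emits for a strong SImode
   compare-and-swap with seq_cst ordering (register numbers are
   illustrative):

	hwsync
     1:	lwarx  r9,0,rMEM	# load and reserve
	cmpw   cr0,r9,rOLD
	bne-   cr0,2f		# values differ: fail
	stwcx. rNEW,0,rMEM	# store iff reservation intact
	bne-   cr0,1b		# reservation lost: retry
     2:	isync  */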
23913 /* Expand an atomic exchange operation. */
23915 void
23916 rs6000_expand_atomic_exchange (rtx operands[])
23918 rtx retval, mem, val, cond;
23919 machine_mode mode;
23920 enum memmodel model;
23921 rtx label, x, mask, shift;
23923 retval = operands[0];
23924 mem = operands[1];
23925 val = operands[2];
23926 model = memmodel_base (INTVAL (operands[3]));
23927 mode = GET_MODE (mem);
23929 mask = shift = NULL_RTX;
23930 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23932 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23934 /* Shift and mask VAL into position within the word. */
23935 val = convert_modes (SImode, mode, val, 1);
23936 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23937 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23939 /* Prepare to adjust the return value. */
23940 retval = gen_reg_rtx (SImode);
23941 mode = SImode;
23944 mem = rs6000_pre_atomic_barrier (mem, model);
23946 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23947 emit_label (XEXP (label, 0));
23949 emit_load_locked (mode, retval, mem);
23951 x = val;
23952 if (mask)
23953 x = rs6000_mask_atomic_subword (retval, val, mask);
23955 cond = gen_reg_rtx (CCmode);
23956 emit_store_conditional (mode, cond, mem, x);
23958 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23959 emit_unlikely_jump (x, label);
23961 rs6000_post_atomic_barrier (model);
23963 if (shift)
23964 rs6000_finish_atomic_subword (operands[0], retval, shift);
23967 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23968 to perform. MEM is the memory on which to operate. VAL is the second
23969 operand of the binary operator. BEFORE and AFTER are optional locations to
23970 return the value of MEM either before or after the operation. MODEL_RTX
23971 is a CONST_INT containing the memory model to use. */
23973 void
23974 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23975 rtx orig_before, rtx orig_after, rtx model_rtx)
23977 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23978 machine_mode mode = GET_MODE (mem);
23979 machine_mode store_mode = mode;
23980 rtx label, x, cond, mask, shift;
23981 rtx before = orig_before, after = orig_after;
23983 mask = shift = NULL_RTX;
23984 /* On power8, we do the operation directly in SImode. On earlier systems,
23985 do the operation on the word containing the subword, then shift/mask to
23986 get the proper byte or halfword. */
23987 if (mode == QImode || mode == HImode)
23989 if (TARGET_SYNC_HI_QI)
23991 val = convert_modes (SImode, mode, val, 1);
23993 /* Prepare to adjust the return value. */
23994 before = gen_reg_rtx (SImode);
23995 if (after)
23996 after = gen_reg_rtx (SImode);
23997 mode = SImode;
23999 else
24001 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
24003 /* Shift and mask VAL into position with the word. */
24004 val = convert_modes (SImode, mode, val, 1);
24005 val = expand_simple_binop (SImode, ASHIFT, val, shift,
24006 NULL_RTX, 1, OPTAB_LIB_WIDEN);
24008 switch (code)
24010 case IOR:
24011 case XOR:
24012 /* We've already zero-extended VAL. That is sufficient to
24013 make certain that it does not affect other bits. */
24014 mask = NULL;
24015 break;
24017 case AND:
24018 /* If we make certain that all of the other bits in VAL are
24019 set, that will be sufficient to not affect other bits. */
24020 x = gen_rtx_NOT (SImode, mask);
24021 x = gen_rtx_IOR (SImode, x, val);
24022 emit_insn (gen_rtx_SET (val, x));
24023 mask = NULL;
24024 break;
24026 case NOT:
24027 case PLUS:
24028 case MINUS:
24029 /* These will all affect bits outside the field and need
24030 adjustment via MASK within the loop. */
24031 break;
24033 default:
24034 gcc_unreachable ();
24037 /* Prepare to adjust the return value. */
24038 before = gen_reg_rtx (SImode);
24039 if (after)
24040 after = gen_reg_rtx (SImode);
24041 store_mode = mode = SImode;
24045 mem = rs6000_pre_atomic_barrier (mem, model);
24047 label = gen_label_rtx ();
24048 emit_label (label);
24049 label = gen_rtx_LABEL_REF (VOIDmode, label);
24051 if (before == NULL_RTX)
24052 before = gen_reg_rtx (mode);
24054 emit_load_locked (mode, before, mem);
24056 if (code == NOT)
24058 x = expand_simple_binop (mode, AND, before, val,
24059 NULL_RTX, 1, OPTAB_LIB_WIDEN);
24060 after = expand_simple_unop (mode, NOT, x, after, 1);
24062 else
24064 after = expand_simple_binop (mode, code, before, val,
24065 after, 1, OPTAB_LIB_WIDEN);
24068 x = after;
24069 if (mask)
24071 x = expand_simple_binop (SImode, AND, after, mask,
24072 NULL_RTX, 1, OPTAB_LIB_WIDEN);
24073 x = rs6000_mask_atomic_subword (before, x, mask);
24075 else if (store_mode != mode)
24076 x = convert_modes (store_mode, mode, x, 1);
24078 cond = gen_reg_rtx (CCmode);
24079 emit_store_conditional (store_mode, cond, mem, x);
24081 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
24082 emit_unlikely_jump (x, label);
24084 rs6000_post_atomic_barrier (model);
24086 if (shift)
24088 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
24089 then do the calculations in a SImode register. */
24090 if (orig_before)
24091 rs6000_finish_atomic_subword (orig_before, before, shift);
24092 if (orig_after)
24093 rs6000_finish_atomic_subword (orig_after, after, shift);
24095 else if (store_mode != mode)
24097 /* QImode/HImode on machines with lbarx/lharx where we do the native
24098 operation and then do the calculations in a SImode register. */
24099 if (orig_before)
24100 convert_move (orig_before, before, 1);
24101 if (orig_after)
24102 convert_move (orig_after, after, 1);
24104 else if (orig_after && after != orig_after)
24105 emit_move_insn (orig_after, after);
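/* For example, a full-word atomic fetch-and-add with seq_cst ordering
   expands to roughly (register numbers are illustrative):

	hwsync
     1:	lwarx  rBEFORE,0,rMEM
	add    rAFTER,rBEFORE,rVAL
	stwcx. rAFTER,0,rMEM
	bne-   cr0,1b
	isync  */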
24108 /* Emit instructions to move SRC to DST. Called by splitters for
24109 multi-register moves. It will emit at most one instruction for
24110 each register that is accessed; that is, it won't emit li/lis pairs
24111 (or equivalent for 64-bit code). One of SRC or DST must be a hard
24112 register. */
24114 void
24115 rs6000_split_multireg_move (rtx dst, rtx src)
24117 /* The register number of the first register being moved. */
24118 int reg;
24119 /* The mode that is to be moved. */
24120 machine_mode mode;
24121 /* The mode that the move is being done in, and its size. */
24122 machine_mode reg_mode;
24123 int reg_mode_size;
24124 /* The number of registers that will be moved. */
24125 int nregs;
24127 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
24128 mode = GET_MODE (dst);
24129 nregs = hard_regno_nregs[reg][mode];
24130 if (FP_REGNO_P (reg))
24131 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
24132 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
24133 else if (ALTIVEC_REGNO_P (reg))
24134 reg_mode = V16QImode;
24135 else if (TARGET_E500_DOUBLE && FLOAT128_2REG_P (mode))
24136 reg_mode = DFmode;
24137 else
24138 reg_mode = word_mode;
24139 reg_mode_size = GET_MODE_SIZE (reg_mode);
24141 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
24143 /* TDmode residing in FP registers is special, since the ISA requires that
24144 the lower-numbered word of a register pair is always the most significant
24145 word, even in little-endian mode. This does not match the usual subreg
24146 semantics, so we cannot use simplify_gen_subreg in those cases. Access
24147 the appropriate constituent registers "by hand" in little-endian mode.
24149 Note we do not need to check for destructive overlap here since TDmode
24150 can only reside in even/odd register pairs. */
24151 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
24153 rtx p_src, p_dst;
24154 int i;
24156 for (i = 0; i < nregs; i++)
24158 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
24159 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
24160 else
24161 p_src = simplify_gen_subreg (reg_mode, src, mode,
24162 i * reg_mode_size);
24164 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
24165 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
24166 else
24167 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
24168 i * reg_mode_size);
24170 emit_insn (gen_rtx_SET (p_dst, p_src));
24173 return;
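/* For example, splitting a TDmode move from f10/f11 to f12/f13 on a
   little-endian target emits f11->f13 and then f10->f12, so the
   lower-numbered register of each pair keeps the most significant word
   as the ISA requires.  */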
24176 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
24178 /* Move register range backwards, if we might have destructive
24179 overlap. */
24180 int i;
24181 for (i = nregs - 1; i >= 0; i--)
24182 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24183 i * reg_mode_size),
24184 simplify_gen_subreg (reg_mode, src, mode,
24185 i * reg_mode_size)));
24187 else
24189 int i;
24190 int j = -1;
24191 bool used_update = false;
24192 rtx restore_basereg = NULL_RTX;
24194 if (MEM_P (src) && INT_REGNO_P (reg))
24196 rtx breg;
24198 if (GET_CODE (XEXP (src, 0)) == PRE_INC
24199 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
24201 rtx delta_rtx;
24202 breg = XEXP (XEXP (src, 0), 0);
24203 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
24204 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
24205 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
24206 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24207 src = replace_equiv_address (src, breg);
24209 else if (! rs6000_offsettable_memref_p (src, reg_mode))
24211 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
24213 rtx basereg = XEXP (XEXP (src, 0), 0);
24214 if (TARGET_UPDATE)
24216 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
24217 emit_insn (gen_rtx_SET (ndst,
24218 gen_rtx_MEM (reg_mode,
24219 XEXP (src, 0))));
24220 used_update = true;
24222 else
24223 emit_insn (gen_rtx_SET (basereg,
24224 XEXP (XEXP (src, 0), 1)));
24225 src = replace_equiv_address (src, basereg);
24227 else
24229 rtx basereg = gen_rtx_REG (Pmode, reg);
24230 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
24231 src = replace_equiv_address (src, basereg);
24235 breg = XEXP (src, 0);
24236 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
24237 breg = XEXP (breg, 0);
24239 /* If the base register we are using to address memory is
24240 also a destination reg, then change that register last. */
24241 if (REG_P (breg)
24242 && REGNO (breg) >= REGNO (dst)
24243 && REGNO (breg) < REGNO (dst) + nregs)
24244 j = REGNO (breg) - REGNO (dst);
24246 else if (MEM_P (dst) && INT_REGNO_P (reg))
24248 rtx breg;
24250 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
24251 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
24253 rtx delta_rtx;
24254 breg = XEXP (XEXP (dst, 0), 0);
24255 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
24256 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
24257 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
24259 /* We have to update the breg before doing the store.
24260 Use store with update, if available. */
24262 if (TARGET_UPDATE)
24264 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24265 emit_insn (TARGET_32BIT
24266 ? (TARGET_POWERPC64
24267 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
24268 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
24269 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
24270 used_update = true;
24272 else
24273 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24274 dst = replace_equiv_address (dst, breg);
24276 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
24277 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
24279 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
24281 rtx basereg = XEXP (XEXP (dst, 0), 0);
24282 if (TARGET_UPDATE)
24284 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24285 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24286 XEXP (dst, 0)),
24287 nsrc));
24288 used_update = true;
24290 else
24291 emit_insn (gen_rtx_SET (basereg,
24292 XEXP (XEXP (dst, 0), 1)));
24293 dst = replace_equiv_address (dst, basereg);
24295 else
24297 rtx basereg = XEXP (XEXP (dst, 0), 0);
24298 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24299 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24300 && REG_P (basereg)
24301 && REG_P (offsetreg)
24302 && REGNO (basereg) != REGNO (offsetreg));
24303 if (REGNO (basereg) == 0)
24305 rtx tmp = offsetreg;
24306 offsetreg = basereg;
24307 basereg = tmp;
24309 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24310 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24311 dst = replace_equiv_address (dst, basereg);
24314 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24315 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
24318 for (i = 0; i < nregs; i++)
24320 /* Calculate index to next subword. */
24321 ++j;
24322 if (j == nregs)
24323 j = 0;
24325 /* If compiler already emitted move of first word by
24326 store with update, no need to do anything. */
24327 if (j == 0 && used_update)
24328 continue;
24330 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24331 j * reg_mode_size),
24332 simplify_gen_subreg (reg_mode, src, mode,
24333 j * reg_mode_size)));
24335 if (restore_basereg != NULL_RTX)
24336 emit_insn (restore_basereg);
24341 /* This page contains routines that are used to determine what the
24342 function prologue and epilogue code will do and write them out. */
24344 static inline bool
24345 save_reg_p (int r)
24347 return !call_used_regs[r] && df_regs_ever_live_p (r);
24350 /* Determine whether the gp REG is really used. */
24352 static bool
24353 rs6000_reg_live_or_pic_offset_p (int reg)
24355 /* We need to mark the PIC offset register live under the same conditions
24356 in which it is set up; otherwise it won't be saved before we clobber it. */
24358 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24360 if (TARGET_TOC && TARGET_MINIMAL_TOC
24361 && (crtl->calls_eh_return
24362 || df_regs_ever_live_p (reg)
24363 || get_pool_size ()))
24364 return true;
24366 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24367 && flag_pic)
24368 return true;
24371 /* If the function calls eh_return, claim used all the registers that would
24372 be checked for liveness otherwise. */
24374 return ((crtl->calls_eh_return || df_regs_ever_live_p (reg))
24375 && !call_used_regs[reg]);
24378 /* Return the first fixed-point register that is required to be
24379 saved. 32 if none. */
24381 static int
24382 first_reg_to_save (void)
24384 int first_reg;
24386 /* Find lowest numbered live register. */
24387 for (first_reg = 13; first_reg <= 31; first_reg++)
24388 if (save_reg_p (first_reg))
24389 break;
24391 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
24392 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
24393 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24394 || (TARGET_TOC && TARGET_MINIMAL_TOC))
24395 && rs6000_reg_live_or_pic_offset_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
24396 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
24398 #if TARGET_MACHO
24399 if (flag_pic
24400 && crtl->uses_pic_offset_table
24401 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24402 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24403 #endif
24405 return first_reg;
24408 /* Similar, for FP regs. */
24410 static int
24411 first_fp_reg_to_save (void)
24413 int first_reg;
24415 /* Find lowest numbered live register. */
24416 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24417 if (save_reg_p (first_reg))
24418 break;
24420 return first_reg;
24423 /* Similar, for AltiVec regs. */
24425 static int
24426 first_altivec_reg_to_save (void)
24428 int i;
24430 /* Stack frame remains as is unless we are in AltiVec ABI. */
24431 if (! TARGET_ALTIVEC_ABI)
24432 return LAST_ALTIVEC_REGNO + 1;
24434 /* On Darwin, the unwind routines are compiled without
24435 TARGET_ALTIVEC, and use save_world to save/restore the
24436 altivec registers when necessary. */
24437 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24438 && ! TARGET_ALTIVEC)
24439 return FIRST_ALTIVEC_REGNO + 20;
24441 /* Find lowest numbered live register. */
24442 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24443 if (save_reg_p (i))
24444 break;
24446 return i;
24449 /* Return a 32-bit mask of the AltiVec registers we need to set in
24450 VRSAVE. Bit n of the return value is 1 if Vn is live, where bit 0 is
24451 the most significant bit of the 32-bit word. */
24453 static unsigned int
24454 compute_vrsave_mask (void)
24456 unsigned int i, mask = 0;
24458 /* On Darwin, the unwind routines are compiled without
24459 TARGET_ALTIVEC, and use save_world to save/restore the
24460 call-saved altivec registers when necessary. */
24461 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24462 && ! TARGET_ALTIVEC)
24463 mask |= 0xFFF;
24465 /* First, find out if we use _any_ altivec registers. */
24466 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24467 if (df_regs_ever_live_p (i))
24468 mask |= ALTIVEC_REG_BIT (i);
24470 if (mask == 0)
24471 return mask;
24473 /* Next, remove the argument registers from the set. These must
24474 be in the VRSAVE mask set by the caller, so we don't need to add
24475 them in again. More importantly, the mask we compute here is
24476 used to generate CLOBBERs in the set_vrsave insn, and we do not
24477 wish the argument registers to die. */
24478 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24479 mask &= ~ALTIVEC_REG_BIT (i);
24481 /* Similarly, remove the return value from the set. */
24483 bool yes = false;
24484 diddle_return_value (is_altivec_return_reg, &yes);
24485 if (yes)
24486 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24489 return mask;
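/* ALTIVEC_REG_BIT maps Vn to bit (31 - n) of the mask, counting from the
   least significant bit.  A worked example: a function whose live AltiVec
   registers are exactly V20..V31 gets the mask

	(0x80000000 >> 20) | ... | (0x80000000 >> 31) = 0xFFF

   which is the constant OR-ed in for the Darwin save_world case above.  */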
24492 /* For a very restricted set of circumstances, we can cut down the
24493 size of prologues/epilogues by calling our own save/restore-the-world
24494 routines. */
24496 static void
24497 compute_save_world_info (rs6000_stack_t *info)
24499 info->world_save_p = 1;
24500 info->world_save_p
24501 = (WORLD_SAVE_P (info)
24502 && DEFAULT_ABI == ABI_DARWIN
24503 && !cfun->has_nonlocal_label
24504 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24505 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24506 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24507 && info->cr_save_p);
24509 /* This will not work in conjunction with sibcalls. Make sure there
24510 are none. (This check is expensive, but seldom executed.) */
24511 if (WORLD_SAVE_P (info))
24513 rtx_insn *insn;
24514 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24515 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24517 info->world_save_p = 0;
24518 break;
24522 if (WORLD_SAVE_P (info))
24524 /* Even if we're not touching VRsave, make sure there's room on the
24525 stack for it, if it looks like we're calling SAVE_WORLD, which
24526 will attempt to save it. */
24527 info->vrsave_size = 4;
24529 /* If we are going to save the world, we need to save the link register too. */
24530 info->lr_save_p = 1;
24532 /* "Save" the VRsave register too if we're saving the world. */
24533 if (info->vrsave_mask == 0)
24534 info->vrsave_mask = compute_vrsave_mask ();
24536 /* Because the Darwin register save/restore routines only handle
24537 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24538 check. */
24539 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24540 && (info->first_altivec_reg_save
24541 >= FIRST_SAVED_ALTIVEC_REGNO));
24544 return;
24548 static void
24549 is_altivec_return_reg (rtx reg, void *xyes)
24551 bool *yes = (bool *) xyes;
24552 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24553 *yes = true;
24557 /* Return whether REG is a global user reg or has been specified by
24558 -ffixed-REG. We should not restore these, and so cannot use
24559 lmw or out-of-line restore functions if there are any. We also
24560 can't save them (well, emit frame notes for them), because frame
24561 unwinding during exception handling will restore saved registers. */
24563 static bool
24564 fixed_reg_p (int reg)
24566 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24567 backend sets it, overriding anything the user might have given. */
24568 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24569 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24570 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24571 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24572 return false;
24574 return fixed_regs[reg];
24577 /* Determine the strategy for savings/restoring registers. */
24579 enum {
24580 SAVE_MULTIPLE = 0x1,
24581 SAVE_INLINE_GPRS = 0x2,
24582 SAVE_INLINE_FPRS = 0x4,
24583 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24584 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24585 SAVE_INLINE_VRS = 0x20,
24586 REST_MULTIPLE = 0x100,
24587 REST_INLINE_GPRS = 0x200,
24588 REST_INLINE_FPRS = 0x400,
24589 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24590 REST_INLINE_VRS = 0x1000
24593 static int
24594 rs6000_savres_strategy (rs6000_stack_t *info,
24595 bool using_static_chain_p)
24597 int strategy = 0;
24599 /* Select between in-line and out-of-line save and restore of regs.
24600 First, all the obvious cases where we don't use out-of-line. */
24601 if (crtl->calls_eh_return
24602 || cfun->machine->ra_need_lr)
24603 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24604 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24605 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24607 if (info->first_gp_reg_save == 32)
24608 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24610 if (info->first_fp_reg_save == 64
24611 /* The out-of-line FP routines use double-precision stores;
24612 we can't use those routines if we don't have such stores. */
24613 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24614 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24616 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24617 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24619 /* Define cutoff for using out-of-line functions to save registers. */
24620 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24622 if (!optimize_size)
24624 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24625 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24626 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24628 else
24630 /* Prefer out-of-line restore if it will exit. */
24631 if (info->first_fp_reg_save > 61)
24632 strategy |= SAVE_INLINE_FPRS;
24633 if (info->first_gp_reg_save > 29)
24635 if (info->first_fp_reg_save == 64)
24636 strategy |= SAVE_INLINE_GPRS;
24637 else
24638 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24640 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24641 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24644 else if (DEFAULT_ABI == ABI_DARWIN)
24646 if (info->first_fp_reg_save > 60)
24647 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24648 if (info->first_gp_reg_save > 29)
24649 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24650 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24652 else
24654 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24655 if (info->first_fp_reg_save > 61)
24656 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24657 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24658 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24661 /* Don't bother to try to save things out-of-line if r11 is occupied
24662 by the static chain. It would require too much fiddling and the
24663 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24664 pointer on Darwin, and AIX uses r1 or r12. */
24665 if (using_static_chain_p
24666 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24667 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24668 | SAVE_INLINE_GPRS
24669 | SAVE_INLINE_VRS);
24671 /* Saving CR interferes with the exit routines used on the SPE, so
24672 just punt here. */
24673 if (TARGET_SPE_ABI
24674 && info->spe_64bit_regs_used
24675 && info->cr_save_p)
24676 strategy |= REST_INLINE_GPRS;
24678 /* We can only use the out-of-line routines to restore fprs if we've
24679 saved all the registers from first_fp_reg_save in the prologue.
24680 Otherwise, we risk loading garbage. Of course, if we have saved
24681 out-of-line then we know we haven't skipped any fprs. */
24682 if ((strategy & SAVE_INLINE_FPRS)
24683 && !(strategy & REST_INLINE_FPRS))
24685 int i;
24687 for (i = info->first_fp_reg_save; i < 64; i++)
24688 if (fixed_regs[i] || !save_reg_p (i))
24690 strategy |= REST_INLINE_FPRS;
24691 break;
24695 /* Similarly, for altivec regs. */
24696 if ((strategy & SAVE_INLINE_VRS)
24697 && !(strategy & REST_INLINE_VRS))
24699 int i;
24701 for (i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24702 if (fixed_regs[i] || !save_reg_p (i))
24704 strategy |= REST_INLINE_VRS;
24705 break;
24709 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24710 saved is an out-of-line save or restore. Set up the value for
24711 the next test (excluding out-of-line gprs). */
24712 bool lr_save_p = (info->lr_save_p
24713 || !(strategy & SAVE_INLINE_FPRS)
24714 || !(strategy & SAVE_INLINE_VRS)
24715 || !(strategy & REST_INLINE_FPRS)
24716 || !(strategy & REST_INLINE_VRS));
24718 if (TARGET_MULTIPLE
24719 && !TARGET_POWERPC64
24720 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
24721 && info->first_gp_reg_save < 31)
24723 /* Prefer store multiple for saves over out-of-line routines,
24724 since the store-multiple instruction will always be smaller. */
24725 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24727 /* The situation is more complicated with load multiple. We'd
24728 prefer to use the out-of-line routines for restores, since the
24729 "exit" out-of-line routines can handle the restore of LR and the
24730 frame teardown. However, it doesn't make sense to use the
24731 out-of-line routine if that is the only reason we'd need to save
24732 LR, and we can't use the "exit" out-of-line gpr restore if we
24733 have saved some fprs; in those cases it is advantageous to use
24734 load multiple when available. */
24735 if (info->first_fp_reg_save != 64 || !lr_save_p)
24736 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24739 /* Using the "exit" out-of-line routine does not improve code size
24740 if it would require lr to be saved and we are only saving one
24741 or two gprs. */
24742 else if (!lr_save_p && info->first_gp_reg_save > 29)
24743 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24745 /* We can only use load multiple or the out-of-line routines to
24746 restore gprs if we've saved all the registers from
24747 first_gp_reg_save. Otherwise, we risk loading garbage.
24748 Of course, if we have saved out-of-line or used stmw then we know
24749 we haven't skipped any gprs. */
24750 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24751 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24753 int i;
24755 for (i = info->first_gp_reg_save; i < 32; i++)
24756 if (fixed_reg_p (i) || !save_reg_p (i))
24758 strategy |= REST_INLINE_GPRS;
24759 strategy &= ~REST_MULTIPLE;
24760 break;
24764 if (TARGET_ELF && TARGET_64BIT)
24766 if (!(strategy & SAVE_INLINE_FPRS))
24767 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24768 else if (!(strategy & SAVE_INLINE_GPRS)
24769 && info->first_fp_reg_save == 64)
24770 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24772 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24773 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24775 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24776 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24778 return strategy;
24781 /* Calculate the stack information for the current function. This is
24782 complicated by having two separate calling sequences, the AIX calling
24783 sequence and the V.4 calling sequence.
24785 AIX (and Darwin/Mac OS X) stack frames look like:
24786 32-bit 64-bit
24787 SP----> +---------------------------------------+
24788 | back chain to caller | 0 0
24789 +---------------------------------------+
24790 | saved CR | 4 8 (8-11)
24791 +---------------------------------------+
24792 | saved LR | 8 16
24793 +---------------------------------------+
24794 | reserved for compilers | 12 24
24795 +---------------------------------------+
24796 | reserved for binders | 16 32
24797 +---------------------------------------+
24798 | saved TOC pointer | 20 40
24799 +---------------------------------------+
24800 | Parameter save area (P) | 24 48
24801 +---------------------------------------+
24802 | Alloca space (A) | 24+P etc.
24803 +---------------------------------------+
24804 | Local variable space (L) | 24+P+A
24805 +---------------------------------------+
24806 | Float/int conversion temporary (X) | 24+P+A+L
24807 +---------------------------------------+
24808 | Save area for AltiVec registers (W) | 24+P+A+L+X
24809 +---------------------------------------+
24810 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24811 +---------------------------------------+
24812 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24813 +---------------------------------------+
24814 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24815 +---------------------------------------+
24816 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24817 +---------------------------------------+
24818 old SP->| back chain to caller's caller |
24819 +---------------------------------------+
24821 The required alignment for AIX configurations is two words (i.e., 8
24822 or 16 bytes).
24824 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24826 SP----> +---------------------------------------+
24827 | Back chain to caller | 0
24828 +---------------------------------------+
24829 | Save area for CR | 8
24830 +---------------------------------------+
24831 | Saved LR | 16
24832 +---------------------------------------+
24833 | Saved TOC pointer | 24
24834 +---------------------------------------+
24835 | Parameter save area (P) | 32
24836 +---------------------------------------+
24837 | Alloca space (A) | 32+P
24838 +---------------------------------------+
24839 | Local variable space (L) | 32+P+A
24840 +---------------------------------------+
24841 | Save area for AltiVec registers (W) | 32+P+A+L
24842 +---------------------------------------+
24843 | AltiVec alignment padding (Y) | 32+P+A+L+W
24844 +---------------------------------------+
24845 | Save area for GP registers (G) | 32+P+A+L+W+Y
24846 +---------------------------------------+
24847 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24848 +---------------------------------------+
24849 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24850 +---------------------------------------+
24853 V.4 stack frames look like:
24855 SP----> +---------------------------------------+
24856 | back chain to caller | 0
24857 +---------------------------------------+
24858 | caller's saved LR | 4
24859 +---------------------------------------+
24860 | Parameter save area (P) | 8
24861 +---------------------------------------+
24862 | Alloca space (A) | 8+P
24863 +---------------------------------------+
24864 | Varargs save area (V) | 8+P+A
24865 +---------------------------------------+
24866 | Local variable space (L) | 8+P+A+V
24867 +---------------------------------------+
24868 | Float/int conversion temporary (X) | 8+P+A+V+L
24869 +---------------------------------------+
24870 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24871 +---------------------------------------+
24872 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24873 +---------------------------------------+
24874 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24875 +---------------------------------------+
24876 | SPE: area for 64-bit GP registers |
24877 +---------------------------------------+
24878 | SPE alignment padding |
24879 +---------------------------------------+
24880 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24881 +---------------------------------------+
24882 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24883 +---------------------------------------+
24884 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24885 +---------------------------------------+
24886 old SP->| back chain to caller's caller |
24887 +---------------------------------------+
24889 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24890 given. (But note below and in sysv4.h that we require only 8 and
24891 may round up the size of our stack frame anyway. The historical
24892 reason is early versions of powerpc-linux which didn't properly
24893 align the stack at program startup. A happy side-effect is that
24894 -mno-eabi libraries can be used with -meabi programs.)
24896 The EABI configuration defaults to the V.4 layout. However,
24897 the stack alignment requirements may differ. If -mno-eabi is not
24898 given, the required stack alignment is 8 bytes; if -mno-eabi is
24899 given, the required alignment is 16 bytes. (But see V.4 comment
24900 above.) */
24902 #ifndef ABI_STACK_BOUNDARY
24903 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24904 #endif
24906 static rs6000_stack_t *
24907 rs6000_stack_info (void)
24909 /* We should never be called for thunks, we are not set up for that. */
24910 gcc_assert (!cfun->is_thunk);
24912 rs6000_stack_t *info = &stack_info;
24913 int reg_size = TARGET_32BIT ? 4 : 8;
24914 int ehrd_size;
24915 int ehcr_size;
24916 int save_align;
24917 int first_gp;
24918 HOST_WIDE_INT non_fixed_size;
24919 bool using_static_chain_p;
24921 if (reload_completed && info->reload_completed)
24922 return info;
24924 memset (info, 0, sizeof (*info));
24925 info->reload_completed = reload_completed;
24927 if (TARGET_SPE)
24929 /* Cache value so we don't rescan instruction chain over and over. */
24930 if (cfun->machine->spe_insn_chain_scanned_p == 0)
24931 cfun->machine->spe_insn_chain_scanned_p
24932 = spe_func_has_64bit_regs_p () + 1;
24933 info->spe_64bit_regs_used = cfun->machine->spe_insn_chain_scanned_p - 1;
24936 /* Select which calling sequence. */
24937 info->abi = DEFAULT_ABI;
24939 /* Calculate which registers need to be saved & save area size. */
24940 info->first_gp_reg_save = first_reg_to_save ();
24941 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24942 even if it currently looks like we won't. Reload may need it to
24943 get at a constant; if so, it will have already created a constant
24944 pool entry for it. */
24945 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24946 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24947 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24948 && crtl->uses_const_pool
24949 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24950 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24951 else
24952 first_gp = info->first_gp_reg_save;
24954 info->gp_size = reg_size * (32 - first_gp);
24956 /* For the SPE, we have an additional upper 32-bits on each GPR.
24957 Ideally we should save the entire 64-bits only when the upper
24958 half is used in SIMD instructions. Since we only record
24959 registers live (not the size they are used in), this proves
24960 difficult because we'd have to traverse the instruction chain at
24961 the right time, taking reload into account. This is a real pain,
24962 so we opt to save the GPRs in 64-bits always if but one register
24963 gets used in 64-bits. Otherwise, all the registers in the frame
24964 get saved in 32-bits.
24966 So... since when we save all GPRs (except the SP) in 64-bits, the
24967 traditional GP save area will be empty. */
24968 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24969 info->gp_size = 0;
24971 info->first_fp_reg_save = first_fp_reg_to_save ();
24972 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24974 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24975 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24976 - info->first_altivec_reg_save);
24978 /* Does this function call anything? */
24979 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24981 /* Determine if we need to save the condition code registers. */
24982 if (save_reg_p (CR2_REGNO)
24983 || save_reg_p (CR3_REGNO)
24984 || save_reg_p (CR4_REGNO))
24986 info->cr_save_p = 1;
24987 if (DEFAULT_ABI == ABI_V4)
24988 info->cr_size = reg_size;
24991 /* If the current function calls __builtin_eh_return, then we need
24992 to allocate stack space for registers that will hold data for
24993 the exception handler. */
24994 if (crtl->calls_eh_return)
24996 unsigned int i;
24997 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24998 continue;
25000 /* SPE saves EH registers in 64-bits. */
25001 ehrd_size = i * (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0
25002 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
25004 else
25005 ehrd_size = 0;
25007 /* In the ELFv2 ABI, we also need to allocate space for separate
25008 CR field save areas if the function calls __builtin_eh_return. */
25009 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
25011 /* This hard-codes that we have three call-saved CR fields. */
25012 ehcr_size = 3 * reg_size;
25013 /* We do *not* use the regular CR save mechanism. */
25014 info->cr_save_p = 0;
25016 else
25017 ehcr_size = 0;
25019 /* Determine various sizes. */
25020 info->reg_size = reg_size;
25021 info->fixed_size = RS6000_SAVE_AREA;
25022 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
25023 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
25024 TARGET_ALTIVEC ? 16 : 8);
25025 if (FRAME_GROWS_DOWNWARD)
25026 info->vars_size
25027 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
25028 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
25029 - (info->fixed_size + info->vars_size + info->parm_size);
25031 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
25032 info->spe_gp_size = 8 * (32 - first_gp);
25034 if (TARGET_ALTIVEC_ABI)
25035 info->vrsave_mask = compute_vrsave_mask ();
25037 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
25038 info->vrsave_size = 4;
25040 compute_save_world_info (info);
25042 /* Calculate the offsets. */
25043 switch (DEFAULT_ABI)
25045 case ABI_NONE:
25046 default:
25047 gcc_unreachable ();
25049 case ABI_AIX:
25050 case ABI_ELFv2:
25051 case ABI_DARWIN:
25052 info->fp_save_offset = -info->fp_size;
25053 info->gp_save_offset = info->fp_save_offset - info->gp_size;
25055 if (TARGET_ALTIVEC_ABI)
25057 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
25059 /* Align stack so vector save area is on a quadword boundary.
25060 The padding goes above the vectors. */
25061 if (info->altivec_size != 0)
25062 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
25064 info->altivec_save_offset = info->vrsave_save_offset
25065 - info->altivec_padding_size
25066 - info->altivec_size;
25067 gcc_assert (info->altivec_size == 0
25068 || info->altivec_save_offset % 16 == 0);
25070 /* Adjust for AltiVec case. */
25071 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
25073 else
25074 info->ehrd_offset = info->gp_save_offset - ehrd_size;
25076 info->ehcr_offset = info->ehrd_offset - ehcr_size;
25077 info->cr_save_offset = reg_size; /* first word when 64-bit. */
25078 info->lr_save_offset = 2*reg_size;
25079 break;
25081 case ABI_V4:
25082 info->fp_save_offset = -info->fp_size;
25083 info->gp_save_offset = info->fp_save_offset - info->gp_size;
25084 info->cr_save_offset = info->gp_save_offset - info->cr_size;
25086 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
25088 /* Align stack so SPE GPR save area is aligned on a
25089 double-word boundary. */
25090 if (info->spe_gp_size != 0 && info->cr_save_offset != 0)
25091 info->spe_padding_size = 8 - (-info->cr_save_offset % 8);
25092 else
25093 info->spe_padding_size = 0;
25095 info->spe_gp_save_offset = info->cr_save_offset
25096 - info->spe_padding_size
25097 - info->spe_gp_size;
25099 /* Adjust for SPE case. */
25100 info->ehrd_offset = info->spe_gp_save_offset;
25102 else if (TARGET_ALTIVEC_ABI)
25104 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
25106 /* Align stack so vector save area is on a quadword boundary. */
25107 if (info->altivec_size != 0)
25108 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
25110 info->altivec_save_offset = info->vrsave_save_offset
25111 - info->altivec_padding_size
25112 - info->altivec_size;
25114 /* Adjust for AltiVec case. */
25115 info->ehrd_offset = info->altivec_save_offset;
25117 else
25118 info->ehrd_offset = info->cr_save_offset;
25120 info->ehrd_offset -= ehrd_size;
25121 info->lr_save_offset = reg_size;
25124 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
25125 info->save_size = RS6000_ALIGN (info->fp_size
25126 + info->gp_size
25127 + info->altivec_size
25128 + info->altivec_padding_size
25129 + info->spe_gp_size
25130 + info->spe_padding_size
25131 + ehrd_size
25132 + ehcr_size
25133 + info->cr_size
25134 + info->vrsave_size,
25135 save_align);
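/* RS6000_ALIGN rounds its first argument up to a multiple of the second,
   so e.g. a 20-byte save area with save_align == 16 becomes
   RS6000_ALIGN (20, 16) == 32.  */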
25137 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
25139 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
25140 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
25142 /* Determine if we need to save the link register. */
25143 if (info->calls_p
25144 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25145 && crtl->profile
25146 && !TARGET_PROFILE_KERNEL)
25147 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
25148 #ifdef TARGET_RELOCATABLE
25149 || (DEFAULT_ABI == ABI_V4
25150 && (TARGET_RELOCATABLE || flag_pic > 1)
25151 && get_pool_size () != 0)
25152 #endif
25153 || rs6000_ra_ever_killed ())
25154 info->lr_save_p = 1;
25156 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
25157 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
25158 && call_used_regs[STATIC_CHAIN_REGNUM]);
25159 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
25161 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
25162 || !(info->savres_strategy & SAVE_INLINE_FPRS)
25163 || !(info->savres_strategy & SAVE_INLINE_VRS)
25164 || !(info->savres_strategy & REST_INLINE_GPRS)
25165 || !(info->savres_strategy & REST_INLINE_FPRS)
25166 || !(info->savres_strategy & REST_INLINE_VRS))
25167 info->lr_save_p = 1;
25169 if (info->lr_save_p)
25170 df_set_regs_ever_live (LR_REGNO, true);
25172 /* Determine if we need to allocate any stack frame:
25174 For AIX we need to push the stack if a frame pointer is needed
25175 (because the stack might be dynamically adjusted), if we are
25176 debugging, if we make calls, or if the sum of fp_save, gp_save,
25177 and local variables are more than the space needed to save all
25178 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
25179 + 18*8 = 288 (GPR13 reserved).
25181 For V.4 we don't have the stack cushion that AIX uses, but assume
25182 that the debugger can handle stackless frames. */
25184 if (info->calls_p)
25185 info->push_p = 1;
25187 else if (DEFAULT_ABI == ABI_V4)
25188 info->push_p = non_fixed_size != 0;
25190 else if (frame_pointer_needed)
25191 info->push_p = 1;
25193 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
25194 info->push_p = 1;
25196 else
25197 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
25199 return info;
25202 /* Return true if the current function uses any GPRs in 64-bit SIMD
25203 mode. */
25205 static bool
25206 spe_func_has_64bit_regs_p (void)
25208 rtx_insn *insns, *insn;
25210 /* Functions that save and restore all the call-saved registers will
25211 need to save/restore the registers in 64-bits. */
25212 if (crtl->calls_eh_return
25213 || cfun->calls_setjmp
25214 || crtl->has_nonlocal_goto)
25215 return true;
25217 insns = get_insns ();
25219 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
25221 if (INSN_P (insn))
25223 rtx i;
25225 /* FIXME: This should be implemented with attributes...
25227 (set_attr "spe64" "true")....then,
25228 if (get_spe64(insn)) return true;
25230 It's the only reliable way to do the stuff below. */
25232 i = PATTERN (insn);
25233 if (GET_CODE (i) == SET)
25235 machine_mode mode = GET_MODE (SET_SRC (i));
25237 if (SPE_VECTOR_MODE (mode))
25238 return true;
25239 if (TARGET_E500_DOUBLE
25240 && (mode == DFmode || FLOAT128_2REG_P (mode)))
25241 return true;
25246 return false;
25249 static void
25250 debug_stack_info (rs6000_stack_t *info)
25252 const char *abi_string;
25254 if (! info)
25255 info = rs6000_stack_info ();
25257 fprintf (stderr, "\nStack information for function %s:\n",
25258 ((current_function_decl && DECL_NAME (current_function_decl))
25259 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
25260 : "<unknown>"));
25262 switch (info->abi)
25264 default: abi_string = "Unknown"; break;
25265 case ABI_NONE: abi_string = "NONE"; break;
25266 case ABI_AIX: abi_string = "AIX"; break;
25267 case ABI_ELFv2: abi_string = "ELFv2"; break;
25268 case ABI_DARWIN: abi_string = "Darwin"; break;
25269 case ABI_V4: abi_string = "V.4"; break;
25272 fprintf (stderr, "\tABI = %5s\n", abi_string);
25274 if (TARGET_ALTIVEC_ABI)
25275 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
25277 if (TARGET_SPE_ABI)
25278 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
25280 if (info->first_gp_reg_save != 32)
25281 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
25283 if (info->first_fp_reg_save != 64)
25284 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
25286 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
25287 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
25288 info->first_altivec_reg_save);
25290 if (info->lr_save_p)
25291 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
25293 if (info->cr_save_p)
25294 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
25296 if (info->vrsave_mask)
25297 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
25299 if (info->push_p)
25300 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
25302 if (info->calls_p)
25303 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
25305 if (info->gp_size)
25306 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
25308 if (info->fp_size)
25309 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
25311 if (info->altivec_size)
25312 fprintf (stderr, "\taltivec_save_offset = %5d\n",
25313 info->altivec_save_offset);
25315 if (info->spe_gp_size)
25316 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
25317 info->spe_gp_save_offset);
25319 if (info->vrsave_size)
25320 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
25321 info->vrsave_save_offset);
25323 if (info->lr_save_p)
25324 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
25326 if (info->cr_save_p)
25327 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
25329 if (info->varargs_save_offset)
25330 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
25332 if (info->total_size)
25333 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25334 info->total_size);
25336 if (info->vars_size)
25337 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25338 info->vars_size);
25340 if (info->parm_size)
25341 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25343 if (info->fixed_size)
25344 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25346 if (info->gp_size)
25347 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25349 if (info->spe_gp_size)
25350 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
25352 if (info->fp_size)
25353 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25355 if (info->altivec_size)
25356 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25358 if (info->vrsave_size)
25359 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25361 if (info->altivec_padding_size)
25362 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25363 info->altivec_padding_size);
25365 if (info->spe_padding_size)
25366 fprintf (stderr, "\tspe_padding_size = %5d\n",
25367 info->spe_padding_size);
25369 if (info->cr_size)
25370 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25372 if (info->save_size)
25373 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25375 if (info->reg_size != 4)
25376 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25378 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25380 fprintf (stderr, "\n");
25383 rtx
25384 rs6000_return_addr (int count, rtx frame)
25386 /* Currently we don't optimize very well between prolog and body
25387 code and for PIC code the code can be actually quite bad, so
25388 don't try to be too clever here. */
25389 if (count != 0
25390 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25392 cfun->machine->ra_needs_full_frame = 1;
25394 return
25395 gen_rtx_MEM
25396 (Pmode,
25397 memory_address
25398 (Pmode,
25399 plus_constant (Pmode,
25400 copy_to_reg
25401 (gen_rtx_MEM (Pmode,
25402 memory_address (Pmode, frame))),
25403 RETURN_ADDRESS_OFFSET)));
25406 cfun->machine->ra_need_lr = 1;
25407 return get_hard_reg_initial_val (Pmode, LR_REGNO);
25410 /* Say whether a function is a candidate for sibcall handling or not. */
25412 static bool
25413 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25415 tree fntype;
25417 if (decl)
25418 fntype = TREE_TYPE (decl);
25419 else
25420 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25422 /* We can't do it if the called function has more vector parameters
25423 than the current function; there's nowhere to put the VRsave code. */
25424 if (TARGET_ALTIVEC_ABI
25425 && TARGET_ALTIVEC_VRSAVE
25426 && !(decl && decl == current_function_decl))
25428 function_args_iterator args_iter;
25429 tree type;
25430 int nvreg = 0;
25432 /* Functions with vector parameters are required to have a
25433 prototype, so the argument type info must be available
25434 here. */
25435 FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
25436 if (TREE_CODE (type) == VECTOR_TYPE
25437 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25438 nvreg++;
25440 FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
25441 if (TREE_CODE (type) == VECTOR_TYPE
25442 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25443 nvreg--;
25445 if (nvreg > 0)
25446 return false;
25449 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25450 functions, because the callee may have a different TOC pointer from
25451 the caller's and there's no way to ensure we restore the TOC when
25452 we return. With the secure-plt SYSV ABI we can't make non-local
25453 calls when compiling -fpic/-fPIC because the PLT call stubs use r30. */
25454 if (DEFAULT_ABI == ABI_DARWIN
25455 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25456 && decl
25457 && !DECL_EXTERNAL (decl)
25458 && !DECL_WEAK (decl)
25459 && (*targetm.binds_local_p) (decl))
25460 || (DEFAULT_ABI == ABI_V4
25461 && (!TARGET_SECURE_PLT
25462 || !flag_pic
25463 || (decl
25464 && (*targetm.binds_local_p) (decl)))))
25466 tree attr_list = TYPE_ATTRIBUTES (fntype);
25468 if (!lookup_attribute ("longcall", attr_list)
25469 || lookup_attribute ("shortcall", attr_list))
25470 return true;
25473 return false;
25476 static int
25477 rs6000_ra_ever_killed (void)
25479 rtx_insn *top;
25480 rtx reg;
25481 rtx_insn *insn;
25483 if (cfun->is_thunk)
25484 return 0;
25486 if (cfun->machine->lr_save_state)
25487 return cfun->machine->lr_save_state - 1;
25489 /* regs_ever_live has LR marked as used if any sibcalls are present,
25490 but this should not force saving and restoring in the
25491 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25492 clobbers LR, so relying on it here would be equally inappropriate. */
25494 /* Also, the prologue can generate a store into LR that
25495 doesn't really count, like this:
25497 move LR->R0
25498 bcl to set PIC register
25499 move LR->R31
25500 move R0->LR
25502 When we're called from the epilogue, we need to avoid counting
25503 this as a store. */
25505 push_topmost_sequence ();
25506 top = get_insns ();
25507 pop_topmost_sequence ();
25508 reg = gen_rtx_REG (Pmode, LR_REGNO);
25510 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25512 if (INSN_P (insn))
25514 if (CALL_P (insn))
25516 if (!SIBLING_CALL_P (insn))
25517 return 1;
25519 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25520 return 1;
25521 else if (set_of (reg, insn) != NULL_RTX
25522 && !prologue_epilogue_contains (insn))
25523 return 1;
25526 return 0;
25529 /* Emit instructions needed to load the TOC register.
25530 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set and
25531 there is a constant pool; or for SVR4 -fpic. */
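/* For illustration, the SVR4 secure-plt -fPIC path below emits a
   sequence along these lines (label names are hypothetical):

	bcl 20,31,.L1
   .L1:	mflr 30
	addis 30,30,.LCTOC1-.L1@ha
	addi 30,30,.LCTOC1-.L1@l

   i.e. the TOC/GOT pointer is computed PC-relative via the link
   register.  */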
25533 void
25534 rs6000_emit_load_toc_table (int fromprolog)
25536 rtx dest;
25537 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25539 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25541 char buf[30];
25542 rtx lab, tmp1, tmp2, got;
25544 lab = gen_label_rtx ();
25545 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25546 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25547 if (flag_pic == 2)
25549 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25550 need_toc_init = 1;
25552 else
25553 got = rs6000_got_sym ();
25554 tmp1 = tmp2 = dest;
25555 if (!fromprolog)
25557 tmp1 = gen_reg_rtx (Pmode);
25558 tmp2 = gen_reg_rtx (Pmode);
25560 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25561 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25562 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25563 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25565 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25567 emit_insn (gen_load_toc_v4_pic_si ());
25568 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25570 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25572 char buf[30];
25573 rtx temp0 = (fromprolog
25574 ? gen_rtx_REG (Pmode, 0)
25575 : gen_reg_rtx (Pmode));
25577 if (fromprolog)
25579 rtx symF, symL;
25581 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25582 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25584 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25585 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25587 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25588 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25589 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25591 else
25593 rtx tocsym, lab;
25595 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25596 need_toc_init = 1;
25597 lab = gen_label_rtx ();
25598 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25599 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25600 if (TARGET_LINK_STACK)
25601 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25602 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25604 emit_insn (gen_addsi3 (dest, temp0, dest));
25606 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25608 /* This is for AIX code running in non-PIC ELF32. */
25609 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25611 need_toc_init = 1;
25612 emit_insn (gen_elf_high (dest, realsym));
25613 emit_insn (gen_elf_low (dest, dest, realsym));
25615 else
25617 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25619 if (TARGET_32BIT)
25620 emit_insn (gen_load_toc_aix_si (dest));
25621 else
25622 emit_insn (gen_load_toc_aix_di (dest));
25626 /* Emit instructions to restore the link register after determining where
25627 its value has been stored. */
25629 void
25630 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25632 rs6000_stack_t *info = rs6000_stack_info ();
25633 rtx operands[2];
25635 operands[0] = source;
25636 operands[1] = scratch;
25638 if (info->lr_save_p)
25640 rtx frame_rtx = stack_pointer_rtx;
25641 HOST_WIDE_INT sp_offset = 0;
25642 rtx tmp;
25644 if (frame_pointer_needed
25645 || cfun->calls_alloca
25646 || info->total_size > 32767)
25648 tmp = gen_frame_mem (Pmode, frame_rtx);
25649 emit_move_insn (operands[1], tmp);
25650 frame_rtx = operands[1];
25652 else if (info->push_p)
25653 sp_offset = info->total_size;
25655 tmp = plus_constant (Pmode, frame_rtx,
25656 info->lr_save_offset + sp_offset);
25657 tmp = gen_frame_mem (Pmode, tmp);
25658 emit_move_insn (tmp, operands[0]);
25660 else
25661 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25663 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25664 state of lr_save_p so any change from here on would be a bug. In
25665 particular, stop rs6000_ra_ever_killed from considering the SET
25666 of lr we may have added just above. */
25667 cfun->machine->lr_save_state = info->lr_save_p + 1;
25670 static GTY(()) alias_set_type set = -1;
25672 alias_set_type
25673 get_TOC_alias_set (void)
25675 if (set == -1)
25676 set = new_alias_set ();
25677 return set;
25680 /* This returns nonzero if the current function uses the TOC. This is
25681 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25682 is generated by the ABI_V4 load_toc_* patterns. */
25683 #if TARGET_ELF
25684 static int
25685 uses_TOC (void)
25687 rtx_insn *insn;
25689 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25690 if (INSN_P (insn))
25692 rtx pat = PATTERN (insn);
25693 int i;
25695 if (GET_CODE (pat) == PARALLEL)
25696 for (i = 0; i < XVECLEN (pat, 0); i++)
25698 rtx sub = XVECEXP (pat, 0, i);
25699 if (GET_CODE (sub) == USE)
25701 sub = XEXP (sub, 0);
25702 if (GET_CODE (sub) == UNSPEC
25703 && XINT (sub, 1) == UNSPEC_TOC)
25704 return 1;
25708 return 0;
25710 #endif
25713 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25715 rtx tocrel, tocreg, hi;
25717 if (TARGET_DEBUG_ADDR)
25719 if (GET_CODE (symbol) == SYMBOL_REF)
25720 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25721 XSTR (symbol, 0));
25722 else
25724 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25725 GET_RTX_NAME (GET_CODE (symbol)));
25726 debug_rtx (symbol);
25730 if (!can_create_pseudo_p ())
25731 df_set_regs_ever_live (TOC_REGISTER, true);
25733 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25734 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25735 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25736 return tocrel;
25738 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25739 if (largetoc_reg != NULL)
25741 emit_move_insn (largetoc_reg, hi);
25742 hi = largetoc_reg;
25744 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
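/* As a sketch of the result: with -mcmodel=medium or -mcmodel=large the
   HIGH/LO_SUM pair built above ends up as something like

	addis 9,2,sym@toc@ha
	... sym@toc@l(9)

   (r9 is illustrative), whereas small-model code references
   sym@toc(2) directly.  */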
25747 /* Issue assembly directives that create a reference to the given DWARF
25748 FRAME_TABLE_LABEL from the current function section. */
25749 void
25750 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25752 fprintf (asm_out_file, "\t.ref %s\n",
25753 (* targetm.strip_name_encoding) (frame_table_label));
25756 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25757 and the change to the stack pointer. */
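/* Roughly speaking, the tie emitted below is a single insn of the form

     (parallel [(set (mem:BLK (reg 1)) (const_int 0))
		(set (mem:BLK (reg 31)) (const_int 0))])

   (the second element only when the hard frame pointer is live); the
   BLKmode frame MEMs make it appear to touch all of stack memory, so
   the scheduler will not move frame loads/stores across it.  */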
25759 static void
25760 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25762 rtvec p;
25763 int i;
25764 rtx regs[3];
25766 i = 0;
25767 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25768 if (hard_frame_needed)
25769 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25770 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25771 || (hard_frame_needed
25772 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25773 regs[i++] = fp;
25775 p = rtvec_alloc (i);
25776 while (--i >= 0)
25778 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25779 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25782 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25785 /* Emit the correct code for allocating stack space, as insns.
25786 If COPY_REG is non-null, leave a copy of the old stack pointer, offset by COPY_OFF, in it.
25787 The generated code may use hard register 0 as a temporary. */
25789 static rtx_insn *
25790 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25792 rtx_insn *insn;
25793 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25794 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25795 rtx todec = gen_int_mode (-size, Pmode);
25796 rtx par, set, mem;
25798 if (INTVAL (todec) != -size)
25800 warning (0, "stack frame too large");
25801 emit_insn (gen_trap ());
25802 return 0;
25805 if (crtl->limit_stack)
25807 if (REG_P (stack_limit_rtx)
25808 && REGNO (stack_limit_rtx) > 1
25809 && REGNO (stack_limit_rtx) <= 31)
25811 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
25812 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25813 const0_rtx));
25815 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25816 && TARGET_32BIT
25817 && DEFAULT_ABI == ABI_V4)
25819 rtx toload = gen_rtx_CONST (VOIDmode,
25820 gen_rtx_PLUS (Pmode,
25821 stack_limit_rtx,
25822 GEN_INT (size)));
25824 emit_insn (gen_elf_high (tmp_reg, toload));
25825 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25826 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25827 const0_rtx));
25829 else
25830 warning (0, "stack limit expression is not supported");
25833 if (copy_reg)
25835 if (copy_off != 0)
25836 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25837 else
25838 emit_move_insn (copy_reg, stack_reg);
25841 if (size > 32767)
25843 /* Need a note here so that try_split doesn't get confused. */
25844 if (get_last_insn () == NULL_RTX)
25845 emit_note (NOTE_INSN_DELETED);
25846 insn = emit_move_insn (tmp_reg, todec);
25847 try_split (PATTERN (insn), insn, 0);
25848 todec = tmp_reg;
25851 insn = emit_insn (TARGET_32BIT
25852 ? gen_movsi_update_stack (stack_reg, stack_reg,
25853 todec, stack_reg)
25854 : gen_movdi_di_update_stack (stack_reg, stack_reg,
25855 todec, stack_reg));
25856 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25857 it now and set the alias set/attributes. The above gen_*_update
25858 calls will generate a PARALLEL with the MEM set being the first
25859 operation. */
25860 par = PATTERN (insn);
25861 gcc_assert (GET_CODE (par) == PARALLEL);
25862 set = XVECEXP (par, 0, 0);
25863 gcc_assert (GET_CODE (set) == SET);
25864 mem = SET_DEST (set);
25865 gcc_assert (MEM_P (mem));
25866 MEM_NOTRAP_P (mem) = 1;
25867 set_mem_alias_set (mem, get_frame_alias_set ());
25869 RTX_FRAME_RELATED_P (insn) = 1;
25870 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25871 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
25872 GEN_INT (-size))));
25873 return insn;
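/* By way of example (32-bit shown; operands illustrative), the
   update-form store above becomes

	stwu 1,-SIZE(1)

   for frames up to 32767 bytes, or, for larger frames,

	lis 0,...		   (split constant load of -SIZE into r0)
	ori 0,0,...
	stwux 1,1,0

   so the back chain is written atomically with the SP decrement.  */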
25876 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25878 #if PROBE_INTERVAL > 32768
25879 #error Cannot use indexed addressing mode for stack probing
25880 #endif
25882 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25883 inclusive. These are offsets from the current stack pointer. */
25885 static void
25886 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25888 /* See if we have a constant small number of probes to generate. If so,
25889 that's the easy case. */
25890 if (first + size <= 32768)
25892 HOST_WIDE_INT i;
25894 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25895 it exceeds SIZE. If only one probe is needed, this will not
25896 generate any code. Then probe at FIRST + SIZE. */
25897 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25898 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25899 -(first + i)));
25901 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25902 -(first + size)));
25905 /* Otherwise, do the same as above, but in a loop. Note that we must be
25906 extra careful with variables wrapping around because we might be at
25907 the very top (or the very bottom) of the address space and we have
25908 to be able to handle this case properly; in particular, we use an
25909 equality test for the loop condition. */
25910 else
25912 HOST_WIDE_INT rounded_size;
25913 rtx r12 = gen_rtx_REG (Pmode, 12);
25914 rtx r0 = gen_rtx_REG (Pmode, 0);
25916 /* Sanity check for the addressing mode we're going to use. */
25917 gcc_assert (first <= 32768);
25919 /* Step 1: round SIZE to the previous multiple of the interval. */
25921 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25924 /* Step 2: compute initial and final value of the loop counter. */
25926 /* TEST_ADDR = SP + FIRST. */
25927 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25928 -first)));
25930 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25931 if (rounded_size > 32768)
25933 emit_move_insn (r0, GEN_INT (-rounded_size));
25934 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25936 else
25937 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25938 -rounded_size)));
25941 /* Step 3: the loop
25943 do
25944 {
25945 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25946 probe at TEST_ADDR
25947 }
25948 while (TEST_ADDR != LAST_ADDR)
25950 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25951 until it is equal to ROUNDED_SIZE. */
25953 if (TARGET_64BIT)
25954 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
25955 else
25956 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
25959 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25960 that SIZE is equal to ROUNDED_SIZE. */
25962 if (size != rounded_size)
25963 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25967 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25968 absolute addresses. */
25970 const char *
25971 output_probe_stack_range (rtx reg1, rtx reg2)
25973 static int labelno = 0;
25974 char loop_lab[32];
25975 rtx xops[2];
25977 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25979 /* Loop. */
25980 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25982 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25983 xops[0] = reg1;
25984 xops[1] = GEN_INT (-PROBE_INTERVAL);
25985 output_asm_insn ("addi %0,%0,%1", xops);
25987 /* Probe at TEST_ADDR. */
25988 xops[1] = gen_rtx_REG (Pmode, 0);
25989 output_asm_insn ("stw %1,0(%0)", xops);
25991 /* Test if TEST_ADDR == LAST_ADDR. */
25992 xops[1] = reg2;
25993 if (TARGET_64BIT)
25994 output_asm_insn ("cmpd 0,%0,%1", xops);
25995 else
25996 output_asm_insn ("cmpw 0,%0,%1", xops);
25998 /* Branch. */
25999 fputs ("\tbne 0,", asm_out_file);
26000 assemble_name_raw (asm_out_file, loop_lab);
26001 fputc ('\n', asm_out_file);
26003 return "";
26006 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
26007 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
26008 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
26009 deduce these equivalences by itself so it wasn't necessary to hold
26010 its hand so much. Don't be tempted to always supply d2_f_d_e with
26011 the actual CFA register, i.e. r31 when we are using a hard frame
26012 pointer. That fails when saving regs off r1, and sched moves the
26013 r31 setup past the reg saves. */
26015 static rtx
26016 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
26017 rtx reg2, rtx repl2)
26019 rtx repl;
26021 if (REGNO (reg) == STACK_POINTER_REGNUM)
26023 gcc_checking_assert (val == 0);
26024 repl = NULL_RTX;
26026 else
26027 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26028 GEN_INT (val));
26030 rtx pat = PATTERN (insn);
26031 if (!repl && !reg2)
26033 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
26034 if (GET_CODE (pat) == PARALLEL)
26035 for (int i = 0; i < XVECLEN (pat, 0); i++)
26036 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26038 rtx set = XVECEXP (pat, 0, i);
26040 /* If this PARALLEL has been emitted for out-of-line
26041 register save functions, or store multiple, then omit
26042 eh_frame info for any user-defined global regs. If
26043 eh_frame info is supplied, frame unwinding will
26044 restore a user reg. */
26045 if (!REG_P (SET_SRC (set))
26046 || !fixed_reg_p (REGNO (SET_SRC (set))))
26047 RTX_FRAME_RELATED_P (set) = 1;
26049 RTX_FRAME_RELATED_P (insn) = 1;
26050 return insn;
26053 /* We expect that 'pat' is either a SET or a PARALLEL containing
26054 SETs (and possibly other stuff). In a PARALLEL, all the SETs
26055 are important so they all have to be marked RTX_FRAME_RELATED_P.
26056 Call simplify_replace_rtx on the SETs rather than the whole insn
26057 so as to leave the other stuff alone (for example USE of r12). */
26059 if (GET_CODE (pat) == SET)
26061 if (repl)
26062 pat = simplify_replace_rtx (pat, reg, repl);
26063 if (reg2)
26064 pat = simplify_replace_rtx (pat, reg2, repl2);
26066 else if (GET_CODE (pat) == PARALLEL)
26068 pat = shallow_copy_rtx (pat);
26069 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
26071 for (int i = 0; i < XVECLEN (pat, 0); i++)
26072 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26074 rtx set = XVECEXP (pat, 0, i);
26076 if (repl)
26077 set = simplify_replace_rtx (set, reg, repl);
26078 if (reg2)
26079 set = simplify_replace_rtx (set, reg2, repl2);
26080 XVECEXP (pat, 0, i) = set;
26082 /* Omit eh_frame info for any user-defined global regs. */
26083 if (!REG_P (SET_SRC (set))
26084 || !fixed_reg_p (REGNO (SET_SRC (set))))
26085 RTX_FRAME_RELATED_P (set) = 1;
26088 else
26089 gcc_unreachable ();
26091 RTX_FRAME_RELATED_P (insn) = 1;
26092 if (repl || reg2)
26093 add_reg_note (insn, REG_FRAME_RELATED_EXPR, pat);
26095 return insn;
26098 /* Returns an insn that has a vrsave set operation with the
26099 appropriate CLOBBERs. */
26101 static rtx
26102 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
26104 int nclobs, i;
26105 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
26106 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26108 clobs[0]
26109 = gen_rtx_SET (vrsave,
26110 gen_rtx_UNSPEC_VOLATILE (SImode,
26111 gen_rtvec (2, reg, vrsave),
26112 UNSPECV_SET_VRSAVE));
26114 nclobs = 1;
26116 /* We need to clobber the registers in the mask so the scheduler
26117 does not move sets to VRSAVE before sets of AltiVec registers.
26119 However, if the function receives nonlocal gotos, reload will set
26120 all call saved registers live. We will end up with:
26122 (set (reg 999) (mem))
26123 (parallel [ (set (reg vrsave) (unspec blah))
26124 (clobber (reg 999))])
26126 The clobber will cause the store into reg 999 to be dead, and
26127 flow will attempt to delete an epilogue insn. In this case, we
26128 need an unspec use/set of the register. */
26130 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26131 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26133 if (!epiloguep || call_used_regs [i])
26134 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
26135 gen_rtx_REG (V4SImode, i));
26136 else
26138 rtx reg = gen_rtx_REG (V4SImode, i);
26140 clobs[nclobs++]
26141 = gen_rtx_SET (reg,
26142 gen_rtx_UNSPEC (V4SImode,
26143 gen_rtvec (1, reg), 27));
26147 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
26149 for (i = 0; i < nclobs; ++i)
26150 XVECEXP (insn, 0, i) = clobs[i];
26152 return insn;
26155 static rtx
26156 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
26158 rtx addr, mem;
26160 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
26161 mem = gen_frame_mem (GET_MODE (reg), addr);
26162 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
26165 static rtx
26166 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26168 return gen_frame_set (reg, frame_reg, offset, false);
26171 static rtx
26172 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26174 return gen_frame_set (reg, frame_reg, offset, true);
26177 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26178 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26180 static rtx
26181 emit_frame_save (rtx frame_reg, machine_mode mode,
26182 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26184 rtx reg, insn;
26186 /* Check we aren't given any of the cases that would need register indexed addressing. */
26187 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26188 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
26189 || (TARGET_E500_DOUBLE && mode == DFmode)
26190 || (TARGET_SPE_ABI
26191 && SPE_VECTOR_MODE (mode)
26192 && !SPE_CONST_OFFSET_OK (offset))));
26194 reg = gen_rtx_REG (mode, regno);
26195 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26196 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26197 NULL_RTX, NULL_RTX);
26200 /* Emit an offset memory reference suitable for a frame store, while
26201 converting to a valid addressing mode. */
26203 static rtx
26204 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26206 rtx int_rtx, offset_rtx;
26208 int_rtx = GEN_INT (offset);
26210 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
26211 || (TARGET_E500_DOUBLE && mode == DFmode))
26213 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
26214 emit_move_insn (offset_rtx, int_rtx);
26216 else
26217 offset_rtx = int_rtx;
26219 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
26222 #ifndef TARGET_FIX_AND_CONTINUE
26223 #define TARGET_FIX_AND_CONTINUE 0
26224 #endif
26226 /* The first saved register is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest. */
26227 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26228 #define LAST_SAVRES_REGISTER 31
26229 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26231 enum {
26232 SAVRES_LR = 0x1,
26233 SAVRES_SAVE = 0x2,
26234 SAVRES_REG = 0x0c,
26235 SAVRES_GPR = 0,
26236 SAVRES_FPR = 4,
26237 SAVRES_VR = 8
26240 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26242 /* Temporary holding space for an out-of-line register save/restore
26243 routine name. */
26244 static char savres_routine_name[30];
26246 /* Return the name for an out-of-line register save/restore routine.
26247 SEL encodes the register class, save vs. restore, and LR handling. */
26249 static char *
26250 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
26252 const char *prefix = "";
26253 const char *suffix = "";
26255 /* Different targets are supposed to define
26256 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26257 routine name could be defined with:
26259 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26261 This is a nice idea in theory, but in reality, things are
26262 complicated in several ways:
26264 - ELF targets have save/restore routines for GPRs.
26266 - SPE targets use different prefixes for 32/64-bit registers, and
26267 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
26269 - PPC64 ELF targets have routines for save/restore of GPRs that
26270 differ in what they do with the link register, so having a set
26271 prefix doesn't work. (We only use one of the save routines at
26272 the moment, though.)
26274 - PPC32 ELF targets have "exit" versions of the restore routines
26275 that restore the link register and can save some extra space.
26276 These require an extra suffix. (There are also "tail" versions
26277 of the restore routines and "GOT" versions of the save routines,
26278 but we don't generate those at present. Same problems apply,
26279 though.)
26281 We deal with all this by synthesizing our own prefix/suffix and
26282 using that for the simple sprintf call shown above. */
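/* For instance, the V.4 rules below produce names such as "_savegpr_29"
   and, for the LR-restoring variant, "_restgpr_29_x"; the AIX-style
   rules produce e.g. "_savegpr0_29" or "_restgpr1_29".  */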
26283 if (TARGET_SPE)
26285 /* No floating point saves on the SPE. */
26286 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
26288 if ((sel & SAVRES_SAVE))
26289 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
26290 else
26291 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
26293 if ((sel & SAVRES_LR))
26294 suffix = "_x";
26296 else if (DEFAULT_ABI == ABI_V4)
26298 if (TARGET_64BIT)
26299 goto aix_names;
26301 if ((sel & SAVRES_REG) == SAVRES_GPR)
26302 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26303 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26304 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26305 else if ((sel & SAVRES_REG) == SAVRES_VR)
26306 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26307 else
26308 abort ();
26310 if ((sel & SAVRES_LR))
26311 suffix = "_x";
26313 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26315 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26316 /* No out-of-line save/restore routines for GPRs on AIX. */
26317 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26318 #endif
26320 aix_names:
26321 if ((sel & SAVRES_REG) == SAVRES_GPR)
26322 prefix = ((sel & SAVRES_SAVE)
26323 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26324 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26325 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26327 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26328 if ((sel & SAVRES_LR))
26329 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26330 else
26331 #endif
26333 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26334 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26337 else if ((sel & SAVRES_REG) == SAVRES_VR)
26338 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26339 else
26340 abort ();
26343 if (DEFAULT_ABI == ABI_DARWIN)
26345 /* The Darwin approach is (slightly) different, in order to be
26346 compatible with code generated by the system toolchain. There is a
26347 single symbol for the start of save sequence, and the code here
26348 embeds an offset into that code on the basis of the first register
26349 to be saved. */
26350 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26351 if ((sel & SAVRES_REG) == SAVRES_GPR)
26352 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26353 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26354 (regno - 13) * 4, prefix, regno);
26355 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26356 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26357 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26358 else if ((sel & SAVRES_REG) == SAVRES_VR)
26359 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26360 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26361 else
26362 abort ();
26364 else
26365 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26367 return savres_routine_name;
26370 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26371 SEL encodes the register class, save vs. restore, and LR handling. */
26373 static rtx
26374 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26376 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26377 ? info->first_gp_reg_save
26378 : (sel & SAVRES_REG) == SAVRES_FPR
26379 ? info->first_fp_reg_save - 32
26380 : (sel & SAVRES_REG) == SAVRES_VR
26381 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26382 : -1);
26383 rtx sym;
26384 int select = sel;
26386 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
26387 versions of the gpr routines. */
26388 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
26389 && info->spe_64bit_regs_used)
26390 select ^= SAVRES_FPR ^ SAVRES_GPR;
26392 /* Don't generate bogus routine names. */
26393 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26394 && regno <= LAST_SAVRES_REGISTER
26395 && select >= 0 && select <= 12);
26397 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26399 if (sym == NULL)
26401 char *name;
26403 name = rs6000_savres_routine_name (info, regno, sel);
26405 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26406 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26407 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26410 return sym;
26413 /* Emit a sequence of insns, including a stack tie if needed, for
26414 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26415 reset the stack pointer, but move the base of the frame into
26416 reg UPDT_REGNO for use by out-of-line register restore routines. */
26418 static rtx
26419 rs6000_emit_stack_reset (rs6000_stack_t *info,
26420 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26421 unsigned updt_regno)
26423 rtx updt_reg_rtx;
26425 /* This blockage is needed so that sched doesn't decide to move
26426 the sp change before the register restores. */
26427 if (DEFAULT_ABI == ABI_V4
26428 || (TARGET_SPE_ABI
26429 && info->spe_64bit_regs_used != 0
26430 && info->first_gp_reg_save != 32))
26431 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
26433 /* If we are restoring registers out-of-line, we will be using the
26434 "exit" variants of the restore routines, which will reset the
26435 stack for us. But we do need to point updt_reg into the
26436 right place for those routines. */
26437 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26439 if (frame_off != 0)
26440 return emit_insn (gen_add3_insn (updt_reg_rtx,
26441 frame_reg_rtx, GEN_INT (frame_off)));
26442 else if (REGNO (frame_reg_rtx) != updt_regno)
26443 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26445 return NULL_RTX;
26448 /* Return the register number used as a pointer by out-of-line
26449 save/restore functions. */
26451 static inline unsigned
26452 ptr_regno_for_savres (int sel)
26454 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26455 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26456 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
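/* That is: AIX and ELFv2 use r1 for the FPR routines and for any
   routine that also handles LR, and r12 otherwise; Darwin uses r1 for
   its FPR routines; everything else (including all V.4 routines)
   uses r11.  */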
26459 /* Construct a parallel rtx describing the effect of a call to an
26460 out-of-line register save/restore routine, and emit the insn
26461 or jump_insn as appropriate. */
26463 static rtx
26464 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26465 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26466 machine_mode reg_mode, int sel)
26468 int i;
26469 int offset, start_reg, end_reg, n_regs, use_reg;
26470 int reg_size = GET_MODE_SIZE (reg_mode);
26471 rtx sym;
26472 rtvec p;
26473 rtx par, insn;
26475 offset = 0;
26476 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26477 ? info->first_gp_reg_save
26478 : (sel & SAVRES_REG) == SAVRES_FPR
26479 ? info->first_fp_reg_save
26480 : (sel & SAVRES_REG) == SAVRES_VR
26481 ? info->first_altivec_reg_save
26482 : -1);
26483 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26484 ? 32
26485 : (sel & SAVRES_REG) == SAVRES_FPR
26486 ? 64
26487 : (sel & SAVRES_REG) == SAVRES_VR
26488 ? LAST_ALTIVEC_REGNO + 1
26489 : -1);
26490 n_regs = end_reg - start_reg;
26491 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26492 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26493 + n_regs);
26495 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26496 RTVEC_ELT (p, offset++) = ret_rtx;
26498 RTVEC_ELT (p, offset++)
26499 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26501 sym = rs6000_savres_routine_sym (info, sel);
26502 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26504 use_reg = ptr_regno_for_savres (sel);
26505 if ((sel & SAVRES_REG) == SAVRES_VR)
26507 /* Vector regs are saved/restored using [reg+reg] addressing. */
26508 RTVEC_ELT (p, offset++)
26509 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26510 RTVEC_ELT (p, offset++)
26511 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26513 else
26514 RTVEC_ELT (p, offset++)
26515 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26517 for (i = 0; i < end_reg - start_reg; i++)
26518 RTVEC_ELT (p, i + offset)
26519 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26520 frame_reg_rtx, save_area_offset + reg_size * i,
26521 (sel & SAVRES_SAVE) != 0);
26523 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26524 RTVEC_ELT (p, i + offset)
26525 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26527 par = gen_rtx_PARALLEL (VOIDmode, p);
26529 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26531 insn = emit_jump_insn (par);
26532 JUMP_LABEL (insn) = ret_rtx;
26534 else
26535 insn = emit_insn (par);
26536 return insn;
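/* As an illustration, a V.4 GPR "exit" restore yields roughly

     (parallel [(return)
		(clobber (reg:P LR_REGNO))
		(use (symbol_ref "_restgpr_29_x"))
		(use (reg:P 11))
		(set (reg:P 29) (mem:P (plus (reg:P 11) ...)))
		...])

   emitted as a jump_insn (register numbers hypothetical).  */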
26539 /* Emit code to store, into REG, the CR fields that need to be saved. */
26541 static void
26542 rs6000_emit_move_from_cr (rtx reg)
26544 /* Only the ELFv2 ABI allows storing only selected fields. */
26545 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26547 int i, cr_reg[8], count = 0;
26549 /* Collect CR fields that must be saved. */
26550 for (i = 0; i < 8; i++)
26551 if (save_reg_p (CR0_REGNO + i))
26552 cr_reg[count++] = i;
26554 /* If it's just a single one, use mfcrf. */
26555 if (count == 1)
26557 rtvec p = rtvec_alloc (1);
26558 rtvec r = rtvec_alloc (2);
26559 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26560 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26561 RTVEC_ELT (p, 0)
26562 = gen_rtx_SET (reg,
26563 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26565 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26566 return;
26569 /* ??? It might be better to handle count == 2 / 3 cases here
26570 as well, using logical operations to combine the values. */
26573 emit_insn (gen_movesi_from_cr (reg));
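/* In other words, we emit either a plain "mfcr <reg>" or, when exactly
   one field must be saved under ELFv2, the single-field move (assembled
   as mfocrf) with a one-hot field mask.  */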
26576 /* Return whether the split-stack arg pointer (r12) is used. */
26578 static bool
26579 split_stack_arg_pointer_used_p (void)
26581 /* If the pseudo holding the arg pointer is no longer a pseudo,
26582 then the arg pointer is used. */
26583 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26584 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26585 || (REGNO (cfun->machine->split_stack_arg_pointer)
26586 < FIRST_PSEUDO_REGISTER)))
26587 return true;
26589 /* Unfortunately we also need to do some code scanning, since
26590 r12 may have been substituted for the pseudo. */
26591 rtx_insn *insn;
26592 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26593 FOR_BB_INSNS (bb, insn)
26594 if (NONDEBUG_INSN_P (insn))
26596 /* A call destroys r12. */
26597 if (CALL_P (insn))
26598 return false;
26600 df_ref use;
26601 FOR_EACH_INSN_USE (use, insn)
26603 rtx x = DF_REF_REG (use);
26604 if (REG_P (x) && REGNO (x) == 12)
26605 return true;
26607 df_ref def;
26608 FOR_EACH_INSN_DEF (def, insn)
26610 rtx x = DF_REF_REG (def);
26611 if (REG_P (x) && REGNO (x) == 12)
26612 return false;
26615 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26618 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26620 static bool
26621 rs6000_global_entry_point_needed_p (void)
26623 /* Only needed for the ELFv2 ABI. */
26624 if (DEFAULT_ABI != ABI_ELFv2)
26625 return false;
26627 /* With -msingle-pic-base, we assume the whole program shares the same
26628 TOC, so no global entry point prologues are needed anywhere. */
26629 if (TARGET_SINGLE_PIC_BASE)
26630 return false;
26632 /* Ensure we have a global entry point for thunks. ??? We could
26633 avoid that if the target routine doesn't need a global entry point,
26634 but we do not know whether this is the case at this point. */
26635 if (cfun->is_thunk)
26636 return true;
26638 /* For regular functions, rs6000_emit_prologue sets this flag if the
26639 routine ever uses the TOC pointer. */
26640 return cfun->machine->r2_setup_needed;
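/* For reference, the ELFv2 global entry point conventionally recomputes
   the TOC pointer from the function address in r12, roughly:

   0:	addis 2,12,(.TOC.-0b)@ha
	addi 2,2,(.TOC.-0b)@l
	.localentry foo,.-foo

   (the actual directives are produced elsewhere, when the prologue is
   output).  */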
26643 /* Emit function prologue as insns. */
26645 void
26646 rs6000_emit_prologue (void)
26648 rs6000_stack_t *info = rs6000_stack_info ();
26649 machine_mode reg_mode = Pmode;
26650 int reg_size = TARGET_32BIT ? 4 : 8;
26651 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26652 rtx frame_reg_rtx = sp_reg_rtx;
26653 unsigned int cr_save_regno;
26654 rtx cr_save_rtx = NULL_RTX;
26655 rtx insn;
26656 int strategy;
26657 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26658 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26659 && call_used_regs[STATIC_CHAIN_REGNUM]);
26660 int using_split_stack = (flag_split_stack
26661 && (lookup_attribute ("no_split_stack",
26662 DECL_ATTRIBUTES (cfun->decl))
26663 == NULL));
26665 /* Offset to top of frame for frame_reg and sp respectively. */
26666 HOST_WIDE_INT frame_off = 0;
26667 HOST_WIDE_INT sp_off = 0;
26668 /* sp_adjust is the stack adjusting instruction, tracked so that the
26669 insn setting up the split-stack arg pointer can be emitted just
26670 prior to it, when r12 is not used here for other purposes. */
26671 rtx_insn *sp_adjust = 0;
26673 #if CHECKING_P
26674 /* Track and check usage of r0, r11, r12. */
26675 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26676 #define START_USE(R) do \
26678 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26679 reg_inuse |= 1 << (R); \
26680 } while (0)
26681 #define END_USE(R) do \
26683 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26684 reg_inuse &= ~(1 << (R)); \
26685 } while (0)
26686 #define NOT_INUSE(R) do \
26688 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26689 } while (0)
26690 #else
26691 #define START_USE(R) do {} while (0)
26692 #define END_USE(R) do {} while (0)
26693 #define NOT_INUSE(R) do {} while (0)
26694 #endif
26696 if (DEFAULT_ABI == ABI_ELFv2
26697 && !TARGET_SINGLE_PIC_BASE)
26699 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26701 /* With -mminimal-toc we may generate an extra use of r2 below. */
26702 if (TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
26703 cfun->machine->r2_setup_needed = true;
26707 if (flag_stack_usage_info)
26708 current_function_static_stack_size = info->total_size;
26710 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26712 HOST_WIDE_INT size = info->total_size;
26714 if (crtl->is_leaf && !cfun->calls_alloca)
26716 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
26717 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
26718 size - STACK_CHECK_PROTECT);
26720 else if (size > 0)
26721 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
26724 if (TARGET_FIX_AND_CONTINUE)
26726 /* gdb on darwin arranges to forward a function from the old
26727 address by modifying the first 5 instructions of the function
26728 to branch to the overriding function. This is necessary to
26729 permit function pointers that point to the old function to
26730 actually forward to the new function. */
26731 emit_insn (gen_nop ());
26732 emit_insn (gen_nop ());
26733 emit_insn (gen_nop ());
26734 emit_insn (gen_nop ());
26735 emit_insn (gen_nop ());
26738 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
26740 reg_mode = V2SImode;
26741 reg_size = 8;
26744 /* Handle world saves specially here. */
26745 if (WORLD_SAVE_P (info))
26747 int i, j, sz;
26748 rtx treg;
26749 rtvec p;
26750 rtx reg0;
26752 /* save_world expects lr in r0. */
26753 reg0 = gen_rtx_REG (Pmode, 0);
26754 if (info->lr_save_p)
26756 insn = emit_move_insn (reg0,
26757 gen_rtx_REG (Pmode, LR_REGNO));
26758 RTX_FRAME_RELATED_P (insn) = 1;
26761 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26762 assumptions about the offsets of various bits of the stack
26763 frame. */
26764 gcc_assert (info->gp_save_offset == -220
26765 && info->fp_save_offset == -144
26766 && info->lr_save_offset == 8
26767 && info->cr_save_offset == 4
26768 && info->push_p
26769 && info->lr_save_p
26770 && (!crtl->calls_eh_return
26771 || info->ehrd_offset == -432)
26772 && info->vrsave_save_offset == -224
26773 && info->altivec_save_offset == -416);
26775 treg = gen_rtx_REG (SImode, 11);
26776 emit_move_insn (treg, GEN_INT (-info->total_size));
26778 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26779 in R11. It also clobbers R12, so beware! */
25781 /* Preserve CR2 for save_world prologues. */
26782 sz = 5;
26783 sz += 32 - info->first_gp_reg_save;
26784 sz += 64 - info->first_fp_reg_save;
26785 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26786 p = rtvec_alloc (sz);
26787 j = 0;
26788 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26789 gen_rtx_REG (SImode,
26790 LR_REGNO));
26791 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26792 gen_rtx_SYMBOL_REF (Pmode,
26793 "*save_world"));
26794 /* We do floats first so that the instruction pattern matches
26795 properly. */
26796 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26797 RTVEC_ELT (p, j++)
26798 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26799 ? DFmode : SFmode,
26800 info->first_fp_reg_save + i),
26801 frame_reg_rtx,
26802 info->fp_save_offset + frame_off + 8 * i);
26803 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26804 RTVEC_ELT (p, j++)
26805 = gen_frame_store (gen_rtx_REG (V4SImode,
26806 info->first_altivec_reg_save + i),
26807 frame_reg_rtx,
26808 info->altivec_save_offset + frame_off + 16 * i);
26809 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26810 RTVEC_ELT (p, j++)
26811 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26812 frame_reg_rtx,
26813 info->gp_save_offset + frame_off + reg_size * i);
26815 /* CR register traditionally saved as CR2. */
26816 RTVEC_ELT (p, j++)
26817 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26818 frame_reg_rtx, info->cr_save_offset + frame_off);
26819 /* Explain about use of R0. */
26820 if (info->lr_save_p)
26821 RTVEC_ELT (p, j++)
26822 = gen_frame_store (reg0,
26823 frame_reg_rtx, info->lr_save_offset + frame_off);
26824 /* Explain what happens to the stack pointer. */
26826 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26827 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26830 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26831 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26832 treg, GEN_INT (-info->total_size));
26833 sp_off = frame_off = info->total_size;
26836 strategy = info->savres_strategy;
26838 /* For V.4, update stack before we do any saving and set back pointer. */
26839 if (! WORLD_SAVE_P (info)
26840 && info->push_p
26841 && (DEFAULT_ABI == ABI_V4
26842 || crtl->calls_eh_return))
26844 bool need_r11 = (TARGET_SPE
26845 ? (!(strategy & SAVE_INLINE_GPRS)
26846 && info->spe_64bit_regs_used == 0)
26847 : (!(strategy & SAVE_INLINE_FPRS)
26848 || !(strategy & SAVE_INLINE_GPRS)
26849 || !(strategy & SAVE_INLINE_VRS)));
26850 int ptr_regno = -1;
26851 rtx ptr_reg = NULL_RTX;
26852 int ptr_off = 0;
26854 if (info->total_size < 32767)
26855 frame_off = info->total_size;
26856 else if (need_r11)
26857 ptr_regno = 11;
26858 else if (info->cr_save_p
26859 || info->lr_save_p
26860 || info->first_fp_reg_save < 64
26861 || info->first_gp_reg_save < 32
26862 || info->altivec_size != 0
26863 || info->vrsave_size != 0
26864 || crtl->calls_eh_return)
26865 ptr_regno = 12;
26866 else
26868 /* The prologue won't be saving any regs so there is no need
26869 to set up a frame register to access any frame save area.
26870 We also won't be using frame_off anywhere below, but set
26871 the correct value anyway to protect against future
26872 changes to this function. */
26873 frame_off = info->total_size;
26875 if (ptr_regno != -1)
26877 /* Set up the frame offset to that needed by the first
26878 out-of-line save function. */
26879 START_USE (ptr_regno);
26880 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26881 frame_reg_rtx = ptr_reg;
26882 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26883 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26884 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26885 ptr_off = info->gp_save_offset + info->gp_size;
26886 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26887 ptr_off = info->altivec_save_offset + info->altivec_size;
26888 frame_off = -ptr_off;
26890 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26891 ptr_reg, ptr_off);
26892 if (REGNO (frame_reg_rtx) == 12)
26893 sp_adjust = 0;
26894 sp_off = info->total_size;
26895 if (frame_reg_rtx != sp_reg_rtx)
26896 rs6000_emit_stack_tie (frame_reg_rtx, false);
26899 /* If we use the link register, get it into r0. */
26900 if (!WORLD_SAVE_P (info) && info->lr_save_p)
26902 rtx addr, reg, mem;
26904 reg = gen_rtx_REG (Pmode, 0);
26905 START_USE (0);
26906 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26907 RTX_FRAME_RELATED_P (insn) = 1;
26909 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26910 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26912 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26913 GEN_INT (info->lr_save_offset + frame_off));
26914 mem = gen_rtx_MEM (Pmode, addr);
26915 /* This should not be of rs6000_sr_alias_set, because of
26916 __builtin_return_address. */
26918 insn = emit_move_insn (mem, reg);
26919 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26920 NULL_RTX, NULL_RTX);
26921 END_USE (0);
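/* The sequence just emitted amounts to "mflr 0" followed by a store
   such as "stw 0,4(1)" (32-bit V.4) or "std 0,16(1)" (64-bit
   AIX/ELFv2; offsets illustrative), unless an out-of-line save routine
   will store LR for us.  */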
26925 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26926 r12 will be needed by the out-of-line GPR save. */
26927 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26928 && !(strategy & (SAVE_INLINE_GPRS
26929 | SAVE_NOINLINE_GPRS_SAVES_LR))
26930 ? 11 : 12);
26931 if (!WORLD_SAVE_P (info)
26932 && info->cr_save_p
26933 && REGNO (frame_reg_rtx) != cr_save_regno
26934 && !(using_static_chain_p && cr_save_regno == 11)
26935 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26937 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26938 START_USE (cr_save_regno);
26939 rs6000_emit_move_from_cr (cr_save_rtx);
26942 /* Do any required saving of FPRs. If only one or two need saving, do
26943 it ourselves. Otherwise, call an out-of-line save function. */
26944 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26946 int i;
26947 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26948 if (save_reg_p (info->first_fp_reg_save + i))
26949 emit_frame_save (frame_reg_rtx,
26950 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26951 ? DFmode : SFmode),
26952 info->first_fp_reg_save + i,
26953 info->fp_save_offset + frame_off + 8 * i,
26954 sp_off - frame_off);
26956 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26958 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26959 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26960 unsigned ptr_regno = ptr_regno_for_savres (sel);
26961 rtx ptr_reg = frame_reg_rtx;
26963 if (REGNO (frame_reg_rtx) == ptr_regno)
26964 gcc_checking_assert (frame_off == 0);
26965 else
26967 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26968 NOT_INUSE (ptr_regno);
26969 emit_insn (gen_add3_insn (ptr_reg,
26970 frame_reg_rtx, GEN_INT (frame_off)));
26972 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26973 info->fp_save_offset,
26974 info->lr_save_offset,
26975 DFmode, sel);
26976 rs6000_frame_related (insn, ptr_reg, sp_off,
26977 NULL_RTX, NULL_RTX);
26978 if (lr)
26979 END_USE (0);
26982 /* Save GPRs. This is done as a PARALLEL if we are using
26983 the store-multiple instructions. */
26984 if (!WORLD_SAVE_P (info)
26985 && TARGET_SPE_ABI
26986 && info->spe_64bit_regs_used != 0
26987 && info->first_gp_reg_save != 32)
26989 int i;
26990 rtx spe_save_area_ptr;
26991 HOST_WIDE_INT save_off;
26992 int ool_adjust = 0;
26994 /* Determine whether we can address all of the registers that need
26995 to be saved with an offset from frame_reg_rtx that fits in
26996 the small const field for SPE memory instructions. */
26997 int spe_regs_addressable
26998 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
26999 + reg_size * (32 - info->first_gp_reg_save - 1))
27000 && (strategy & SAVE_INLINE_GPRS));
27002 if (spe_regs_addressable)
27004 spe_save_area_ptr = frame_reg_rtx;
27005 save_off = frame_off;
27007 else
27009 /* Make r11 point to the start of the SPE save area. We need
27010 to be careful here if r11 is holding the static chain. If
27011 it is, then temporarily save it in r0. */
27012 HOST_WIDE_INT offset;
27014 if (!(strategy & SAVE_INLINE_GPRS))
27015 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
27016 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
27017 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
27018 save_off = frame_off - offset;
27020 if (using_static_chain_p)
27022 rtx r0 = gen_rtx_REG (Pmode, 0);
27024 START_USE (0);
27025 gcc_assert (info->first_gp_reg_save > 11);
27027 emit_move_insn (r0, spe_save_area_ptr);
27029 else if (REGNO (frame_reg_rtx) != 11)
27030 START_USE (11);
27032 emit_insn (gen_addsi3 (spe_save_area_ptr,
27033 frame_reg_rtx, GEN_INT (offset)));
27034 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
27035 frame_off = -info->spe_gp_save_offset + ool_adjust;
27038 if ((strategy & SAVE_INLINE_GPRS))
27040 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27041 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
27042 emit_frame_save (spe_save_area_ptr, reg_mode,
27043 info->first_gp_reg_save + i,
27044 (info->spe_gp_save_offset + save_off
27045 + reg_size * i),
27046 sp_off - save_off);
27048 else
27050 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
27051 info->spe_gp_save_offset + save_off,
27052 0, reg_mode,
27053 SAVRES_SAVE | SAVRES_GPR);
27055 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
27056 NULL_RTX, NULL_RTX);
27059 /* Move the static chain pointer back. */
27060 if (!spe_regs_addressable)
27062 if (using_static_chain_p)
27064 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
27065 END_USE (0);
27067 else if (REGNO (frame_reg_rtx) != 11)
27068 END_USE (11);
27071 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27073 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27074 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27075 unsigned ptr_regno = ptr_regno_for_savres (sel);
27076 rtx ptr_reg = frame_reg_rtx;
27077 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27078 int end_save = info->gp_save_offset + info->gp_size;
27079 int ptr_off;
27081 if (ptr_regno == 12)
27082 sp_adjust = 0;
27083 if (!ptr_set_up)
27084 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27086 /* Need to adjust r11 (r12) if we saved any FPRs. */
27087 if (end_save + frame_off != 0)
27089 rtx offset = GEN_INT (end_save + frame_off);
27091 if (ptr_set_up)
27092 frame_off = -end_save;
27093 else
27094 NOT_INUSE (ptr_regno);
27095 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27097 else if (!ptr_set_up)
27099 NOT_INUSE (ptr_regno);
27100 emit_move_insn (ptr_reg, frame_reg_rtx);
27102 ptr_off = -end_save;
27103 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27104 info->gp_save_offset + ptr_off,
27105 info->lr_save_offset + ptr_off,
27106 reg_mode, sel);
27107 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27108 NULL_RTX, NULL_RTX);
27109 if (lr)
27110 END_USE (0);
27112 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27114 rtvec p;
27115 int i;
27116 p = rtvec_alloc (32 - info->first_gp_reg_save);
27117 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27118 RTVEC_ELT (p, i)
27119 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27120 frame_reg_rtx,
27121 info->gp_save_offset + frame_off + reg_size * i);
27122 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27123 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27124 NULL_RTX, NULL_RTX);
27126 else if (!WORLD_SAVE_P (info))
27128 int i;
27129 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27130 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
27131 emit_frame_save (frame_reg_rtx, reg_mode,
27132 info->first_gp_reg_save + i,
27133 info->gp_save_offset + frame_off + reg_size * i,
27134 sp_off - frame_off);
27137 if (crtl->calls_eh_return)
27139 unsigned int i;
27140 rtvec p;
27142 for (i = 0; ; ++i)
27144 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27145 if (regno == INVALID_REGNUM)
27146 break;
27149 p = rtvec_alloc (i);
27151 for (i = 0; ; ++i)
27153 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27154 if (regno == INVALID_REGNUM)
27155 break;
27157 insn
27158 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27159 sp_reg_rtx,
27160 info->ehrd_offset + sp_off + reg_size * (int) i);
27161 RTVEC_ELT (p, i) = insn;
27162 RTX_FRAME_RELATED_P (insn) = 1;
27165 insn = emit_insn (gen_blockage ());
27166 RTX_FRAME_RELATED_P (insn) = 1;
27167 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27170 /* In the AIX ABI we need to make sure r2 is really saved. */
27171 if (TARGET_AIX && crtl->calls_eh_return)
27173 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27174 rtx save_insn, join_insn, note;
27175 long toc_restore_insn;
27177 tmp_reg = gen_rtx_REG (Pmode, 11);
27178 tmp_reg_si = gen_rtx_REG (SImode, 11);
27179 if (using_static_chain_p)
27181 START_USE (0);
27182 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27184 else
27185 START_USE (11);
27186 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27187 /* Peek at the instruction to which this function returns. If it's
27188 restoring r2, then we know we've already saved r2. We can't
27189 unconditionally save r2 because the value we have will already
27190 be updated if we arrived at this function via a plt call or
27191 toc adjusting stub. */
27192 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27193 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27194 + RS6000_TOC_SAVE_SLOT);
27195 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27196 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27197 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27198 validate_condition_mode (EQ, CCUNSmode);
27199 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27200 emit_insn (gen_rtx_SET (compare_result,
27201 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27202 toc_save_done = gen_label_rtx ();
27203 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27204 gen_rtx_EQ (VOIDmode, compare_result,
27205 const0_rtx),
27206 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27207 pc_rtx);
27208 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27209 JUMP_LABEL (jump) = toc_save_done;
27210 LABEL_NUSES (toc_save_done) += 1;
27212 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27213 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27214 sp_off - frame_off);
27216 emit_label (toc_save_done);
27218 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27219 have a CFG that has different saves along different paths.
27220 Move the note to a dummy blockage insn, which describes that
27221 R2 is unconditionally saved after the label. */
27222 /* ??? An alternate representation might be a special insn pattern
27223 containing both the branch and the store. That might give the
27224 code that minimizes the number of DW_CFA_advance opcodes more
27225 freedom in placing the annotations. */
27226 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27227 if (note)
27228 remove_note (save_insn, note);
27229 else
27230 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27231 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27232 RTX_FRAME_RELATED_P (save_insn) = 0;
27234 join_insn = emit_insn (gen_blockage ());
27235 REG_NOTES (join_insn) = note;
27236 RTX_FRAME_RELATED_P (join_insn) = 1;
27238 if (using_static_chain_p)
27240 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27241 END_USE (0);
27243 else
27244 END_USE (11);
27247 /* Save CR if we use any that must be preserved. */
27248 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27250 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27251 GEN_INT (info->cr_save_offset + frame_off));
27252 rtx mem = gen_frame_mem (SImode, addr);
27254 /* If we didn't copy cr before, do so now using r0. */
27255 if (cr_save_rtx == NULL_RTX)
27257 START_USE (0);
27258 cr_save_rtx = gen_rtx_REG (SImode, 0);
27259 rs6000_emit_move_from_cr (cr_save_rtx);
27262 /* Saving CR requires a two-instruction sequence: one instruction
27263 to move the CR to a general-purpose register, and a second
27264 instruction that stores the GPR to memory.
27266 We do not emit any DWARF CFI records for the first of these,
27267 because we cannot properly represent the fact that CR is saved in
27268 a register. One reason is that we cannot express that multiple
27269 CR fields are saved; another reason is that on 64-bit, the size
27270 of the CR register in DWARF (4 bytes) differs from the size of
27271 a general-purpose register.
27273 This means if any intervening instruction were to clobber one of
27274 the call-saved CR fields, we'd have incorrect CFI. To prevent
27275 this from happening, we mark the store to memory as a use of
27276 those CR fields, which prevents any such instruction from being
27277 scheduled in between the two instructions. */
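/* Shape of the PARALLEL built below (RTL sketch, assuming cr2 is the
   only live call-saved field; CR2 is hard register 70 on this port):
     (parallel [(set (mem:SI <cr save slot>) (reg:SI 0))
		(use (reg:CC 70))])
   The USEs are what keep CR-clobbering insns from being scheduled
   between the mfcr and the store.  */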
27278 rtx crsave_v[9];
27279 int n_crsave = 0;
27280 int i;
27282 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27283 for (i = 0; i < 8; i++)
27284 if (save_reg_p (CR0_REGNO + i))
27285 crsave_v[n_crsave++]
27286 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27288 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27289 gen_rtvec_v (n_crsave, crsave_v)));
27290 END_USE (REGNO (cr_save_rtx));
27292 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27293 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27294 so we need to construct a frame expression manually. */
27295 RTX_FRAME_RELATED_P (insn) = 1;
27297 /* Update address to be stack-pointer relative, like
27298 rs6000_frame_related would do. */
27299 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27300 GEN_INT (info->cr_save_offset + sp_off));
27301 mem = gen_frame_mem (SImode, addr);
27303 if (DEFAULT_ABI == ABI_ELFv2)
27305 /* In the ELFv2 ABI we generate separate CFI records for each
27306 CR field that was actually saved. They all point to the
27307 same 32-bit stack slot. */
27308 rtx crframe[8];
27309 int n_crframe = 0;
27311 for (i = 0; i < 8; i++)
27312 if (save_reg_p (CR0_REGNO + i))
27314 crframe[n_crframe]
27315 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27317 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27318 n_crframe++;
27321 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27322 gen_rtx_PARALLEL (VOIDmode,
27323 gen_rtvec_v (n_crframe, crframe)));
27325 else
27327 /* In other ABIs, by convention, we use a single CR regnum to
27328 represent the fact that all call-saved CR fields are saved.
27329 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27330 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27331 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27335 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27336 *separate* slots if the routine calls __builtin_eh_return, so
27337 that they can be independently restored by the unwinder. */
27338 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27340 int i, cr_off = info->ehcr_offset;
27341 rtx crsave;
27343 /* ??? We might get better performance by using multiple mfocrf
27344 instructions. */
27345 crsave = gen_rtx_REG (SImode, 0);
27346 emit_insn (gen_movesi_from_cr (crsave));
27348 for (i = 0; i < 8; i++)
27349 if (!call_used_regs[CR0_REGNO + i])
27351 rtvec p = rtvec_alloc (2);
27352 RTVEC_ELT (p, 0)
27353 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27354 RTVEC_ELT (p, 1)
27355 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27357 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27359 RTX_FRAME_RELATED_P (insn) = 1;
27360 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27361 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27362 sp_reg_rtx, cr_off + sp_off));
27364 cr_off += reg_size;
27368 /* Update stack and set back pointer unless this is V.4,
27369 for which it was done previously. */
27370 if (!WORLD_SAVE_P (info) && info->push_p
27371 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27373 rtx ptr_reg = NULL;
27374 int ptr_off = 0;
27376 /* If saving altivec regs we need to be able to address all save
27377 locations using a 16-bit offset. */
27378 if ((strategy & SAVE_INLINE_VRS) == 0
27379 || (info->altivec_size != 0
27380 && (info->altivec_save_offset + info->altivec_size - 16
27381 + info->total_size - frame_off) > 32767)
27382 || (info->vrsave_size != 0
27383 && (info->vrsave_save_offset
27384 + info->total_size - frame_off) > 32767))
27386 int sel = SAVRES_SAVE | SAVRES_VR;
27387 unsigned ptr_regno = ptr_regno_for_savres (sel);
27389 if (using_static_chain_p
27390 && ptr_regno == STATIC_CHAIN_REGNUM)
27391 ptr_regno = 12;
27392 if (REGNO (frame_reg_rtx) != ptr_regno)
27393 START_USE (ptr_regno);
27394 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27395 frame_reg_rtx = ptr_reg;
27396 ptr_off = info->altivec_save_offset + info->altivec_size;
27397 frame_off = -ptr_off;
27399 else if (REGNO (frame_reg_rtx) == 1)
27400 frame_off = info->total_size;
27401 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27402 ptr_reg, ptr_off);
27403 if (REGNO (frame_reg_rtx) == 12)
27404 sp_adjust = 0;
27405 sp_off = info->total_size;
27406 if (frame_reg_rtx != sp_reg_rtx)
27407 rs6000_emit_stack_tie (frame_reg_rtx, false);
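/* For frames reachable with a 16-bit displacement the allocation
   above is typically a single store-with-update, e.g.
	stdu 1,-144(1)
   (64-bit sketch; -144 stands in for -info->total_size), which
   allocates the frame and writes the back chain atomically.  */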
27410 /* Set frame pointer, if needed. */
27411 if (frame_pointer_needed)
27413 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27414 sp_reg_rtx);
27415 RTX_FRAME_RELATED_P (insn) = 1;
27418 /* Save AltiVec registers if needed. Save here because the red zone does
27419 not always include AltiVec registers. */
27420 if (!WORLD_SAVE_P (info)
27421 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27423 int end_save = info->altivec_save_offset + info->altivec_size;
27424 int ptr_off;
27425 /* Oddly, the vector save/restore functions point r0 at the end
27426 of the save area, then use r11 or r12 to load offsets for
27427 [reg+reg] addressing. */
27428 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27429 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27430 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27432 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27433 NOT_INUSE (0);
27434 if (scratch_regno == 12)
27435 sp_adjust = 0;
27436 if (end_save + frame_off != 0)
27438 rtx offset = GEN_INT (end_save + frame_off);
27440 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27442 else
27443 emit_move_insn (ptr_reg, frame_reg_rtx);
27445 ptr_off = -end_save;
27446 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27447 info->altivec_save_offset + ptr_off,
27448 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27449 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27450 NULL_RTX, NULL_RTX);
27451 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27453 /* The oddity mentioned above clobbered our frame reg. */
27454 emit_move_insn (frame_reg_rtx, ptr_reg);
27455 frame_off = ptr_off;
27458 else if (!WORLD_SAVE_P (info)
27459 && info->altivec_size != 0)
27461 int i;
27463 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27464 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27466 rtx areg, savereg, mem;
27467 HOST_WIDE_INT offset;
27469 offset = (info->altivec_save_offset + frame_off
27470 + 16 * (i - info->first_altivec_reg_save));
27472 savereg = gen_rtx_REG (V4SImode, i);
27474 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
27476 mem = gen_frame_mem (V4SImode,
27477 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27478 GEN_INT (offset)));
27479 insn = emit_insn (gen_rtx_SET (mem, savereg));
27480 areg = NULL_RTX;
27482 else
27484 NOT_INUSE (0);
27485 areg = gen_rtx_REG (Pmode, 0);
27486 emit_move_insn (areg, GEN_INT (offset));
27488 /* AltiVec addressing mode is [reg+reg]. */
27489 mem = gen_frame_mem (V4SImode,
27490 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27492 /* Rather than emitting a generic move, force use of the stvx
27493 instruction, which we always want on ISA 2.07 (power8) systems.
27494 In particular we don't want xxpermdi/stxvd2x for little
27495 endian. */
27496 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27499 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27500 areg, GEN_INT (offset));
27504 /* VRSAVE is a bit vector representing which AltiVec registers
27505 are used. The OS uses this to determine which vector
27506 registers to save on a context switch. We need to save
27507 VRSAVE on the stack frame, add whatever AltiVec registers we
27508 used in this function, and do the corresponding magic in the
27509 epilogue. */
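/* Sketch of the non-Darwin expansion below (register numbers and the
   ori/oris split are illustrative; VRSAVE is SPR 256):
	mfspr r11,256          # mfvrsave
	stw   r11,off(r1)      # save the caller's VRSAVE
	oris  r11,r11,mask@h   # add the VRs this function uses
	ori   r11,r11,mask@l
	mtspr 256,r11          # mtvrsave  */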
27511 if (!WORLD_SAVE_P (info)
27512 && info->vrsave_size != 0)
27514 rtx reg, vrsave;
27515 int offset;
27516 int save_regno;
27518 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
27519 be using r12 as frame_reg_rtx and r11 as the static chain
27520 pointer for nested functions. */
27521 save_regno = 12;
27522 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27523 && !using_static_chain_p)
27524 save_regno = 11;
27525 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27527 save_regno = 11;
27528 if (using_static_chain_p)
27529 save_regno = 0;
27532 NOT_INUSE (save_regno);
27533 reg = gen_rtx_REG (SImode, save_regno);
27534 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
27535 if (TARGET_MACHO)
27536 emit_insn (gen_get_vrsave_internal (reg));
27537 else
27538 emit_insn (gen_rtx_SET (reg, vrsave));
27540 /* Save VRSAVE. */
27541 offset = info->vrsave_save_offset + frame_off;
27542 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
27544 /* Include the registers in the mask. */
27545 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
27547 insn = emit_insn (generate_set_vrsave (reg, info, 0));
27550 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27551 if (!TARGET_SINGLE_PIC_BASE
27552 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
27553 || (DEFAULT_ABI == ABI_V4
27554 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27555 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27557 /* If emit_load_toc_table will use the link register, we need to save
27558 it. We use R12 for this purpose because emit_load_toc_table
27559 can use register 0. This allows us to use a plain 'blr' to return
27560 from the procedure more often. */
27561 int save_LR_around_toc_setup = (TARGET_ELF
27562 && DEFAULT_ABI == ABI_V4
27563 && flag_pic
27564 && ! info->lr_save_p
27565 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27566 if (save_LR_around_toc_setup)
27568 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27569 rtx tmp = gen_rtx_REG (Pmode, 12);
27571 sp_adjust = 0;
27572 insn = emit_move_insn (tmp, lr);
27573 RTX_FRAME_RELATED_P (insn) = 1;
27575 rs6000_emit_load_toc_table (TRUE);
27577 insn = emit_move_insn (lr, tmp);
27578 add_reg_note (insn, REG_CFA_RESTORE, lr);
27579 RTX_FRAME_RELATED_P (insn) = 1;
27581 else
27582 rs6000_emit_load_toc_table (TRUE);
27585 #if TARGET_MACHO
27586 if (!TARGET_SINGLE_PIC_BASE
27587 && DEFAULT_ABI == ABI_DARWIN
27588 && flag_pic && crtl->uses_pic_offset_table)
27590 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27591 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27593 /* Save and restore LR locally around this call (in R0). */
27594 if (!info->lr_save_p)
27595 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27597 emit_insn (gen_load_macho_picbase (src));
27599 emit_move_insn (gen_rtx_REG (Pmode,
27600 RS6000_PIC_OFFSET_TABLE_REGNUM),
27601 lr);
27603 if (!info->lr_save_p)
27604 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27606 #endif
27608 /* If we need to, save the TOC register after doing the stack setup.
27609 Do not emit eh frame info for this save. The unwinder wants info,
27610 conceptually attached to instructions in this function, about
27611 register values in the caller of this function. This R2 may have
27612 already been changed from the value in the caller.
27613 We don't attempt to write accurate DWARF EH frame info for R2
27614 because code emitted by gcc for a (non-pointer) function call
27615 doesn't save and restore R2. Instead, R2 is managed out-of-line
27616 by a linker generated plt call stub when the function resides in
27617 a shared library. This behavior is costly to describe in DWARF,
27618 both in terms of the size of DWARF info and the time taken in the
27619 unwinder to interpret it. R2 changes, apart from the
27620 calls_eh_return case earlier in this function, are handled by
27621 linux-unwind.h frob_update_context. */
27622 if (rs6000_save_toc_in_prologue_p ())
27624 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27625 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
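/* E.g. "std 2,24(1)" under ELFv2, whose RS6000_TOC_SAVE_SLOT is 24
   (sketch; ELFv1/AIX 64-bit use a slot of 40, 32-bit ABIs 20).  */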
27628 if (using_split_stack && split_stack_arg_pointer_used_p ())
27630 /* Set up the arg pointer (r12) for -fsplit-stack code. If
27631 __morestack was called, it left the arg pointer to the old
27632 stack in r29. Otherwise, the arg pointer is the top of the
27633 current frame. */
27634 cfun->machine->split_stack_argp_used = true;
27635 if (sp_adjust)
27637 rtx r12 = gen_rtx_REG (Pmode, 12);
27638 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
27639 emit_insn_before (set_r12, sp_adjust);
27641 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
27643 rtx r12 = gen_rtx_REG (Pmode, 12);
27644 if (frame_off == 0)
27645 emit_move_insn (r12, frame_reg_rtx);
27646 else
27647 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
27649 if (info->push_p)
27651 rtx r12 = gen_rtx_REG (Pmode, 12);
27652 rtx r29 = gen_rtx_REG (Pmode, 29);
27653 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
27654 rtx not_more = gen_label_rtx ();
27655 rtx jump;
27657 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27658 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
27659 gen_rtx_LABEL_REF (VOIDmode, not_more),
27660 pc_rtx);
27661 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27662 JUMP_LABEL (jump) = not_more;
27663 LABEL_NUSES (not_more) += 1;
27664 emit_move_insn (r12, r29);
27665 emit_label (not_more);
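/* The GEU branch above keys off cr7, which is assumed to still hold
   the stack-limit comparison from the -fsplit-stack prologue check:
   if the stack did not overflow, __morestack never ran and the r12
   value set above is already correct; otherwise r12 is replaced by
   the old-stack arg pointer that __morestack left in r29 (editorial
   reading of the code above).  */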
27670 /* Output .extern statements for the save/restore routines we use. */
27672 static void
27673 rs6000_output_savres_externs (FILE *file)
27675 rs6000_stack_t *info = rs6000_stack_info ();
27677 if (TARGET_DEBUG_STACK)
27678 debug_stack_info (info);
27680 /* Write .extern for any function we will call to save and restore
27681 fp values. */
27682 if (info->first_fp_reg_save < 64
27683 && !TARGET_MACHO
27684 && !TARGET_ELF)
27686 char *name;
27687 int regno = info->first_fp_reg_save - 32;
27689 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27691 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27692 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27693 name = rs6000_savres_routine_name (info, regno, sel);
27694 fprintf (file, "\t.extern %s\n", name);
27696 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27698 bool lr = (info->savres_strategy
27699 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27700 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27701 name = rs6000_savres_routine_name (info, regno, sel);
27702 fprintf (file, "\t.extern %s\n", name);
27707 /* Write function prologue. */
27709 static void
27710 rs6000_output_function_prologue (FILE *file,
27711 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
27713 if (!cfun->is_thunk)
27714 rs6000_output_savres_externs (file);
27716 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27717 immediately after the global entry point label. */
27718 if (rs6000_global_entry_point_needed_p ())
27720 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27722 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27724 if (TARGET_CMODEL != CMODEL_LARGE)
27726 /* In the small and medium code models, we assume the TOC is less
27727 than 2 GB away from the text section, so it can be computed via
27728 the following two-instruction sequence. */
27729 char buf[256];
27731 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27732 fprintf (file, "0:\taddis 2,12,.TOC.-");
27733 assemble_name (file, buf);
27734 fprintf (file, "@ha\n");
27735 fprintf (file, "\taddi 2,2,.TOC.-");
27736 assemble_name (file, buf);
27737 fprintf (file, "@l\n");
27739 else
27741 /* In the large code model, we allow arbitrary offsets between the
27742 TOC and the text section, so we have to load the offset from
27743 memory. The data field is emitted directly before the global
27744 entry point in rs6000_elf_declare_function_name. */
27745 char buf[256];
27747 #ifdef HAVE_AS_ENTRY_MARKERS
27748 /* If supported by the linker, emit a marker relocation. If the
27749 total code size of the final executable or shared library
27750 happens to fit into 2 GB after all, the linker will replace
27751 this code sequence with the sequence for the small or medium
27752 code model. */
27753 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27754 #endif
27755 fprintf (file, "\tld 2,");
27756 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27757 assemble_name (file, buf);
27758 fprintf (file, "-");
27759 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27760 assemble_name (file, buf);
27761 fprintf (file, "(12)\n");
27762 fprintf (file, "\tadd 2,2,12\n");
27765 fputs ("\t.localentry\t", file);
27766 assemble_name (file, name);
27767 fputs (",.-", file);
27768 assemble_name (file, name);
27769 fputs ("\n", file);
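/* Putting the pieces together, a function "foo" begins roughly as
   (medium code model sketch; labels follow the emission above):
   foo:
   .LCF0:
   0:	addis 2,12,.TOC.-.LCF0@ha
	addi 2,2,.TOC.-.LCF0@l
	.localentry foo,.-foo  */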
27772 /* Output -mprofile-kernel code. This needs to be done here instead of
27773 in output_function_profile since it must go after the ELFv2 ABI
27774 local entry point. */
27775 if (TARGET_PROFILE_KERNEL && crtl->profile)
27777 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27778 gcc_assert (!TARGET_32BIT);
27780 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27782 /* In the ELFv2 ABI we have no compiler stack word. It must be
27783 the responsibility of _mcount to preserve the static chain
27784 register if required. */
27785 if (DEFAULT_ABI != ABI_ELFv2
27786 && cfun->static_chain_decl != NULL)
27788 asm_fprintf (file, "\tstd %s,24(%s)\n",
27789 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27790 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27791 asm_fprintf (file, "\tld %s,24(%s)\n",
27792 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27794 else
27795 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
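/* E.g. with a live static chain under ELFv1/AIX the lines above
   produce (sketch; RS6000_MCOUNT expands to the target's counter
   routine, e.g. _mcount):
	mflr 0
	std  11,24(1)
	bl   _mcount
	ld   11,24(1)  */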
27798 rs6000_pic_labelno++;
27801 /* -mprofile-kernel code calls mcount before the function prologue,
27802 so a profiled leaf function should stay a leaf function. */
27803 static bool
27804 rs6000_keep_leaf_when_profiled ()
27806 return TARGET_PROFILE_KERNEL;
27809 /* Non-zero if vmx regs are restored before the frame pop, zero if
27810 we restore after the pop when possible. */
27811 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27813 /* Restoring cr is a two-step process: loading a reg from the frame
27814 save, then moving the reg to cr. For ABI_V4 we must let the
27815 unwinder know that the stack location is no longer valid at or
27816 before the stack deallocation, but we can't emit a cfa_restore for
27817 cr at the stack deallocation like we do for other registers.
27818 The trouble is that it is possible for the move to cr to be
27819 scheduled after the stack deallocation. So say exactly where cr
27820 is located on each of the two insns. */
27822 static rtx
27823 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27825 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27826 rtx reg = gen_rtx_REG (SImode, regno);
27827 rtx_insn *insn = emit_move_insn (reg, mem);
27829 if (!exit_func && DEFAULT_ABI == ABI_V4)
27831 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27832 rtx set = gen_rtx_SET (reg, cr);
27834 add_reg_note (insn, REG_CFA_REGISTER, set);
27835 RTX_FRAME_RELATED_P (insn) = 1;
27837 return reg;
27840 /* Reload CR from REG. */
27842 static void
27843 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27845 int count = 0;
27846 int i;
27848 if (using_mfcr_multiple)
27850 for (i = 0; i < 8; i++)
27851 if (save_reg_p (CR0_REGNO + i))
27852 count++;
27853 gcc_assert (count);
27856 if (using_mfcr_multiple && count > 1)
27858 rtx_insn *insn;
27859 rtvec p;
27860 int ndx;
27862 p = rtvec_alloc (count);
27864 ndx = 0;
27865 for (i = 0; i < 8; i++)
27866 if (save_reg_p (CR0_REGNO + i))
27868 rtvec r = rtvec_alloc (2);
27869 RTVEC_ELT (r, 0) = reg;
27870 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27871 RTVEC_ELT (p, ndx) =
27872 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27873 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27874 ndx++;
27876 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27877 gcc_assert (ndx == count);
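/* The PARALLEL above typically assembles to a single "mtcrf M,rN",
   where mask M has one bit set for each CR field being restored
   (sketch).  */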
27879 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27880 CR field separately. */
27881 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27883 for (i = 0; i < 8; i++)
27884 if (save_reg_p (CR0_REGNO + i))
27885 add_reg_note (insn, REG_CFA_RESTORE,
27886 gen_rtx_REG (SImode, CR0_REGNO + i));
27888 RTX_FRAME_RELATED_P (insn) = 1;
27891 else
27892 for (i = 0; i < 8; i++)
27893 if (save_reg_p (CR0_REGNO + i))
27895 rtx insn = emit_insn (gen_movsi_to_cr_one
27896 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27898 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27899 CR field separately, attached to the insn that in fact
27900 restores this particular CR field. */
27901 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27903 add_reg_note (insn, REG_CFA_RESTORE,
27904 gen_rtx_REG (SImode, CR0_REGNO + i));
27906 RTX_FRAME_RELATED_P (insn) = 1;
27910 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27911 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27912 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27914 rtx_insn *insn = get_last_insn ();
27915 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27917 add_reg_note (insn, REG_CFA_RESTORE, cr);
27918 RTX_FRAME_RELATED_P (insn) = 1;
27922 /* Like cr, the move to lr instruction can be scheduled after the
27923 stack deallocation, but unlike cr, its stack frame save is still
27924 valid. So we only need to emit the cfa_restore on the correct
27925 instruction. */
27927 static void
27928 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27930 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27931 rtx reg = gen_rtx_REG (Pmode, regno);
27933 emit_move_insn (reg, mem);
27936 static void
27937 restore_saved_lr (int regno, bool exit_func)
27939 rtx reg = gen_rtx_REG (Pmode, regno);
27940 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27941 rtx_insn *insn = emit_move_insn (lr, reg);
27943 if (!exit_func && flag_shrink_wrap)
27945 add_reg_note (insn, REG_CFA_RESTORE, lr);
27946 RTX_FRAME_RELATED_P (insn) = 1;
27950 static rtx
27951 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27953 if (DEFAULT_ABI == ABI_ELFv2)
27955 int i;
27956 for (i = 0; i < 8; i++)
27957 if (save_reg_p (CR0_REGNO + i))
27959 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27960 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27961 cfa_restores);
27964 else if (info->cr_save_p)
27965 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27966 gen_rtx_REG (SImode, CR2_REGNO),
27967 cfa_restores);
27969 if (info->lr_save_p)
27970 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27971 gen_rtx_REG (Pmode, LR_REGNO),
27972 cfa_restores);
27973 return cfa_restores;
27976 /* Return true if OFFSET from the stack pointer can be clobbered by
27977 signals. V.4 doesn't have any stack cushion; the AIX ABIs have
27978 220 or 288 bytes below the stack pointer not clobbered by signals. */
27980 static inline bool
27981 offset_below_red_zone_p (HOST_WIDE_INT offset)
27983 return offset < (DEFAULT_ABI == ABI_V4
27984 ? 0
27985 : TARGET_32BIT ? -220 : -288);
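/* Worked example: with the 64-bit red zone of 288 bytes, an offset of
   -300 is below the red zone (so a save there must be restored before
   the frame pop), while -128 is inside it and may be restored after
   the pop.  */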
27988 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27990 static void
27991 emit_cfa_restores (rtx cfa_restores)
27993 rtx_insn *insn = get_last_insn ();
27994 rtx *loc = &REG_NOTES (insn);
27996 while (*loc)
27997 loc = &XEXP (*loc, 1);
27998 *loc = cfa_restores;
27999 RTX_FRAME_RELATED_P (insn) = 1;
28002 /* Emit function epilogue as insns. */
28004 void
28005 rs6000_emit_epilogue (int sibcall)
28007 rs6000_stack_t *info;
28008 int restoring_GPRs_inline;
28009 int restoring_FPRs_inline;
28010 int using_load_multiple;
28011 int using_mtcr_multiple;
28012 int use_backchain_to_restore_sp;
28013 int restore_lr;
28014 int strategy;
28015 HOST_WIDE_INT frame_off = 0;
28016 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28017 rtx frame_reg_rtx = sp_reg_rtx;
28018 rtx cfa_restores = NULL_RTX;
28019 rtx insn;
28020 rtx cr_save_reg = NULL_RTX;
28021 machine_mode reg_mode = Pmode;
28022 int reg_size = TARGET_32BIT ? 4 : 8;
28023 int i;
28024 bool exit_func;
28025 unsigned ptr_regno;
28027 info = rs6000_stack_info ();
28029 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
28031 reg_mode = V2SImode;
28032 reg_size = 8;
28035 strategy = info->savres_strategy;
28036 using_load_multiple = strategy & REST_MULTIPLE;
28037 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28038 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28039 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
28040 || rs6000_cpu == PROCESSOR_PPC603
28041 || rs6000_cpu == PROCESSOR_PPC750
28042 || optimize_size);
28043 /* Restore via the backchain when we have a large frame, since this
28044 is more efficient than an addis, addi pair. The second condition
28045 here will not trigger at the moment; we don't actually need a
28046 frame pointer for alloca, but the generic parts of the compiler
28047 give us one anyway. */
28048 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28049 ? info->lr_save_offset
28050 : 0) > 32767
28051 || (cfun->calls_alloca
28052 && !frame_pointer_needed));
28053 restore_lr = (info->lr_save_p
28054 && (restoring_FPRs_inline
28055 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28056 && (restoring_GPRs_inline
28057 || info->first_fp_reg_save < 64));
28059 if (WORLD_SAVE_P (info))
28061 int i, j;
28062 char rname[30];
28063 const char *alloc_rname;
28064 rtvec p;
28066 /* eh_rest_world_r10 will return to the location saved in the LR
28067 stack slot (which is not likely to be our caller).
28068 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28069 rest_world is similar, except any R10 parameter is ignored.
28070 The exception-handling stuff that was here in 2.95 is no
28071 longer necessary. */
28073 p = rtvec_alloc (9
28074 + 1
28075 + 32 - info->first_gp_reg_save
28076 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28077 + 63 + 1 - info->first_fp_reg_save);
28079 strcpy (rname, ((crtl->calls_eh_return) ?
28080 "*eh_rest_world_r10" : "*rest_world"));
28081 alloc_rname = ggc_strdup (rname);
28083 j = 0;
28084 RTVEC_ELT (p, j++) = ret_rtx;
28085 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
28086 gen_rtx_REG (Pmode,
28087 LR_REGNO));
28088 RTVEC_ELT (p, j++)
28089 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28090 /* The instruction pattern requires a clobber here;
28091 it is shared with the restVEC helper. */
28092 RTVEC_ELT (p, j++)
28093 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
28096 /* CR register traditionally saved as CR2. */
28097 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28098 RTVEC_ELT (p, j++)
28099 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28100 if (flag_shrink_wrap)
28102 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28103 gen_rtx_REG (Pmode, LR_REGNO),
28104 cfa_restores);
28105 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28109 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28111 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28112 RTVEC_ELT (p, j++)
28113 = gen_frame_load (reg,
28114 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28115 if (flag_shrink_wrap)
28116 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28118 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28120 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28121 RTVEC_ELT (p, j++)
28122 = gen_frame_load (reg,
28123 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28124 if (flag_shrink_wrap)
28125 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28127 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28129 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28130 ? DFmode : SFmode),
28131 info->first_fp_reg_save + i);
28132 RTVEC_ELT (p, j++)
28133 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28134 if (flag_shrink_wrap)
28135 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28137 RTVEC_ELT (p, j++)
28138 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28139 RTVEC_ELT (p, j++)
28140 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28141 RTVEC_ELT (p, j++)
28142 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28143 RTVEC_ELT (p, j++)
28144 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28145 RTVEC_ELT (p, j++)
28146 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28147 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28149 if (flag_shrink_wrap)
28151 REG_NOTES (insn) = cfa_restores;
28152 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28153 RTX_FRAME_RELATED_P (insn) = 1;
28155 return;
28158 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28159 if (info->push_p)
28160 frame_off = info->total_size;
28162 /* Restore AltiVec registers if we must do so before adjusting the
28163 stack. */
28164 if (info->altivec_size != 0
28165 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28166 || (DEFAULT_ABI != ABI_V4
28167 && offset_below_red_zone_p (info->altivec_save_offset))))
28169 int i;
28170 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28172 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28173 if (use_backchain_to_restore_sp)
28175 int frame_regno = 11;
28177 if ((strategy & REST_INLINE_VRS) == 0)
28179 /* Of r11 and r12, select the one not clobbered by an
28180 out-of-line restore function for the frame register. */
28181 frame_regno = 11 + 12 - scratch_regno;
28183 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28184 emit_move_insn (frame_reg_rtx,
28185 gen_rtx_MEM (Pmode, sp_reg_rtx));
28186 frame_off = 0;
28188 else if (frame_pointer_needed)
28189 frame_reg_rtx = hard_frame_pointer_rtx;
28191 if ((strategy & REST_INLINE_VRS) == 0)
28193 int end_save = info->altivec_save_offset + info->altivec_size;
28194 int ptr_off;
28195 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28196 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28198 if (end_save + frame_off != 0)
28200 rtx offset = GEN_INT (end_save + frame_off);
28202 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28204 else
28205 emit_move_insn (ptr_reg, frame_reg_rtx);
28207 ptr_off = -end_save;
28208 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28209 info->altivec_save_offset + ptr_off,
28210 0, V4SImode, SAVRES_VR);
28212 else
28214 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28215 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28217 rtx addr, areg, mem, insn;
28218 rtx reg = gen_rtx_REG (V4SImode, i);
28219 HOST_WIDE_INT offset
28220 = (info->altivec_save_offset + frame_off
28221 + 16 * (i - info->first_altivec_reg_save));
28223 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
28225 mem = gen_frame_mem (V4SImode,
28226 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28227 GEN_INT (offset)));
28228 insn = gen_rtx_SET (reg, mem);
28230 else
28232 areg = gen_rtx_REG (Pmode, 0);
28233 emit_move_insn (areg, GEN_INT (offset));
28235 /* AltiVec addressing mode is [reg+reg]. */
28236 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28237 mem = gen_frame_mem (V4SImode, addr);
28239 /* Rather than emitting a generic move, force use of the
28240 lvx instruction, which we always want. In particular we
28241 don't want lxvd2x/xxpermdi for little endian. */
28242 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28245 (void) emit_insn (insn);
28249 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28250 if (((strategy & REST_INLINE_VRS) == 0
28251 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28252 && (flag_shrink_wrap
28253 || (offset_below_red_zone_p
28254 (info->altivec_save_offset
28255 + 16 * (i - info->first_altivec_reg_save)))))
28257 rtx reg = gen_rtx_REG (V4SImode, i);
28258 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28262 /* Restore VRSAVE if we must do so before adjusting the stack. */
28263 if (info->vrsave_size != 0
28264 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28265 || (DEFAULT_ABI != ABI_V4
28266 && offset_below_red_zone_p (info->vrsave_save_offset))))
28268 rtx reg;
28270 if (frame_reg_rtx == sp_reg_rtx)
28272 if (use_backchain_to_restore_sp)
28274 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28275 emit_move_insn (frame_reg_rtx,
28276 gen_rtx_MEM (Pmode, sp_reg_rtx));
28277 frame_off = 0;
28279 else if (frame_pointer_needed)
28280 frame_reg_rtx = hard_frame_pointer_rtx;
28283 reg = gen_rtx_REG (SImode, 12);
28284 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28285 info->vrsave_save_offset + frame_off));
28287 emit_insn (generate_set_vrsave (reg, info, 1));
28290 insn = NULL_RTX;
28291 /* If we have a large stack frame, restore the old stack pointer
28292 using the backchain. */
28293 if (use_backchain_to_restore_sp)
28295 if (frame_reg_rtx == sp_reg_rtx)
28297 /* Under V.4, don't reset the stack pointer until after we're done
28298 loading the saved registers. */
28299 if (DEFAULT_ABI == ABI_V4)
28300 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28302 insn = emit_move_insn (frame_reg_rtx,
28303 gen_rtx_MEM (Pmode, sp_reg_rtx));
28304 frame_off = 0;
28306 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28307 && DEFAULT_ABI == ABI_V4)
28308 /* frame_reg_rtx has been set up by the altivec restore. */
28310 else
28312 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28313 frame_reg_rtx = sp_reg_rtx;
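/* Backchain sketch: this path reloads the old stack pointer (into
   r11 under V.4, else into r1 directly) with a single "ld 1,0(1)"
   (lwz on 32-bit) from the back-chain word, rather than
   materializing info->total_size with an addis/addi pair.  */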
28316 /* If we have a frame pointer, we can restore the old stack pointer
28317 from it. */
28318 else if (frame_pointer_needed)
28320 frame_reg_rtx = sp_reg_rtx;
28321 if (DEFAULT_ABI == ABI_V4)
28322 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28323 /* Prevent reordering memory accesses against stack pointer restore. */
28324 else if (cfun->calls_alloca
28325 || offset_below_red_zone_p (-info->total_size))
28326 rs6000_emit_stack_tie (frame_reg_rtx, true);
28328 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28329 GEN_INT (info->total_size)));
28330 frame_off = 0;
28332 else if (info->push_p
28333 && DEFAULT_ABI != ABI_V4
28334 && !crtl->calls_eh_return)
28336 /* Prevent reordering memory accesses against stack pointer restore. */
28337 if (cfun->calls_alloca
28338 || offset_below_red_zone_p (-info->total_size))
28339 rs6000_emit_stack_tie (frame_reg_rtx, false);
28340 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28341 GEN_INT (info->total_size)));
28342 frame_off = 0;
28344 if (insn && frame_reg_rtx == sp_reg_rtx)
28346 if (cfa_restores)
28348 REG_NOTES (insn) = cfa_restores;
28349 cfa_restores = NULL_RTX;
28351 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28352 RTX_FRAME_RELATED_P (insn) = 1;
28355 /* Restore AltiVec registers if we have not done so already. */
28356 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28357 && info->altivec_size != 0
28358 && (DEFAULT_ABI == ABI_V4
28359 || !offset_below_red_zone_p (info->altivec_save_offset)))
28361 int i;
28363 if ((strategy & REST_INLINE_VRS) == 0)
28365 int end_save = info->altivec_save_offset + info->altivec_size;
28366 int ptr_off;
28367 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28368 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28369 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28371 if (end_save + frame_off != 0)
28373 rtx offset = GEN_INT (end_save + frame_off);
28375 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28377 else
28378 emit_move_insn (ptr_reg, frame_reg_rtx);
28380 ptr_off = -end_save;
28381 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28382 info->altivec_save_offset + ptr_off,
28383 0, V4SImode, SAVRES_VR);
28384 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28386 /* Frame reg was clobbered by out-of-line save. Restore it
28387 from ptr_reg, and if we are calling an out-of-line gpr or
28388 fpr restore, set up the correct pointer and offset. */
28389 unsigned newptr_regno = 1;
28390 if (!restoring_GPRs_inline)
28392 bool lr = info->gp_save_offset + info->gp_size == 0;
28393 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28394 newptr_regno = ptr_regno_for_savres (sel);
28395 end_save = info->gp_save_offset + info->gp_size;
28397 else if (!restoring_FPRs_inline)
28399 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28400 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28401 newptr_regno = ptr_regno_for_savres (sel);
28402 end_save = info->fp_save_offset + info->fp_size;
28405 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28406 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28408 if (end_save + ptr_off != 0)
28410 rtx offset = GEN_INT (end_save + ptr_off);
28412 frame_off = -end_save;
28413 if (TARGET_32BIT)
28414 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28415 ptr_reg, offset));
28416 else
28417 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28418 ptr_reg, offset));
28420 else
28422 frame_off = ptr_off;
28423 emit_move_insn (frame_reg_rtx, ptr_reg);
28427 else
28429 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28430 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28432 rtx addr, areg, mem, insn;
28433 rtx reg = gen_rtx_REG (V4SImode, i);
28434 HOST_WIDE_INT offset
28435 = (info->altivec_save_offset + frame_off
28436 + 16 * (i - info->first_altivec_reg_save));
28438 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
28440 mem = gen_frame_mem (V4SImode,
28441 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28442 GEN_INT (offset)));
28443 insn = gen_rtx_SET (reg, mem);
28445 else
28447 areg = gen_rtx_REG (Pmode, 0);
28448 emit_move_insn (areg, GEN_INT (offset));
28450 /* AltiVec addressing mode is [reg+reg]. */
28451 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28452 mem = gen_frame_mem (V4SImode, addr);
28454 /* Rather than emitting a generic move, force use of the
28455 lvx instruction, which we always want. In particular we
28456 don't want lxvd2x/xxpermdi for little endian. */
28457 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28460 (void) emit_insn (insn);
28464 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28465 if (((strategy & REST_INLINE_VRS) == 0
28466 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28467 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28469 rtx reg = gen_rtx_REG (V4SImode, i);
28470 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28474 /* Restore VRSAVE if we have not done so already. */
28475 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28476 && info->vrsave_size != 0
28477 && (DEFAULT_ABI == ABI_V4
28478 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28480 rtx reg;
28482 reg = gen_rtx_REG (SImode, 12);
28483 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28484 info->vrsave_save_offset + frame_off));
28486 emit_insn (generate_set_vrsave (reg, info, 1));
28489 /* If we exit by an out-of-line restore function on ABI_V4 then that
28490 function will deallocate the stack, so we don't need to worry
28491 about the unwinder restoring cr from an invalid stack frame
28492 location. */
28493 exit_func = (!restoring_FPRs_inline
28494 || (!restoring_GPRs_inline
28495 && info->first_fp_reg_save == 64));
28497 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28498 *separate* slots if the routine calls __builtin_eh_return, so
28499 that they can be independently restored by the unwinder. */
28500 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28502 int i, cr_off = info->ehcr_offset;
28504 for (i = 0; i < 8; i++)
28505 if (!call_used_regs[CR0_REGNO + i])
28507 rtx reg = gen_rtx_REG (SImode, 0);
28508 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28509 cr_off + frame_off));
28511 insn = emit_insn (gen_movsi_to_cr_one
28512 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28514 if (!exit_func && flag_shrink_wrap)
28516 add_reg_note (insn, REG_CFA_RESTORE,
28517 gen_rtx_REG (SImode, CR0_REGNO + i));
28519 RTX_FRAME_RELATED_P (insn) = 1;
28522 cr_off += reg_size;
28526 /* Get the old lr if we saved it. If we are restoring registers
28527 out-of-line, then the out-of-line routines can do this for us. */
28528 if (restore_lr && restoring_GPRs_inline)
28529 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28531 /* Get the old cr if we saved it. */
28532 if (info->cr_save_p)
28534 unsigned cr_save_regno = 12;
28536 if (!restoring_GPRs_inline)
28538 /* Ensure we don't use the register used by the out-of-line
28539 gpr register restore below. */
28540 bool lr = info->gp_save_offset + info->gp_size == 0;
28541 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28542 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28544 if (gpr_ptr_regno == 12)
28545 cr_save_regno = 11;
28546 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28548 else if (REGNO (frame_reg_rtx) == 12)
28549 cr_save_regno = 11;
28551 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28552 info->cr_save_offset + frame_off,
28553 exit_func);
28556 /* Set LR here to try to overlap restores below. */
28557 if (restore_lr && restoring_GPRs_inline)
28558 restore_saved_lr (0, exit_func);
28560 /* Load exception handler data registers, if needed. */
28561 if (crtl->calls_eh_return)
28563 unsigned int i, regno;
28565 if (TARGET_AIX)
28567 rtx reg = gen_rtx_REG (reg_mode, 2);
28568 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28569 frame_off + RS6000_TOC_SAVE_SLOT));
28572 for (i = 0; ; ++i)
28574 rtx mem;
28576 regno = EH_RETURN_DATA_REGNO (i);
28577 if (regno == INVALID_REGNUM)
28578 break;
28580 /* Note: possible use of r0 here to address SPE regs. */
28581 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28582 info->ehrd_offset + frame_off
28583 + reg_size * (int) i);
28585 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28589 /* Restore GPRs. This is done as a PARALLEL if we are using
28590 the load-multiple instructions. */
28591 if (TARGET_SPE_ABI
28592 && info->spe_64bit_regs_used
28593 && info->first_gp_reg_save != 32)
28595 /* Determine whether we can address all of the registers that need
28596 to be saved with an offset from frame_reg_rtx that fits in
28597 the small const field for SPE memory instructions. */
28598 int spe_regs_addressable
28599 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
28600 + reg_size * (32 - info->first_gp_reg_save - 1))
28601 && restoring_GPRs_inline);
28603 if (!spe_regs_addressable)
28605 int ool_adjust = 0;
28606 rtx old_frame_reg_rtx = frame_reg_rtx;
28607 /* Make r11 point to the start of the SPE save area. We worried about
28608 not clobbering it when we were saving registers in the prologue.
28609 There's no need to worry here because the static chain is passed
28610 anew to every function. */
28612 if (!restoring_GPRs_inline)
28613 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
28614 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28615 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
28616 GEN_INT (info->spe_gp_save_offset
28617 + frame_off
28618 - ool_adjust)));
28619 /* Keep the invariant that frame_reg_rtx + frame_off points
28620 at the top of the stack frame. */
28621 frame_off = -info->spe_gp_save_offset + ool_adjust;
28624 if (restoring_GPRs_inline)
28626 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
28628 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28629 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
28631 rtx offset, addr, mem, reg;
28633 /* We're doing all this to ensure that the immediate offset
28634 fits into the immediate field of 'evldd'. */
28635 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
28637 offset = GEN_INT (spe_offset + reg_size * i);
28638 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
28639 mem = gen_rtx_MEM (V2SImode, addr);
28640 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28642 emit_move_insn (reg, mem);
28645 else
28646 rs6000_emit_savres_rtx (info, frame_reg_rtx,
28647 info->spe_gp_save_offset + frame_off,
28648 info->lr_save_offset + frame_off,
28649 reg_mode,
28650 SAVRES_GPR | SAVRES_LR);
28652 else if (!restoring_GPRs_inline)
28654 /* We are jumping to an out-of-line function. */
28655 rtx ptr_reg;
28656 int end_save = info->gp_save_offset + info->gp_size;
28657 bool can_use_exit = end_save == 0;
28658 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28659 int ptr_off;
28661 /* Emit stack reset code if we need it. */
28662 ptr_regno = ptr_regno_for_savres (sel);
28663 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28664 if (can_use_exit)
28665 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
28666 else if (end_save + frame_off != 0)
28667 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28668 GEN_INT (end_save + frame_off)));
28669 else if (REGNO (frame_reg_rtx) != ptr_regno)
28670 emit_move_insn (ptr_reg, frame_reg_rtx);
28671 if (REGNO (frame_reg_rtx) == ptr_regno)
28672 frame_off = -end_save;
28674 if (can_use_exit && info->cr_save_p)
28675 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28677 ptr_off = -end_save;
28678 rs6000_emit_savres_rtx (info, ptr_reg,
28679 info->gp_save_offset + ptr_off,
28680 info->lr_save_offset + ptr_off,
28681 reg_mode, sel);
28683 else if (using_load_multiple)
28685 rtvec p;
28686 p = rtvec_alloc (32 - info->first_gp_reg_save);
28687 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28688 RTVEC_ELT (p, i)
28689 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28690 frame_reg_rtx,
28691 info->gp_save_offset + frame_off + reg_size * i);
28692 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
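/* With REST_MULTIPLE this PARALLEL becomes a single "lmw rN,off(rB)"
   that reloads rN..r31 in one instruction (32-bit only; sketch).  */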
28694 else
28696 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28697 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
28698 emit_insn (gen_frame_load
28699 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28700 frame_reg_rtx,
28701 info->gp_save_offset + frame_off + reg_size * i));
28704 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28706 /* If the frame pointer was used then we can't delay emitting
28707 a REG_CFA_DEF_CFA note. This must happen on the insn that
28708 restores the frame pointer, r31. We may have already emitted
28709 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28710 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28711 be harmless if emitted. */
28712 if (frame_pointer_needed)
28714 insn = get_last_insn ();
28715 add_reg_note (insn, REG_CFA_DEF_CFA,
28716 plus_constant (Pmode, frame_reg_rtx, frame_off));
28717 RTX_FRAME_RELATED_P (insn) = 1;
28720 /* Set up cfa_restores. We always need these when
28721 shrink-wrapping. If not shrink-wrapping then we only need
28722 the cfa_restore when the stack location is no longer valid.
28723 The cfa_restores must be emitted on or before the insn that
28724 invalidates the stack, and of course must not be emitted
28725 before the insn that actually does the restore. The latter
28726 is why it is a bad idea to emit the cfa_restores as a group
28727 on the last instruction here that actually does a restore:
28728 that insn may be reordered with respect to others doing
28729 restores. */
28730 if (flag_shrink_wrap
28731 && !restoring_GPRs_inline
28732 && info->first_fp_reg_save == 64)
28733 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28735 for (i = info->first_gp_reg_save; i < 32; i++)
28736 if (!restoring_GPRs_inline
28737 || using_load_multiple
28738 || rs6000_reg_live_or_pic_offset_p (i))
28740 rtx reg = gen_rtx_REG (reg_mode, i);
28742 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28746 if (!restoring_GPRs_inline
28747 && info->first_fp_reg_save == 64)
28749 /* We are jumping to an out-of-line function. */
28750 if (cfa_restores)
28751 emit_cfa_restores (cfa_restores);
28752 return;
28755 if (restore_lr && !restoring_GPRs_inline)
28757 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28758 restore_saved_lr (0, exit_func);
28761 /* Restore fpr's if we need to do it without calling a function. */
28762 if (restoring_FPRs_inline)
28763 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28764 if (save_reg_p (info->first_fp_reg_save + i))
28766 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28767 ? DFmode : SFmode),
28768 info->first_fp_reg_save + i);
28769 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28770 info->fp_save_offset + frame_off + 8 * i));
28771 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28772 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28775 /* If we saved cr, restore it here. Just those that were used. */
28776 if (info->cr_save_p)
28777 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28779 /* If this is V.4, unwind the stack pointer after all of the loads
28780 have been done, or set up r11 if we are restoring fp out of line. */
28781 ptr_regno = 1;
28782 if (!restoring_FPRs_inline)
28784 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28785 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28786 ptr_regno = ptr_regno_for_savres (sel);
28789 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
28790 if (REGNO (frame_reg_rtx) == ptr_regno)
28791 frame_off = 0;
28793 if (insn && restoring_FPRs_inline)
28795 if (cfa_restores)
28797 REG_NOTES (insn) = cfa_restores;
28798 cfa_restores = NULL_RTX;
28800 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28801 RTX_FRAME_RELATED_P (insn) = 1;
28804 if (crtl->calls_eh_return)
28806 rtx sa = EH_RETURN_STACKADJ_RTX;
28807 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28810 if (!sibcall)
28812 rtvec p;
28813 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28814 if (! restoring_FPRs_inline)
28816 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
28817 RTVEC_ELT (p, 0) = ret_rtx;
28819 else
28821 if (cfa_restores)
28823 /* We can't hang the cfa_restores off a simple return,
28824 since the shrink-wrap code sometimes uses an existing
28825 return. This means there might be a path from
28826 pre-prologue code to this return, and dwarf2cfi code
28827 wants the eh_frame unwinder state to be the same on
28828 all paths to any point. So we need to emit the
28829 cfa_restores before the return. For -m64 we really
28830 don't need epilogue cfa_restores at all, except for
28831 this irritating dwarf2cfi shrink-wrap
28832 requirement; the stack red zone means eh_frame info
28833 from the prologue telling the unwinder to restore
28834 from the stack is perfectly good right to the end of
28835 the function. */
28836 emit_insn (gen_blockage ());
28837 emit_cfa_restores (cfa_restores);
28838 cfa_restores = NULL_RTX;
28840 p = rtvec_alloc (2);
28841 RTVEC_ELT (p, 0) = simple_return_rtx;
28844 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
28845 ? gen_rtx_USE (VOIDmode,
28846 gen_rtx_REG (Pmode, LR_REGNO))
28847 : gen_rtx_CLOBBER (VOIDmode,
28848 gen_rtx_REG (Pmode, LR_REGNO)));
28850 /* If we have to restore more than two FP registers, branch to the
28851 restore function. It will return to our caller. */
28852 if (! restoring_FPRs_inline)
28854 int i;
28855 int reg;
28856 rtx sym;
28858 if (flag_shrink_wrap)
28859 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28861 sym = rs6000_savres_routine_sym (info,
28862 SAVRES_FPR | (lr ? SAVRES_LR : 0));
28863 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
28864 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28865 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28867 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28869 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28871 RTVEC_ELT (p, i + 4)
28872 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28873 if (flag_shrink_wrap)
28874 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28875 cfa_restores);
28879 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28882 if (cfa_restores)
28884 if (sibcall)
28885 /* Ensure the cfa_restores are hung off an insn that won't
28886 be reordered above other restores. */
28887 emit_insn (gen_blockage ());
28889 emit_cfa_restores (cfa_restores);
28893 /* Write function epilogue. */
28895 static void
28896 rs6000_output_function_epilogue (FILE *file,
28897 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
28899 #if TARGET_MACHO
28900 macho_branch_islands ();
28901 /* Mach-O doesn't support labels at the end of objects, so if
28902 it looks like we might want one, insert a NOP. */
28904 rtx_insn *insn = get_last_insn ();
28905 rtx_insn *deleted_debug_label = NULL;
28906 while (insn
28907 && NOTE_P (insn)
28908 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28910 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28911 notes; instead set their CODE_LABEL_NUMBER to -1,
28912 otherwise there would be code generation differences
28913 between -g and -g0. */
28914 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28915 deleted_debug_label = insn;
28916 insn = PREV_INSN (insn);
28918 if (insn
28919 && (LABEL_P (insn)
28920 || (NOTE_P (insn)
28921 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
28922 fputs ("\tnop\n", file);
28923 else if (deleted_debug_label)
28924 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28925 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28926 CODE_LABEL_NUMBER (insn) = -1;
28928 #endif
28930 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28931 on its format.
28933 We don't output a traceback table if -finhibit-size-directive was
28934 used. The documentation for -finhibit-size-directive reads
28935 ``don't output a @code{.size} assembler directive, or anything
28936 else that would cause trouble if the function is split in the
28937 middle, and the two halves are placed at locations far apart in
28938 memory.'' The traceback table has this property, since it
28939 includes the offset from the start of the function to the
28940 traceback table itself.
28942 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28943 different traceback table. */
28944 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28945 && ! flag_inhibit_size_directive
28946 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28948 const char *fname = NULL;
28949 const char *language_string = lang_hooks.name;
28950 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28951 int i;
28952 int optional_tbtab;
28953 rs6000_stack_t *info = rs6000_stack_info ();
28955 if (rs6000_traceback == traceback_full)
28956 optional_tbtab = 1;
28957 else if (rs6000_traceback == traceback_part)
28958 optional_tbtab = 0;
28959 else
28960 optional_tbtab = !optimize_size && !TARGET_ELF;
28962 if (optional_tbtab)
28964 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28965 while (*fname == '.') /* V.4 encodes . in the name */
28966 fname++;
28968 /* Need label immediately before tbtab, so we can compute
28969 its offset from the function start. */
28970 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28971 ASM_OUTPUT_LABEL (file, fname);
28974 /* The .tbtab pseudo-op can only be used for the first eight
28975 expressions, since it can't handle the possibly variable
28976 length fields that follow. However, if you omit the optional
28977 fields, the assembler outputs zeros for all optional fields
28978 anyway, giving each variable length field its minimum length
28979 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28980 pseudo-op at all. */
28982 /* An all-zero word flags the start of the tbtab, for debuggers
28983 that have to find it by searching forward from the entry
28984 point or from the current pc. */
28985 fputs ("\t.long 0\n", file);
28987 /* Tbtab format type. Use format type 0. */
28988 fputs ("\t.byte 0,", file);
28990 /* Language type. Unfortunately, there does not seem to be any
28991 official way to discover the language being compiled, so we
28992 use language_string.
28993 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28994 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28995 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28996 either, so for now use 0. */
28997 if (lang_GNU_C ()
28998 || ! strcmp (language_string, "GNU GIMPLE")
28999 || ! strcmp (language_string, "GNU Go")
29000 || ! strcmp (language_string, "libgccjit"))
29001 i = 0;
29002 else if (! strcmp (language_string, "GNU F77")
29003 || lang_GNU_Fortran ())
29004 i = 1;
29005 else if (! strcmp (language_string, "GNU Pascal"))
29006 i = 2;
29007 else if (! strcmp (language_string, "GNU Ada"))
29008 i = 3;
29009 else if (lang_GNU_CXX ()
29010 || ! strcmp (language_string, "GNU Objective-C++"))
29011 i = 9;
29012 else if (! strcmp (language_string, "GNU Java"))
29013 i = 13;
29014 else if (! strcmp (language_string, "GNU Objective-C"))
29015 i = 14;
29016 else
29017 gcc_unreachable ();
29018 fprintf (file, "%d,", i);
29020 /* 8 single bit fields: global linkage (not set for C extern linkage,
29021 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29022 from start of procedure stored in tbtab, internal function, function
29023 has controlled storage, function has no toc, function uses fp,
29024 function logs/aborts fp operations. */
29025 /* Assume that fp operations are used if any fp reg must be saved. */
29026 fprintf (file, "%d,",
29027 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
29029 /* 6 bitfields: function is interrupt handler, name present in
29030 proc table, function calls alloca, on condition directives
29031 (controls stack walks, 3 bits), saves condition reg, saves
29032 link reg. */
29033 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29034 set up as a frame pointer, even when there is no alloca call. */
29035 fprintf (file, "%d,",
29036 ((optional_tbtab << 6)
29037 | ((optional_tbtab & frame_pointer_needed) << 5)
29038 | (info->cr_save_p << 1)
29039 | (info->lr_save_p)));
29041 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29042 (6 bits). */
29043 fprintf (file, "%d,",
29044 (info->push_p << 7) | (64 - info->first_fp_reg_save));
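/* For illustration (not in the original source): a frame that saves
   only f30 and f31 has info->first_fp_reg_save == 62 (FP registers
   occupy hard regs 32-63 here), so the low six bits of this byte
   come out as 64 - 62 == 2 saved FP registers.  */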
29046 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29047 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29049 if (optional_tbtab)
29051 /* Compute the parameter info from the function decl argument
29052 list. */
29053 tree decl;
29054 int next_parm_info_bit = 31;
29056 for (decl = DECL_ARGUMENTS (current_function_decl);
29057 decl; decl = DECL_CHAIN (decl))
29059 rtx parameter = DECL_INCOMING_RTL (decl);
29060 machine_mode mode = GET_MODE (parameter);
29062 if (GET_CODE (parameter) == REG)
29064 if (SCALAR_FLOAT_MODE_P (mode))
29066 int bits;
29068 float_parms++;
29070 switch (mode)
29072 case SFmode:
29073 case SDmode:
29074 bits = 0x2;
29075 break;
29077 case DFmode:
29078 case DDmode:
29079 case TFmode:
29080 case TDmode:
29081 case IFmode:
29082 case KFmode:
29083 bits = 0x3;
29084 break;
29086 default:
29087 gcc_unreachable ();
29090 /* If only one bit will fit, don't or in this entry. */
29091 if (next_parm_info_bit > 0)
29092 parm_info |= (bits << (next_parm_info_bit - 1));
29093 next_parm_info_bit -= 2;
29095 else
29097 fixed_parms += ((GET_MODE_SIZE (mode)
29098 + (UNITS_PER_WORD - 1))
29099 / UNITS_PER_WORD);
29100 next_parm_info_bit -= 1;
29106 /* Number of fixed point parameters. */
29107 /* This is actually the number of words of fixed point parameters; thus
29108 an 8-byte struct counts as 2, and so the maximum value is 8. */
29109 fprintf (file, "%d,", fixed_parms);
29111 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29112 all on stack. */
29113 /* This is actually the number of fp registers that hold parameters;
29114 and thus the maximum value is 13. */
29115 /* Set parameters on stack bit if parameters are not in their original
29116 registers, regardless of whether they are on the stack? Xlc
29117 seems to set the bit when not optimizing. */
29118 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29120 if (! optional_tbtab)
29121 return;
29123 /* Optional fields follow. Some are variable length. */
29125 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
29126 11 double float. */
29127 /* There is an entry for each parameter in a register, in the order that
29128 they occur in the parameter list. Any intervening arguments on the
29129 stack are ignored. If the list overflows a long (max possible length
29130 34 bits) then completely leave off all elements that don't fit. */
29131 /* Only emit this long if there was at least one parameter. */
29132 if (fixed_parms || float_parms)
29133 fprintf (file, "\t.long %d\n", parm_info);
29135 /* Offset from start of code to tb table. */
29136 fputs ("\t.long ", file);
29137 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29138 RS6000_OUTPUT_BASENAME (file, fname);
29139 putc ('-', file);
29140 rs6000_output_function_entry (file, fname);
29141 putc ('\n', file);
29143 /* Interrupt handler mask. */
29144 /* Omit this long, since we never set the interrupt handler bit
29145 above. */
29147 /* Number of CTL (controlled storage) anchors. */
29148 /* Omit this long, since the has_ctl bit is never set above. */
29150 /* Displacement into stack of each CTL anchor. */
29151 /* Omit this list of longs, because there are no CTL anchors. */
29153 /* Length of function name. */
29154 if (*fname == '*')
29155 ++fname;
29156 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29158 /* Function name. */
29159 assemble_string (fname, strlen (fname));
29161 /* Register for alloca automatic storage; this is always reg 31.
29162 Only emit this if the alloca bit was set above. */
29163 if (frame_pointer_needed)
29164 fputs ("\t.byte 31\n", file);
29166 fputs ("\t.align 2\n", file);
29169 /* Arrange to define .LCTOC1 label, if not already done. */
29170 if (need_toc_init)
29172 need_toc_init = 0;
29173 if (!toc_initialized)
29175 switch_to_section (toc_section);
29176 switch_to_section (current_function_section ());
29181 /* -fsplit-stack support. */
29183 /* A SYMBOL_REF for __morestack. */
29184 static GTY(()) rtx morestack_ref;
29186 static rtx
29187 gen_add3_const (rtx rt, rtx ra, long c)
29189 if (TARGET_64BIT)
29190 return gen_adddi3 (rt, ra, GEN_INT (c));
29191 else
29192 return gen_addsi3 (rt, ra, GEN_INT (c));
29195 /* Emit -fsplit-stack prologue, which goes before the regular function
29196 prologue (at local entry point in the case of ELFv2). */
29198 void
29199 rs6000_expand_split_stack_prologue (void)
29201 rs6000_stack_t *info = rs6000_stack_info ();
29202 unsigned HOST_WIDE_INT allocate;
29203 long alloc_hi, alloc_lo;
29204 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29205 rtx_insn *insn;
29207 gcc_assert (flag_split_stack && reload_completed);
29209 if (!info->push_p)
29210 return;
29212 if (global_regs[29])
29214 error ("-fsplit-stack uses register r29");
29215 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29216 "conflicts with %qD", global_regs_decl[29]);
29219 allocate = info->total_size;
29220 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29222 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29223 return;
29225 if (morestack_ref == NULL_RTX)
29227 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29228 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29229 | SYMBOL_FLAG_FUNCTION);
29232 r0 = gen_rtx_REG (Pmode, 0);
29233 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29234 r12 = gen_rtx_REG (Pmode, 12);
29235 emit_insn (gen_load_split_stack_limit (r0));
29236 /* Always emit two insns here to calculate the requested stack,
29237 so that the linker can edit them when adjusting size for calling
29238 non-split-stack code. */
29239 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29240 alloc_lo = -allocate - alloc_hi;
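/* Worked example (illustrative): for allocate == 0x12345 we have
   -allocate == -0x12345, so alloc_hi == (-0x12345 + 0x8000) & ~0xffff
   == -0x10000 and alloc_lo == -0x12345 - -0x10000 == -0x2345.  The
   rounding by 0x8000 guarantees alloc_lo fits the signed 16-bit
   immediate of addi, and alloc_hi + alloc_lo == -allocate exactly.  */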
29241 if (alloc_hi != 0)
29243 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29244 if (alloc_lo != 0)
29245 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29246 else
29247 emit_insn (gen_nop ());
29249 else
29251 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29252 emit_insn (gen_nop ());
29255 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29256 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29257 ok_label = gen_label_rtx ();
29258 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29259 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29260 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29261 pc_rtx);
29262 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29263 JUMP_LABEL (jump) = ok_label;
29264 /* Mark the jump as very likely to be taken. */
29265 add_int_reg_note (jump, REG_BR_PROB,
29266 REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100);
29268 lr = gen_rtx_REG (Pmode, LR_REGNO);
29269 insn = emit_move_insn (r0, lr);
29270 RTX_FRAME_RELATED_P (insn) = 1;
29271 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29272 RTX_FRAME_RELATED_P (insn) = 1;
29274 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29275 const0_rtx, const0_rtx));
29276 call_fusage = NULL_RTX;
29277 use_reg (&call_fusage, r12);
29278 /* Say the call uses r0, even though it doesn't, to stop regrename
29279 from twiddling with the insns saving lr, trashing args for cfun.
29280 The insns restoring lr are similarly protected by making
29281 split_stack_return use r0. */
29282 use_reg (&call_fusage, r0);
29283 add_function_usage_to (insn, call_fusage);
29284 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29285 insn = emit_move_insn (lr, r0);
29286 add_reg_note (insn, REG_CFA_RESTORE, lr);
29287 RTX_FRAME_RELATED_P (insn) = 1;
29288 emit_insn (gen_split_stack_return ());
29290 emit_label (ok_label);
29291 LABEL_NUSES (ok_label) = 1;
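/* A rough sketch of the sequence emitted above, assuming 64-bit and
   eliding the exact form of the limit load (illustrative only, not
   the authoritative output):

	<load split-stack limit into r0>
	addis r12,r1,<hi>	# the two linker-editable add insns
	addi r12,r12,<lo>
	cmpld cr7,r12,r0
	bge cr7,.Lok		# frame fits: skip the call
	mflr r0
	std r0,<lr_save_offset>(r1)
	bl __morestack
	ld r0,<lr_save_offset>(r1)
	mtlr r0
	blr			# split_stack_return
   .Lok:  */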
29294 /* Return the internal arg pointer used for function incoming
29295 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29296 to copy it to a pseudo in order for it to be preserved over calls
29297 and suchlike. We'd really like to use a pseudo here for the
29298 internal arg pointer but data-flow analysis is not prepared to
29299 accept pseudos as live at the beginning of a function. */
29301 static rtx
29302 rs6000_internal_arg_pointer (void)
29304 if (flag_split_stack
29305 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29306 == NULL))
29309 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29311 rtx pat;
29313 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29314 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29316 /* Put the pseudo initialization right after the note at the
29317 beginning of the function. */
29318 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29319 gen_rtx_REG (Pmode, 12));
29320 push_topmost_sequence ();
29321 emit_insn_after (pat, get_insns ());
29322 pop_topmost_sequence ();
29324 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29325 FIRST_PARM_OFFSET (current_function_decl));
29327 return virtual_incoming_args_rtx;
29330 /* We may have to tell the dataflow pass that the split stack prologue
29331 is initializing a register. */
29333 static void
29334 rs6000_live_on_entry (bitmap regs)
29336 if (flag_split_stack)
29337 bitmap_set_bit (regs, 12);
29340 /* Emit -fsplit-stack dynamic stack allocation space check. */
29342 void
29343 rs6000_split_stack_space_check (rtx size, rtx label)
29345 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29346 rtx limit = gen_reg_rtx (Pmode);
29347 rtx requested = gen_reg_rtx (Pmode);
29348 rtx cmp = gen_reg_rtx (CCUNSmode);
29349 rtx jump;
29351 emit_insn (gen_load_split_stack_limit (limit));
29352 if (CONST_INT_P (size))
29353 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29354 else
29356 size = force_reg (Pmode, size);
29357 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29359 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29360 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29361 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29362 gen_rtx_LABEL_REF (VOIDmode, label),
29363 pc_rtx);
29364 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29365 JUMP_LABEL (jump) = label;
29368 /* A C compound statement that outputs the assembler code for a thunk
29369 function, used to implement C++ virtual function calls with
29370 multiple inheritance. The thunk acts as a wrapper around a virtual
29371 function, adjusting the implicit object parameter before handing
29372 control off to the real function.
29374 First, emit code to add the integer DELTA to the location that
29375 contains the incoming first argument. Assume that this argument
29376 contains a pointer, and is the one used to pass the `this' pointer
29377 in C++. This is the incoming argument *before* the function
29378 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29379 values of all other incoming arguments.
29381 After the addition, emit code to jump to FUNCTION, which is a
29382 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29383 not touch the return address. Hence returning from FUNCTION will
29384 return to whoever called the current `thunk'.
29386 The effect must be as if FUNCTION had been called directly with the
29387 adjusted first argument. This macro is responsible for emitting
29388 all of the code for a thunk function; output_function_prologue()
29389 and output_function_epilogue() are not invoked.
29391 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29392 been extracted from it.) It might possibly be useful on some
29393 targets, but probably not.
29395 If you do not define this macro, the target-independent code in the
29396 C++ frontend will generate a less efficient heavyweight thunk that
29397 calls FUNCTION instead of jumping to it. The generic approach does
29398 not support varargs. */
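/* For example (illustrative only), given

     struct A { virtual void f (); };
     struct B { virtual void g (); };
     struct C : A, B { void g (); };

   the thunk for C::g in the B-in-C vtable adjusts the incoming
   `this' by the (negative) offset of the B subobject within C and
   then tail-calls the real C::g, so a single function body serves
   both entry points.  */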
29400 static void
29401 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29402 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29403 tree function)
29405 rtx this_rtx, funexp;
29406 rtx_insn *insn;
29408 reload_completed = 1;
29409 epilogue_completed = 1;
29411 /* Mark the end of the (empty) prologue. */
29412 emit_note (NOTE_INSN_PROLOGUE_END);
29414 /* Find the "this" pointer. If the function returns a structure,
29415 the structure return pointer is in r3. */
29416 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29417 this_rtx = gen_rtx_REG (Pmode, 4);
29418 else
29419 this_rtx = gen_rtx_REG (Pmode, 3);
29421 /* Apply the constant offset, if required. */
29422 if (delta)
29423 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29425 /* Apply the offset from the vtable, if required. */
29426 if (vcall_offset)
29428 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29429 rtx tmp = gen_rtx_REG (Pmode, 12);
29431 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29432 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29434 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29435 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29437 else
29439 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29441 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29443 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29446 /* Generate a tail call to the target function. */
29447 if (!TREE_USED (function))
29449 assemble_external (function);
29450 TREE_USED (function) = 1;
29452 funexp = XEXP (DECL_RTL (function), 0);
29453 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29455 #if TARGET_MACHO
29456 if (MACHOPIC_INDIRECT)
29457 funexp = machopic_indirect_call_target (funexp);
29458 #endif
29460 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29461 generate sibcall RTL explicitly. */
29462 insn = emit_call_insn (
29463 gen_rtx_PARALLEL (VOIDmode,
29464 gen_rtvec (4,
29465 gen_rtx_CALL (VOIDmode,
29466 funexp, const0_rtx),
29467 gen_rtx_USE (VOIDmode, const0_rtx),
29468 gen_rtx_USE (VOIDmode,
29469 gen_rtx_REG (SImode,
29470 LR_REGNO)),
29471 simple_return_rtx)));
29472 SIBLING_CALL_P (insn) = 1;
29473 emit_barrier ();
29475 /* Run just enough of rest_of_compilation to get the insns emitted.
29476 There's not really enough bulk here to make other passes such as
29477 instruction scheduling worth while. Note that use_thunk calls
29478 assemble_start_function and assemble_end_function. */
29479 insn = get_insns ();
29480 shorten_branches (insn);
29481 final_start_function (insn, file, 1);
29482 final (insn, file, 1);
29483 final_end_function ();
29485 reload_completed = 0;
29486 epilogue_completed = 0;
29489 /* A quick summary of the various types of 'constant-pool tables'
29490 under PowerPC:
29492 Target Flags Name One table per
29493 AIX (none) AIX TOC object file
29494 AIX -mfull-toc AIX TOC object file
29495 AIX -mminimal-toc AIX minimal TOC translation unit
29496 SVR4/EABI (none) SVR4 SDATA object file
29497 SVR4/EABI -fpic SVR4 pic object file
29498 SVR4/EABI -fPIC SVR4 PIC translation unit
29499 SVR4/EABI -mrelocatable EABI TOC function
29500 SVR4/EABI -maix AIX TOC object file
29501 SVR4/EABI -maix -mminimal-toc
29502 AIX minimal TOC translation unit
29504 Name Reg. Set by entries contains:
29505 made by addrs? fp? sum?
29507 AIX TOC 2 crt0 as Y option option
29508 AIX minimal TOC 30 prolog gcc Y Y option
29509 SVR4 SDATA 13 crt0 gcc N Y N
29510 SVR4 pic 30 prolog ld Y not yet N
29511 SVR4 PIC 30 prolog gcc Y option option
29512 EABI TOC 30 prolog gcc Y option option
29516 /* Hash functions for the hash table. */
29518 static unsigned
29519 rs6000_hash_constant (rtx k)
29521 enum rtx_code code = GET_CODE (k);
29522 machine_mode mode = GET_MODE (k);
29523 unsigned result = (code << 3) ^ mode;
29524 const char *format;
29525 int flen, fidx;
29527 format = GET_RTX_FORMAT (code);
29528 flen = strlen (format);
29529 fidx = 0;
29531 switch (code)
29533 case LABEL_REF:
29534 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29536 case CONST_WIDE_INT:
29538 int i;
29539 flen = CONST_WIDE_INT_NUNITS (k);
29540 for (i = 0; i < flen; i++)
29541 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29542 return result;
29545 case CONST_DOUBLE:
29546 if (mode != VOIDmode)
29547 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29548 flen = 2;
29549 break;
29551 case CODE_LABEL:
29552 fidx = 3;
29553 break;
29555 default:
29556 break;
29559 for (; fidx < flen; fidx++)
29560 switch (format[fidx])
29562 case 's':
29564 unsigned i, len;
29565 const char *str = XSTR (k, fidx);
29566 len = strlen (str);
29567 result = result * 613 + len;
29568 for (i = 0; i < len; i++)
29569 result = result * 613 + (unsigned) str[i];
29570 break;
29572 case 'u':
29573 case 'e':
29574 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29575 break;
29576 case 'i':
29577 case 'n':
29578 result = result * 613 + (unsigned) XINT (k, fidx);
29579 break;
29580 case 'w':
29581 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29582 result = result * 613 + (unsigned) XWINT (k, fidx);
29583 else
29585 size_t i;
29586 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29587 result = result * 613 + (unsigned) (XWINT (k, fidx)
29588 >> CHAR_BIT * i);
29590 break;
29591 case '0':
29592 break;
29593 default:
29594 gcc_unreachable ();
29597 return result;
29600 hashval_t
29601 toc_hasher::hash (toc_hash_struct *thc)
29603 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29606 /* Compare H1 and H2 for equivalence. */
29608 bool
29609 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29611 rtx r1 = h1->key;
29612 rtx r2 = h2->key;
29614 if (h1->key_mode != h2->key_mode)
29615 return 0;
29617 return rtx_equal_p (r1, r2);
29620 /* These are the names given by the C++ front-end to vtables, and
29621 vtable-like objects. Ideally, this logic should not be here;
29622 instead, there should be some programmatic way of inquiring as
29623 to whether or not an object is a vtable. */
29625 #define VTABLE_NAME_P(NAME) \
29626 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29627 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29628 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29629 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29630 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29632 #ifdef NO_DOLLAR_IN_LABEL
29633 /* Return a GGC-allocated character string translating dollar signs in
29634 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29636 const char *
29637 rs6000_xcoff_strip_dollar (const char *name)
29639 char *strip, *p;
29640 const char *q;
29641 size_t len;
29643 q = (const char *) strchr (name, '$');
29645 if (q == 0 || q == name)
29646 return name;
29648 len = strlen (name);
29649 strip = XALLOCAVEC (char, len + 1);
29650 strcpy (strip, name);
29651 p = strip + (q - name);
29652 while (p)
29654 *p = '_';
29655 p = strchr (p + 1, '$');
29658 return ggc_alloc_string (strip, len);
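/* For illustration: rs6000_xcoff_strip_dollar ("foo$bar$baz") returns
   "foo_bar_baz", while a name whose only '$' is its first character
   is returned unchanged (the q == name test above).  */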
29660 #endif
29662 void
29663 rs6000_output_symbol_ref (FILE *file, rtx x)
29665 /* Currently C++ toc references to vtables can be emitted before it
29666 is decided whether the vtable is public or private. If this is
29667 the case, then the linker will eventually complain that there is
29668 a reference to an unknown section. Thus, for vtables only,
29669 we emit the TOC reference to reference the symbol and not the
29670 section. */
29671 const char *name = XSTR (x, 0);
29673 tree decl = SYMBOL_REF_DECL (x);
29674 if (decl /* sync condition with assemble_external () */
29675 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
29676 && (TREE_CODE (decl) == VAR_DECL
29677 || TREE_CODE (decl) == FUNCTION_DECL)
29678 && name[strlen (name) - 1] != ']')
29680 name = concat (name,
29681 (TREE_CODE (decl) == FUNCTION_DECL
29682 ? "[DS]" : "[UA]"),
29683 NULL);
29684 XSTR (x, 0) = name;
29687 if (VTABLE_NAME_P (name))
29689 RS6000_OUTPUT_BASENAME (file, name);
29691 else
29692 assemble_name (file, name);
29695 /* Output a TOC entry. We derive the entry name from what is being
29696 written. */
29698 void
29699 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29701 char buf[256];
29702 const char *name = buf;
29703 rtx base = x;
29704 HOST_WIDE_INT offset = 0;
29706 gcc_assert (!TARGET_NO_TOC);
29708 /* When the linker won't eliminate them, don't output duplicate
29709 TOC entries (this happens on AIX if there is any kind of TOC,
29710 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29711 CODE_LABELs. */
29712 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29714 struct toc_hash_struct *h;
29716 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29717 time because GGC is not initialized at that point. */
29718 if (toc_hash_table == NULL)
29719 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29721 h = ggc_alloc<toc_hash_struct> ();
29722 h->key = x;
29723 h->key_mode = mode;
29724 h->labelno = labelno;
29726 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29727 if (*found == NULL)
29728 *found = h;
29729 else /* This is indeed a duplicate.
29730 Set this label equal to that label. */
29732 fputs ("\t.set ", file);
29733 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29734 fprintf (file, "%d,", labelno);
29735 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29736 fprintf (file, "%d\n", ((*found)->labelno));
29738 #ifdef HAVE_AS_TLS
29739 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29740 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29741 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29743 fputs ("\t.set ", file);
29744 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29745 fprintf (file, "%d,", labelno);
29746 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29747 fprintf (file, "%d\n", ((*found)->labelno));
29749 #endif
29750 return;
29754 /* If we're going to put a double constant in the TOC, make sure it's
29755 aligned properly when strict alignment is on. */
29756 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29757 && STRICT_ALIGNMENT
29758 && GET_MODE_BITSIZE (mode) >= 64
29759 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29760 ASM_OUTPUT_ALIGN (file, 3);
29763 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29765 /* Handle FP constants specially. Note that if we have a minimal
29766 TOC, things we put here aren't actually in the TOC, so we can allow
29767 FP constants. */
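/* For illustration (not from the original source): on a 64-bit
   big-endian XCOFF target, a DFmode 1.0 is handled by the DFmode arm
   below and comes out roughly as
	.tc FD_3ff00000_0[TC],0x3ff0000000000000
   whereas TARGET_MINIMAL_TOC instead emits the bare doubleword
   directive with no .tc name.  */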
29768 if (GET_CODE (x) == CONST_DOUBLE
29769 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29770 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29772 long k[4];
29774 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29775 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29776 else
29777 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29779 if (TARGET_64BIT)
29781 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29782 fputs (DOUBLE_INT_ASM_OP, file);
29783 else
29784 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29785 k[0] & 0xffffffff, k[1] & 0xffffffff,
29786 k[2] & 0xffffffff, k[3] & 0xffffffff);
29787 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29788 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29789 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29790 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29791 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29792 return;
29794 else
29796 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29797 fputs ("\t.long ", file);
29798 else
29799 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29800 k[0] & 0xffffffff, k[1] & 0xffffffff,
29801 k[2] & 0xffffffff, k[3] & 0xffffffff);
29802 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29803 k[0] & 0xffffffff, k[1] & 0xffffffff,
29804 k[2] & 0xffffffff, k[3] & 0xffffffff);
29805 return;
29808 else if (GET_CODE (x) == CONST_DOUBLE
29809 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29811 long k[2];
29813 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29814 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29815 else
29816 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29818 if (TARGET_64BIT)
29820 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29821 fputs (DOUBLE_INT_ASM_OP, file);
29822 else
29823 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29824 k[0] & 0xffffffff, k[1] & 0xffffffff);
29825 fprintf (file, "0x%lx%08lx\n",
29826 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29827 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29828 return;
29830 else
29832 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29833 fputs ("\t.long ", file);
29834 else
29835 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29836 k[0] & 0xffffffff, k[1] & 0xffffffff);
29837 fprintf (file, "0x%lx,0x%lx\n",
29838 k[0] & 0xffffffff, k[1] & 0xffffffff);
29839 return;
29842 else if (GET_CODE (x) == CONST_DOUBLE
29843 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29845 long l;
29847 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29848 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29849 else
29850 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29852 if (TARGET_64BIT)
29854 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29855 fputs (DOUBLE_INT_ASM_OP, file);
29856 else
29857 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29858 if (WORDS_BIG_ENDIAN)
29859 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29860 else
29861 fprintf (file, "0x%lx\n", l & 0xffffffff);
29862 return;
29864 else
29866 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29867 fputs ("\t.long ", file);
29868 else
29869 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29870 fprintf (file, "0x%lx\n", l & 0xffffffff);
29871 return;
29874 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29876 unsigned HOST_WIDE_INT low;
29877 HOST_WIDE_INT high;
29879 low = INTVAL (x) & 0xffffffff;
29880 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29882 /* TOC entries are always Pmode-sized, so when big-endian
29883 smaller integer constants in the TOC need to be padded.
29884 (This is still a win over putting the constants in
29885 a separate constant pool, because then we'd have
29886 to have both a TOC entry _and_ the actual constant.)
29888 For a 32-bit target, CONST_INT values are loaded and shifted
29889 entirely within `low' and can be stored in one TOC entry. */
29891 /* It would be easy to make this work, but it doesn't now. */
29892 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29894 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29896 low |= high << 32;
29897 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29898 high = (HOST_WIDE_INT) low >> 32;
29899 low &= 0xffffffff;
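/* For illustration: an SImode CONST_INT 5 on a 64-bit big-endian
   target enters this block with low == 5, high == 0 and leaves it
   with high == 5, low == 0, i.e. the 32-bit value is left-justified
   in the doubleword TOC entry.  */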
29902 if (TARGET_64BIT)
29904 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29905 fputs (DOUBLE_INT_ASM_OP, file);
29906 else
29907 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29908 (long) high & 0xffffffff, (long) low & 0xffffffff);
29909 fprintf (file, "0x%lx%08lx\n",
29910 (long) high & 0xffffffff, (long) low & 0xffffffff);
29911 return;
29913 else
29915 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29917 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29918 fputs ("\t.long ", file);
29919 else
29920 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29921 (long) high & 0xffffffff, (long) low & 0xffffffff);
29922 fprintf (file, "0x%lx,0x%lx\n",
29923 (long) high & 0xffffffff, (long) low & 0xffffffff);
29925 else
29927 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29928 fputs ("\t.long ", file);
29929 else
29930 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29931 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29933 return;
29937 if (GET_CODE (x) == CONST)
29939 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29940 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29942 base = XEXP (XEXP (x, 0), 0);
29943 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29946 switch (GET_CODE (base))
29948 case SYMBOL_REF:
29949 name = XSTR (base, 0);
29950 break;
29952 case LABEL_REF:
29953 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29954 CODE_LABEL_NUMBER (XEXP (base, 0)));
29955 break;
29957 case CODE_LABEL:
29958 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29959 break;
29961 default:
29962 gcc_unreachable ();
29965 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29966 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29967 else
29969 fputs ("\t.tc ", file);
29970 RS6000_OUTPUT_BASENAME (file, name);
29972 if (offset < 0)
29973 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29974 else if (offset)
29975 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29977 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29978 after other TOC symbols, reducing overflow of small TOC access
29979 to [TC] symbols. */
29980 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29981 ? "[TE]," : "[TC],", file);
29984 /* Currently C++ toc references to vtables can be emitted before it
29985 is decided whether the vtable is public or private. If this is
29986 the case, then the linker will eventually complain that there is
29987 a TOC reference to an unknown section. Thus, for vtables only,
29988 we emit the TOC reference to reference the symbol and not the
29989 section. */
29990 if (VTABLE_NAME_P (name))
29992 RS6000_OUTPUT_BASENAME (file, name);
29993 if (offset < 0)
29994 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29995 else if (offset > 0)
29996 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29998 else
29999 output_addr_const (file, x);
30001 #if HAVE_AS_TLS
30002 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
30004 switch (SYMBOL_REF_TLS_MODEL (base))
30006 case 0:
30007 break;
30008 case TLS_MODEL_LOCAL_EXEC:
30009 fputs ("@le", file);
30010 break;
30011 case TLS_MODEL_INITIAL_EXEC:
30012 fputs ("@ie", file);
30013 break;
30014 /* Use global-dynamic for local-dynamic. */
30015 case TLS_MODEL_GLOBAL_DYNAMIC:
30016 case TLS_MODEL_LOCAL_DYNAMIC:
30017 putc ('\n', file);
30018 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30019 fputs ("\t.tc .", file);
30020 RS6000_OUTPUT_BASENAME (file, name);
30021 fputs ("[TC],", file);
30022 output_addr_const (file, x);
30023 fputs ("@m", file);
30024 break;
30025 default:
30026 gcc_unreachable ();
30029 #endif
30031 putc ('\n', file);
30034 /* Output an assembler pseudo-op to write an ASCII string of N characters
30035 starting at P to FILE.
30037 On the RS/6000, we have to do this using the .byte operation and
30038 write out special characters outside the quoted string.
30039 Also, the assembler is broken; very long strings are truncated,
30040 so we must artificially break them up early. */
30042 void
30043 output_ascii (FILE *file, const char *p, int n)
30045 char c;
30046 int i, count_string;
30047 const char *for_string = "\t.byte \"";
30048 const char *for_decimal = "\t.byte ";
30049 const char *to_close = NULL;
30051 count_string = 0;
30052 for (i = 0; i < n; i++)
30054 c = *p++;
30055 if (c >= ' ' && c < 0177)
30057 if (for_string)
30058 fputs (for_string, file);
30059 putc (c, file);
30061 /* Write two quotes to get one. */
30062 if (c == '"')
30064 putc (c, file);
30065 ++count_string;
30068 for_string = NULL;
30069 for_decimal = "\"\n\t.byte ";
30070 to_close = "\"\n";
30071 ++count_string;
30073 if (count_string >= 512)
30075 fputs (to_close, file);
30077 for_string = "\t.byte \"";
30078 for_decimal = "\t.byte ";
30079 to_close = NULL;
30080 count_string = 0;
30083 else
30085 if (for_decimal)
30086 fputs (for_decimal, file);
30087 fprintf (file, "%d", c);
30089 for_string = "\n\t.byte \"";
30090 for_decimal = ", ";
30091 to_close = "\n";
30092 count_string = 0;
30096 /* Now close the string if we have written one. Then end the line. */
30097 if (to_close)
30098 fputs (to_close, file);
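/* For illustration: output_ascii (file, "Hi\n", 3) emits

	.byte "Hi"
	.byte 10

   since the newline is not printable and is written as a decimal
   byte outside the quoted string.  */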
30101 /* Generate a unique section name for FILENAME for a section type
30102 represented by SECTION_DESC. Output goes into BUF.
30104 SECTION_DESC can be any string, as long as it is different for each
30105 possible section type.
30107 We name the section in the same manner as xlc. The name begins with an
30108 underscore followed by the filename (after stripping any leading directory
30109 names) with the last period replaced by the string SECTION_DESC. If
30110 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30111 the name. */
30113 void
30114 rs6000_gen_section_name (char **buf, const char *filename,
30115 const char *section_desc)
30117 const char *q, *after_last_slash, *last_period = 0;
30118 char *p;
30119 int len;
30121 after_last_slash = filename;
30122 for (q = filename; *q; q++)
30124 if (*q == '/')
30125 after_last_slash = q + 1;
30126 else if (*q == '.')
30127 last_period = q;
30130 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30131 *buf = (char *) xmalloc (len);
30133 p = *buf;
30134 *p++ = '_';
30136 for (q = after_last_slash; *q; q++)
30138 if (q == last_period)
30140 strcpy (p, section_desc);
30141 p += strlen (section_desc);
30142 break;
30145 else if (ISALNUM (*q))
30146 *p++ = *q;
30149 if (last_period == 0)
30150 strcpy (p, section_desc);
30151 else
30152 *p = '\0';
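/* For illustration: rs6000_gen_section_name (&buf, "src/foo.c", "ro_")
   produces "_fooro_": the directory prefix is stripped, the last
   period and everything after it are replaced by SECTION_DESC, and
   non-alphanumeric characters are dropped.  */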
30155 /* Emit profile function. */
30157 void
30158 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30160 /* Non-standard profiling for kernels, which just saves LR then calls
30161 _mcount without worrying about arg saves. The idea is to change
30162 the function prologue as little as possible as it isn't easy to
30163 account for arg save/restore code added just for _mcount. */
30164 if (TARGET_PROFILE_KERNEL)
30165 return;
30167 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30169 #ifndef NO_PROFILE_COUNTERS
30170 # define NO_PROFILE_COUNTERS 0
30171 #endif
30172 if (NO_PROFILE_COUNTERS)
30173 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30174 LCT_NORMAL, VOIDmode, 0);
30175 else
30177 char buf[30];
30178 const char *label_name;
30179 rtx fun;
30181 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30182 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30183 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30185 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30186 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
30189 else if (DEFAULT_ABI == ABI_DARWIN)
30191 const char *mcount_name = RS6000_MCOUNT;
30192 int caller_addr_regno = LR_REGNO;
30194 /* Be conservative and always set this, at least for now. */
30195 crtl->uses_pic_offset_table = 1;
30197 #if TARGET_MACHO
30198 /* For PIC code, set up a stub and collect the caller's address
30199 from r0, which is where the prologue puts it. */
30200 if (MACHOPIC_INDIRECT
30201 && crtl->uses_pic_offset_table)
30202 caller_addr_regno = 0;
30203 #endif
30204 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30205 LCT_NORMAL, VOIDmode, 1,
30206 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30210 /* Write function profiler code. */
30212 void
30213 output_function_profiler (FILE *file, int labelno)
30215 char buf[100];
30217 switch (DEFAULT_ABI)
30219 default:
30220 gcc_unreachable ();
30222 case ABI_V4:
30223 if (!TARGET_32BIT)
30225 warning (0, "no profiling of 64-bit code for this ABI");
30226 return;
30228 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30229 fprintf (file, "\tmflr %s\n", reg_names[0]);
30230 if (NO_PROFILE_COUNTERS)
30232 asm_fprintf (file, "\tstw %s,4(%s)\n",
30233 reg_names[0], reg_names[1]);
30235 else if (TARGET_SECURE_PLT && flag_pic)
30237 if (TARGET_LINK_STACK)
30239 char name[32];
30240 get_ppc476_thunk_name (name);
30241 asm_fprintf (file, "\tbl %s\n", name);
30243 else
30244 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30245 asm_fprintf (file, "\tstw %s,4(%s)\n",
30246 reg_names[0], reg_names[1]);
30247 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30248 asm_fprintf (file, "\taddis %s,%s,",
30249 reg_names[12], reg_names[12]);
30250 assemble_name (file, buf);
30251 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30252 assemble_name (file, buf);
30253 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30255 else if (flag_pic == 1)
30257 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30258 asm_fprintf (file, "\tstw %s,4(%s)\n",
30259 reg_names[0], reg_names[1]);
30260 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30261 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30262 assemble_name (file, buf);
30263 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30265 else if (flag_pic > 1)
30267 asm_fprintf (file, "\tstw %s,4(%s)\n",
30268 reg_names[0], reg_names[1]);
30269 /* Now, we need to get the address of the label. */
30270 if (TARGET_LINK_STACK)
30272 char name[32];
30273 get_ppc476_thunk_name (name);
30274 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30275 assemble_name (file, buf);
30276 fputs ("-.\n1:", file);
30277 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30278 asm_fprintf (file, "\taddi %s,%s,4\n",
30279 reg_names[11], reg_names[11]);
30281 else
30283 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30284 assemble_name (file, buf);
30285 fputs ("-.\n1:", file);
30286 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30288 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30289 reg_names[0], reg_names[11]);
30290 asm_fprintf (file, "\tadd %s,%s,%s\n",
30291 reg_names[0], reg_names[0], reg_names[11]);
30293 else
30295 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30296 assemble_name (file, buf);
30297 fputs ("@ha\n", file);
30298 asm_fprintf (file, "\tstw %s,4(%s)\n",
30299 reg_names[0], reg_names[1]);
30300 asm_fprintf (file, "\tla %s,", reg_names[0]);
30301 assemble_name (file, buf);
30302 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30305 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30306 fprintf (file, "\tbl %s%s\n",
30307 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30308 break;
30310 case ABI_AIX:
30311 case ABI_ELFv2:
30312 case ABI_DARWIN:
30313 /* Don't do anything, done in output_profile_hook (). */
30314 break;
30320 /* The following variable value is the last issued insn. */
30322 static rtx_insn *last_scheduled_insn;
30324 /* The following variable helps to balance issuing of load and
30325 store instructions. */
30327 static int load_store_pendulum;
30329 /* The following variable helps pair divide insns during scheduling. */
30330 static int divide_cnt;
30331 /* The following variable helps pair and alternate vector and vector load
30332 insns during scheduling. */
30333 static int vec_load_pendulum;
30336 /* Power4 load update and store update instructions are cracked into a
30337 load or store and an integer insn which are executed in the same cycle.
30338 Branches have their own dispatch slot which does not count against the
30339 GCC issue rate, but it changes the program flow so there are no other
30340 instructions to issue in this cycle. */
30342 static int
30343 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30345 last_scheduled_insn = insn;
30346 if (GET_CODE (PATTERN (insn)) == USE
30347 || GET_CODE (PATTERN (insn)) == CLOBBER)
30349 cached_can_issue_more = more;
30350 return cached_can_issue_more;
30353 if (insn_terminates_group_p (insn, current_group))
30355 cached_can_issue_more = 0;
30356 return cached_can_issue_more;
30359 /* If the insn has no reservation but we reach here anyway,
30360 leave the issue count unchanged. */
30360 if (recog_memoized (insn) < 0)
30361 return more;
30363 if (rs6000_sched_groups)
30365 if (is_microcoded_insn (insn))
30366 cached_can_issue_more = 0;
30367 else if (is_cracked_insn (insn))
30368 cached_can_issue_more = more > 2 ? more - 2 : 0;
30369 else
30370 cached_can_issue_more = more - 1;
30372 return cached_can_issue_more;
30375 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30376 return 0;
30378 cached_can_issue_more = more - 1;
30379 return cached_can_issue_more;
30382 static int
30383 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30385 int r = rs6000_variable_issue_1 (insn, more);
30386 if (verbose)
30387 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30388 return r;
30391 /* Adjust the cost of a scheduling dependency. Return the new cost of
30392 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30394 static int
30395 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30396 unsigned int)
30398 enum attr_type attr_type;
30400 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30401 return cost;
30403 switch (dep_type)
30405 case REG_DEP_TRUE:
30407 /* Data dependency; DEP_INSN writes a register that INSN reads
30408 some cycles later. */
30410 /* Separate a load from a narrower, dependent store. */
30411 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30412 && GET_CODE (PATTERN (insn)) == SET
30413 && GET_CODE (PATTERN (dep_insn)) == SET
30414 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30415 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30416 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30417 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30418 return cost + 14;
30420 attr_type = get_attr_type (insn);
30422 switch (attr_type)
30424 case TYPE_JMPREG:
30425 /* Tell the first scheduling pass about the latency between
30426 a mtctr and bctr (and mtlr and br/blr). The first
30427 scheduling pass will not know about this latency since
30428 the mtctr instruction, which has the latency associated
30429 to it, will be generated by reload. */
30430 return 4;
30431 case TYPE_BRANCH:
30432 /* Leave some extra cycles between a compare and its
30433 dependent branch, to inhibit expensive mispredicts. */
30434 if ((rs6000_cpu_attr == CPU_PPC603
30435 || rs6000_cpu_attr == CPU_PPC604
30436 || rs6000_cpu_attr == CPU_PPC604E
30437 || rs6000_cpu_attr == CPU_PPC620
30438 || rs6000_cpu_attr == CPU_PPC630
30439 || rs6000_cpu_attr == CPU_PPC750
30440 || rs6000_cpu_attr == CPU_PPC7400
30441 || rs6000_cpu_attr == CPU_PPC7450
30442 || rs6000_cpu_attr == CPU_PPCE5500
30443 || rs6000_cpu_attr == CPU_PPCE6500
30444 || rs6000_cpu_attr == CPU_POWER4
30445 || rs6000_cpu_attr == CPU_POWER5
30446 || rs6000_cpu_attr == CPU_POWER7
30447 || rs6000_cpu_attr == CPU_POWER8
30448 || rs6000_cpu_attr == CPU_POWER9
30449 || rs6000_cpu_attr == CPU_CELL)
30450 && recog_memoized (dep_insn)
30451 && (INSN_CODE (dep_insn) >= 0))
30453 switch (get_attr_type (dep_insn))
30455 case TYPE_CMP:
30456 case TYPE_FPCOMPARE:
30457 case TYPE_CR_LOGICAL:
30458 case TYPE_DELAYED_CR:
30459 return cost + 2;
30460 case TYPE_EXTS:
30461 case TYPE_MUL:
30462 if (get_attr_dot (dep_insn) == DOT_YES)
30463 return cost + 2;
30464 else
30465 break;
30466 case TYPE_SHIFT:
30467 if (get_attr_dot (dep_insn) == DOT_YES
30468 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30469 return cost + 2;
30470 else
30471 break;
30472 default:
30473 break;
30475 break;
30477 case TYPE_STORE:
30478 case TYPE_FPSTORE:
30479 if ((rs6000_cpu == PROCESSOR_POWER6)
30480 && recog_memoized (dep_insn)
30481 && (INSN_CODE (dep_insn) >= 0))
30484 if (GET_CODE (PATTERN (insn)) != SET)
30485 /* If this happens, we have to extend this to schedule
30486 optimally. Return default for now. */
30487 return cost;
30489 /* Adjust the cost for the case where the value written
30490 by a fixed point operation is used as the address
30491 gen value on a store. */
30492 switch (get_attr_type (dep_insn))
30494 case TYPE_LOAD:
30495 case TYPE_CNTLZ:
30497 if (! store_data_bypass_p (dep_insn, insn))
30498 return get_attr_sign_extend (dep_insn)
30499 == SIGN_EXTEND_YES ? 6 : 4;
30500 break;
30502 case TYPE_SHIFT:
30504 if (! store_data_bypass_p (dep_insn, insn))
30505 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30506 6 : 3;
30507 break;
30509 case TYPE_INTEGER:
30510 case TYPE_ADD:
30511 case TYPE_LOGICAL:
30512 case TYPE_EXTS:
30513 case TYPE_INSERT:
30515 if (! store_data_bypass_p (dep_insn, insn))
30516 return 3;
30517 break;
30519 case TYPE_STORE:
30520 case TYPE_FPLOAD:
30521 case TYPE_FPSTORE:
30523 if (get_attr_update (dep_insn) == UPDATE_YES
30524 && ! store_data_bypass_p (dep_insn, insn))
30525 return 3;
30526 break;
30528 case TYPE_MUL:
30530 if (! store_data_bypass_p (dep_insn, insn))
30531 return 17;
30532 break;
30534 case TYPE_DIV:
30536 if (! store_data_bypass_p (dep_insn, insn))
30537 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30538 break;
30540 default:
30541 break;
30544 break;
30546 case TYPE_LOAD:
30547 if ((rs6000_cpu == PROCESSOR_POWER6)
30548 && recog_memoized (dep_insn)
30549 && (INSN_CODE (dep_insn) >= 0))
30552 /* Adjust the cost for the case where the value written
30553 by a fixed point instruction is used within the address
30554 gen portion of a subsequent load(u)(x). */
30555 switch (get_attr_type (dep_insn))
30557 case TYPE_LOAD:
30558 case TYPE_CNTLZ:
30560 if (set_to_load_agen (dep_insn, insn))
30561 return get_attr_sign_extend (dep_insn)
30562 == SIGN_EXTEND_YES ? 6 : 4;
30563 break;
30565 case TYPE_SHIFT:
30567 if (set_to_load_agen (dep_insn, insn))
30568 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30569 6 : 3;
30570 break;
30572 case TYPE_INTEGER:
30573 case TYPE_ADD:
30574 case TYPE_LOGICAL:
30575 case TYPE_EXTS:
30576 case TYPE_INSERT:
30578 if (set_to_load_agen (dep_insn, insn))
30579 return 3;
30580 break;
30582 case TYPE_STORE:
30583 case TYPE_FPLOAD:
30584 case TYPE_FPSTORE:
30586 if (get_attr_update (dep_insn) == UPDATE_YES
30587 && set_to_load_agen (dep_insn, insn))
30588 return 3;
30589 break;
30591 case TYPE_MUL:
30593 if (set_to_load_agen (dep_insn, insn))
30594 return 17;
30595 break;
30597 case TYPE_DIV:
30599 if (set_to_load_agen (dep_insn, insn))
30600 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30601 break;
30603 default:
30604 break;
30607 break;
30609 case TYPE_FPLOAD:
30610 if ((rs6000_cpu == PROCESSOR_POWER6)
30611 && get_attr_update (insn) == UPDATE_NO
30612 && recog_memoized (dep_insn)
30613 && (INSN_CODE (dep_insn) >= 0)
30614 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30615 return 2;
30617 default:
30618 break;
30621 /* Fall out to return default cost. */
30623 break;
30625 case REG_DEP_OUTPUT:
30626 /* Output dependency; DEP_INSN writes a register that INSN writes some
30627 cycles later. */
30628 if ((rs6000_cpu == PROCESSOR_POWER6)
30629 && recog_memoized (dep_insn)
30630 && (INSN_CODE (dep_insn) >= 0))
30632 attr_type = get_attr_type (insn);
30634 switch (attr_type)
30636 case TYPE_FP:
30637 case TYPE_FPSIMPLE:
30638 if (get_attr_type (dep_insn) == TYPE_FP
30639 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30640 return 1;
30641 break;
30642 case TYPE_FPLOAD:
30643 if (get_attr_update (insn) == UPDATE_NO
30644 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30645 return 2;
30646 break;
30647 default:
30648 break;
30651 /* Fall through, no cost for output dependency. */
30653 case REG_DEP_ANTI:
30654 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30655 cycles later. */
30656 return 0;
30658 default:
30659 gcc_unreachable ();
30662 return cost;
30665 /* Debug version of rs6000_adjust_cost. */
30667 static int
30668 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30669 int cost, unsigned int dw)
30671 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30673 if (ret != cost)
30675 const char *dep;
30677 switch (dep_type)
30679 default: dep = "unknown dependency"; break;
30680 case REG_DEP_TRUE: dep = "data dependency"; break;
30681 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30682 case REG_DEP_ANTI: dep = "anti dependency"; break;
30685 fprintf (stderr,
30686 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30687 "%s, insn:\n", ret, cost, dep);
30689 debug_rtx (insn);
30692 return ret;
30695 /* The function returns true if INSN is microcoded.
30696 Return false otherwise. */
30698 static bool
30699 is_microcoded_insn (rtx_insn *insn)
30701 if (!insn || !NONDEBUG_INSN_P (insn)
30702 || GET_CODE (PATTERN (insn)) == USE
30703 || GET_CODE (PATTERN (insn)) == CLOBBER)
30704 return false;
30706 if (rs6000_cpu_attr == CPU_CELL)
30707 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30709 if (rs6000_sched_groups
30710 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30712 enum attr_type type = get_attr_type (insn);
30713 if ((type == TYPE_LOAD
30714 && get_attr_update (insn) == UPDATE_YES
30715 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30716 || ((type == TYPE_LOAD || type == TYPE_STORE)
30717 && get_attr_update (insn) == UPDATE_YES
30718 && get_attr_indexed (insn) == INDEXED_YES)
30719 || type == TYPE_MFCR)
30720 return true;
30723 return false;
30726 /* The function returns true if INSN is cracked into 2 instructions
30727 by the processor (and therefore occupies 2 issue slots). */
30729 static bool
30730 is_cracked_insn (rtx_insn *insn)
30732 if (!insn || !NONDEBUG_INSN_P (insn)
30733 || GET_CODE (PATTERN (insn)) == USE
30734 || GET_CODE (PATTERN (insn)) == CLOBBER)
30735 return false;
30737 if (rs6000_sched_groups
30738 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30740 enum attr_type type = get_attr_type (insn);
30741 if ((type == TYPE_LOAD
30742 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30743 && get_attr_update (insn) == UPDATE_NO)
30744 || (type == TYPE_LOAD
30745 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30746 && get_attr_update (insn) == UPDATE_YES
30747 && get_attr_indexed (insn) == INDEXED_NO)
30748 || (type == TYPE_STORE
30749 && get_attr_update (insn) == UPDATE_YES
30750 && get_attr_indexed (insn) == INDEXED_NO)
30751 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30752 && get_attr_update (insn) == UPDATE_YES)
30753 || type == TYPE_DELAYED_CR
30754 || (type == TYPE_EXTS
30755 && get_attr_dot (insn) == DOT_YES)
30756 || (type == TYPE_SHIFT
30757 && get_attr_dot (insn) == DOT_YES
30758 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30759 || (type == TYPE_MUL
30760 && get_attr_dot (insn) == DOT_YES)
30761 || type == TYPE_DIV
30762 || (type == TYPE_INSERT
30763 && get_attr_size (insn) == SIZE_32))
30764 return true;
30767 return false;
30770 /* The function returns true if INSN can be issued only from
30771 the branch slot. */
30773 static bool
30774 is_branch_slot_insn (rtx_insn *insn)
30776 if (!insn || !NONDEBUG_INSN_P (insn)
30777 || GET_CODE (PATTERN (insn)) == USE
30778 || GET_CODE (PATTERN (insn)) == CLOBBER)
30779 return false;
30781 if (rs6000_sched_groups)
30783 enum attr_type type = get_attr_type (insn);
30784 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30785 return true;
30786 return false;
30789 return false;
30792 /* The function returns true if OUT_INSN sets a value that is
30793 used in the address generation computation of IN_INSN. */
30794 static bool
30795 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30797 rtx out_set, in_set;
30799 /* For performance reasons, only handle the simple case where
30800 both loads are a single_set. */
30801 out_set = single_set (out_insn);
30802 if (out_set)
30804 in_set = single_set (in_insn);
30805 if (in_set)
30806 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30809 return false;
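/* For illustration: in the sequence

	addi r10,r3,8
	lwz r9,0(r10)

   the addi's destination (r10) is mentioned in the load's address,
   so set_to_load_agen returns true and the extra address-generation
   latencies in rs6000_adjust_cost above apply.  */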
30812 /* Try to determine base/offset/size parts of the given MEM.
30813 Return true if successful, false if any of the values couldn't
30814 be determined.
30816 This function only looks for REG or REG+CONST address forms.
30817 REG+REG address form will return false. */
30819 static bool
30820 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30821 HOST_WIDE_INT *size)
30823 rtx addr_rtx;
30824 if (MEM_SIZE_KNOWN_P (mem))
30825 *size = MEM_SIZE (mem);
30826 else
30827 return false;
30829 addr_rtx = (XEXP (mem, 0));
30830 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30831 addr_rtx = XEXP (addr_rtx, 1);
30833 *offset = 0;
30834 while (GET_CODE (addr_rtx) == PLUS
30835 && CONST_INT_P (XEXP (addr_rtx, 1)))
30837 *offset += INTVAL (XEXP (addr_rtx, 1));
30838 addr_rtx = XEXP (addr_rtx, 0);
30840 if (!REG_P (addr_rtx))
30841 return false;
30843 *base = addr_rtx;
30844 return true;
30847 /* Return true if the target storage location of MEM1 is adjacent
30848 to the target storage location of MEM2. */
30851 static bool
30852 adjacent_mem_locations (rtx mem1, rtx mem2)
30854 rtx reg1, reg2;
30855 HOST_WIDE_INT off1, size1, off2, size2;
30857 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30858 && get_memref_parts (mem2, &reg2, &off2, &size2))
30859 return ((REGNO (reg1) == REGNO (reg2))
30860 && ((off1 + size1 == off2)
30861 || (off2 + size2 == off1)));
30863 return false;
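/* For illustration: 4-byte accesses at 0(r9) and 4(r9) are adjacent
   (off1 + size1 == off2), while 0(r9) and 0(r10) are not, since the
   base registers differ.  */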
30866 /* This function returns true if it can be determined that the two MEM
30867 locations overlap by at least 1 byte based on base reg/offset/size. */
30869 static bool
30870 mem_locations_overlap (rtx mem1, rtx mem2)
30872 rtx reg1, reg2;
30873 HOST_WIDE_INT off1, size1, off2, size2;
30875 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30876 && get_memref_parts (mem2, &reg2, &off2, &size2))
30877 return ((REGNO (reg1) == REGNO (reg2))
30878 && (((off1 <= off2) && (off1 + size1 > off2))
30879 || ((off2 <= off1) && (off2 + size2 > off1))));
30881 return false;
30884 /* A C statement (sans semicolon) to update the integer scheduling
30885 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30886 INSN earlier, reduce the priority to execute INSN later. Do not
30887 define this macro if you do not need to adjust the scheduling
30888 priorities of insns. */
30890 static int
30891 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30893 rtx load_mem, str_mem;
30894 /* On machines (like the 750) which have asymmetric integer units,
30895 where one integer unit can do multiplies and divides and the other
30896 can't, reduce the priority of multiply/divide so it is scheduled
30897 before other integer operations. */
30899 #if 0
30900 if (! INSN_P (insn))
30901 return priority;
30903 if (GET_CODE (PATTERN (insn)) == USE)
30904 return priority;
30906 switch (rs6000_cpu_attr) {
30907 case CPU_PPC750:
30908 switch (get_attr_type (insn))
30910 default:
30911 break;
30913 case TYPE_MUL:
30914 case TYPE_DIV:
30915 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30916 priority, priority);
30917 if (priority >= 0 && priority < 0x01000000)
30918 priority >>= 3;
30919 break;
30922 #endif
30924 if (insn_must_be_first_in_group (insn)
30925 && reload_completed
30926 && current_sched_info->sched_max_insns_priority
30927 && rs6000_sched_restricted_insns_priority)
30930 /* Prioritize insns that can be dispatched only in the first
30931 dispatch slot. */
30932 if (rs6000_sched_restricted_insns_priority == 1)
30933 /* Attach highest priority to insn. This means that in
30934 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30935 precede 'priority' (critical path) considerations. */
30936 return current_sched_info->sched_max_insns_priority;
30937 else if (rs6000_sched_restricted_insns_priority == 2)
30938 /* Increase priority of insn by a minimal amount. This means that in
30939 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30940 considerations precede dispatch-slot restriction considerations. */
30941 return (priority + 1);
30944 if (rs6000_cpu == PROCESSOR_POWER6
30945 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30946 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30947 /* Attach highest priority to insn if the scheduler has just issued two
30948 stores and this instruction is a load, or two loads and this instruction
30949 is a store. Power6 wants loads and stores scheduled alternately
30950      when possible.  */
30951 return current_sched_info->sched_max_insns_priority;
30953 return priority;
30956 /* Return true if the instruction is nonpipelined on the Cell. */
30957 static bool
30958 is_nonpipeline_insn (rtx_insn *insn)
30960 enum attr_type type;
30961 if (!insn || !NONDEBUG_INSN_P (insn)
30962 || GET_CODE (PATTERN (insn)) == USE
30963 || GET_CODE (PATTERN (insn)) == CLOBBER)
30964 return false;
30966 type = get_attr_type (insn);
30967 if (type == TYPE_MUL
30968 || type == TYPE_DIV
30969 || type == TYPE_SDIV
30970 || type == TYPE_DDIV
30971 || type == TYPE_SSQRT
30972 || type == TYPE_DSQRT
30973 || type == TYPE_MFCR
30974 || type == TYPE_MFCRF
30975 || type == TYPE_MFJMPR)
30977 return true;
30979 return false;
30983 /* Return how many instructions the machine can issue per cycle. */
30985 static int
30986 rs6000_issue_rate (void)
30988 /* Unless scheduling for register pressure, use issue rate of 1 for
30989 first scheduling pass to decrease degradation. */
30990 if (!reload_completed && !flag_sched_pressure)
30991 return 1;
30993 switch (rs6000_cpu_attr) {
30994 case CPU_RS64A:
30995 case CPU_PPC601: /* ? */
30996 case CPU_PPC7450:
30997 return 3;
30998 case CPU_PPC440:
30999 case CPU_PPC603:
31000 case CPU_PPC750:
31001 case CPU_PPC7400:
31002 case CPU_PPC8540:
31003 case CPU_PPC8548:
31004 case CPU_CELL:
31005 case CPU_PPCE300C2:
31006 case CPU_PPCE300C3:
31007 case CPU_PPCE500MC:
31008 case CPU_PPCE500MC64:
31009 case CPU_PPCE5500:
31010 case CPU_PPCE6500:
31011 case CPU_TITAN:
31012 return 2;
31013 case CPU_PPC476:
31014 case CPU_PPC604:
31015 case CPU_PPC604E:
31016 case CPU_PPC620:
31017 case CPU_PPC630:
31018 return 4;
31019 case CPU_POWER4:
31020 case CPU_POWER5:
31021 case CPU_POWER6:
31022 case CPU_POWER7:
31023 return 5;
31024 case CPU_POWER8:
31025 return 7;
31026 case CPU_POWER9:
31027 return 6;
31028 default:
31029 return 1;
31033 /* Return how many instructions to look ahead for better insn
31034 scheduling. */
31036 static int
31037 rs6000_use_sched_lookahead (void)
31039 switch (rs6000_cpu_attr)
31041 case CPU_PPC8540:
31042 case CPU_PPC8548:
31043 return 4;
31045 case CPU_CELL:
31046 return (reload_completed ? 8 : 0);
31048 default:
31049 return 0;
31053 /* We are choosing insn from the ready queue. Return zero if INSN can be
31054 chosen. */
31055 static int
31056 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31058 if (ready_index == 0)
31059 return 0;
31061 if (rs6000_cpu_attr != CPU_CELL)
31062 return 0;
31064 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31066 if (!reload_completed
31067 || is_nonpipeline_insn (insn)
31068 || is_microcoded_insn (insn))
31069 return 1;
31071 return 0;
31074 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31075 and return true. */
31077 static bool
31078 find_mem_ref (rtx pat, rtx *mem_ref)
31080 const char * fmt;
31081 int i, j;
31083 /* stack_tie does not produce any real memory traffic. */
31084 if (tie_operand (pat, VOIDmode))
31085 return false;
31087 if (GET_CODE (pat) == MEM)
31089 *mem_ref = pat;
31090 return true;
31093 /* Recursively process the pattern. */
31094 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31096 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31098 if (fmt[i] == 'e')
31100 if (find_mem_ref (XEXP (pat, i), mem_ref))
31101 return true;
31103 else if (fmt[i] == 'E')
31104 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31106 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31107 return true;
31111 return false;
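/* Illustrative example (hypothetical RTL): passed the source of a load,
     (sign_extend:DI (mem:SI (reg:DI 4)))
   the recursion above descends through the SIGN_EXTEND operand, finds
   the MEM, stores it in *MEM_REF and returns true.  */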
31114 /* Determine if PAT is a PATTERN of a load insn. */
31116 static bool
31117 is_load_insn1 (rtx pat, rtx *load_mem)
31119   if (!pat)
31120 return false;
31122 if (GET_CODE (pat) == SET)
31123 return find_mem_ref (SET_SRC (pat), load_mem);
31125 if (GET_CODE (pat) == PARALLEL)
31127 int i;
31129 for (i = 0; i < XVECLEN (pat, 0); i++)
31130 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31131 return true;
31134 return false;
31137 /* Determine if INSN loads from memory. */
31139 static bool
31140 is_load_insn (rtx insn, rtx *load_mem)
31142 if (!insn || !INSN_P (insn))
31143 return false;
31145 if (CALL_P (insn))
31146 return false;
31148 return is_load_insn1 (PATTERN (insn), load_mem);
31151 /* Determine if PAT is a PATTERN of a store insn. */
31153 static bool
31154 is_store_insn1 (rtx pat, rtx *str_mem)
31156   if (!pat)
31157 return false;
31159 if (GET_CODE (pat) == SET)
31160 return find_mem_ref (SET_DEST (pat), str_mem);
31162 if (GET_CODE (pat) == PARALLEL)
31164 int i;
31166 for (i = 0; i < XVECLEN (pat, 0); i++)
31167 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31168 return true;
31171 return false;
31174 /* Determine if INSN stores to memory. */
31176 static bool
31177 is_store_insn (rtx insn, rtx *str_mem)
31179 if (!insn || !INSN_P (insn))
31180 return false;
31182 return is_store_insn1 (PATTERN (insn), str_mem);
31185 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31187 static bool
31188 is_power9_pairable_vec_type (enum attr_type type)
31190 switch (type)
31192 case TYPE_VECSIMPLE:
31193 case TYPE_VECCOMPLEX:
31194 case TYPE_VECDIV:
31195 case TYPE_VECCMP:
31196 case TYPE_VECPERM:
31197 case TYPE_VECFLOAT:
31198 case TYPE_VECFDIV:
31199 case TYPE_VECDOUBLE:
31200 return true;
31201 default:
31202 break;
31204 return false;
31207 /* Returns whether the dependence between INSN and NEXT is considered
31208 costly by the given target. */
31210 static bool
31211 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31213 rtx insn;
31214 rtx next;
31215 rtx load_mem, str_mem;
31217 /* If the flag is not enabled - no dependence is considered costly;
31218 allow all dependent insns in the same group.
31219 This is the most aggressive option. */
31220 if (rs6000_sched_costly_dep == no_dep_costly)
31221 return false;
31223 /* If the flag is set to 1 - a dependence is always considered costly;
31224 do not allow dependent instructions in the same group.
31225 This is the most conservative option. */
31226 if (rs6000_sched_costly_dep == all_deps_costly)
31227 return true;
31229 insn = DEP_PRO (dep);
31230 next = DEP_CON (dep);
31232 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31233 && is_load_insn (next, &load_mem)
31234 && is_store_insn (insn, &str_mem))
31235 /* Prevent load after store in the same group. */
31236 return true;
31238 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31239 && is_load_insn (next, &load_mem)
31240 && is_store_insn (insn, &str_mem)
31241 && DEP_TYPE (dep) == REG_DEP_TRUE
31242 && mem_locations_overlap(str_mem, load_mem))
31243 /* Prevent load after store in the same group if it is a true
31244 dependence. */
31245 return true;
31247 /* The flag is set to X; dependences with latency >= X are considered costly,
31248 and will not be scheduled in the same group. */
31249 if (rs6000_sched_costly_dep <= max_dep_latency
31250 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31251 return true;
31253 return false;
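/* Illustrative arithmetic for the final test above: with
   -msched-costly-dep=3 (so rs6000_sched_costly_dep == 3), a dependence
   of cost 4 at distance 0 is costly (4 - 0 >= 3) and keeps the two insns
   in separate groups, while the same dependence at distance 2 is not
   (4 - 2 < 3) and may share a group.  */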
31256 /* Return the next insn after INSN that is found before TAIL is reached,
31257 skipping any "non-active" insns - insns that will not actually occupy
31258 an issue slot. Return NULL_RTX if such an insn is not found. */
31260 static rtx_insn *
31261 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31263 if (insn == NULL_RTX || insn == tail)
31264 return NULL;
31266 while (1)
31268 insn = NEXT_INSN (insn);
31269 if (insn == NULL_RTX || insn == tail)
31270 return NULL;
31272 if (CALL_P (insn)
31273 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31274 || (NONJUMP_INSN_P (insn)
31275 && GET_CODE (PATTERN (insn)) != USE
31276 && GET_CODE (PATTERN (insn)) != CLOBBER
31277 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31278 break;
31280 return insn;
31283 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31285 static int
31286 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31288 int pos;
31289 int i;
31290 rtx_insn *tmp;
31291 enum attr_type type;
31293 type = get_attr_type (last_scheduled_insn);
31295 /* Try to issue fixed point divides back-to-back in pairs so they will be
31296 routed to separate execution units and execute in parallel. */
31297 if (type == TYPE_DIV && divide_cnt == 0)
31299 /* First divide has been scheduled. */
31300 divide_cnt = 1;
31302 /* Scan the ready list looking for another divide, if found move it
31303 to the end of the list so it is chosen next. */
31304 pos = lastpos;
31305 while (pos >= 0)
31307 if (recog_memoized (ready[pos]) >= 0
31308 && get_attr_type (ready[pos]) == TYPE_DIV)
31310 tmp = ready[pos];
31311 for (i = pos; i < lastpos; i++)
31312 ready[i] = ready[i + 1];
31313 ready[lastpos] = tmp;
31314 break;
31316 pos--;
31319 else
31321 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31322 divide_cnt = 0;
31324 /* Power9 can execute 2 vector operations and 2 vector loads in a single
31325 cycle. So try to pair up and alternate groups of vector and vector
31326 load instructions.
31328 To aid this formation, a counter is maintained to keep track of
31329 vec/vecload insns issued. The value of vec_load_pendulum maintains
31330 the current state with the following values:
31332 0 : Initial state, no vec/vecload group has been started.
31334 -1 : 1 vector load has been issued and another has been found on
31335 the ready list and moved to the end.
31337 -2 : 2 vector loads have been issued and a vector operation has
31338 been found and moved to the end of the ready list.
31340 -3 : 2 vector loads and a vector insn have been issued and a
31341 vector operation has been found and moved to the end of the
31342 ready list.
31344 1 : 1 vector insn has been issued and another has been found and
31345 moved to the end of the ready list.
31347 2 : 2 vector insns have been issued and a vector load has been
31348 found and moved to the end of the ready list.
31350 3 : 2 vector insns and a vector load have been issued and another
31351 vector load has been found and moved to the end of the ready
31352 list. */
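/* Illustrative walk through the states above, assuming each partner insn
   is found on the ready list: issuing vecload, vecload, vec op, vec op
   moves vec_load_pendulum 0 -> -1 -> -2 -> -3, after which the counter
   resets to 0 and a new group may start.  */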
31353 if (type == TYPE_VECLOAD)
31355 /* Issued a vecload. */
31356 if (vec_load_pendulum == 0)
31358 /* We issued a single vecload, look for another and move it to
31359 the end of the ready list so it will be scheduled next.
31360 Set pendulum if found. */
31361 pos = lastpos;
31362 while (pos >= 0)
31364 if (recog_memoized (ready[pos]) >= 0
31365 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
31367 tmp = ready[pos];
31368 for (i = pos; i < lastpos; i++)
31369 ready[i] = ready[i + 1];
31370 ready[lastpos] = tmp;
31371 vec_load_pendulum = -1;
31372 return cached_can_issue_more;
31374 pos--;
31377 else if (vec_load_pendulum == -1)
31379 /* This is the second vecload we've issued, search the ready
31380 list for a vector operation so we can try to schedule a
31381 pair of those next. If found move to the end of the ready
31382 list so it is scheduled next and set the pendulum. */
31383 pos = lastpos;
31384 while (pos >= 0)
31386 if (recog_memoized (ready[pos]) >= 0
31387 && is_power9_pairable_vec_type (
31388 get_attr_type (ready[pos])))
31390 tmp = ready[pos];
31391 for (i = pos; i < lastpos; i++)
31392 ready[i] = ready[i + 1];
31393 ready[lastpos] = tmp;
31394 vec_load_pendulum = -2;
31395 return cached_can_issue_more;
31397 pos--;
31400 else if (vec_load_pendulum == 2)
31402 /* Two vector ops have been issued and we've just issued a
31403 vecload, look for another vecload and move to end of ready
31404 list if found. */
31405 pos = lastpos;
31406 while (pos >= 0)
31408 if (recog_memoized (ready[pos]) >= 0
31409 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
31411 tmp = ready[pos];
31412 for (i = pos; i < lastpos; i++)
31413 ready[i] = ready[i + 1];
31414 ready[lastpos] = tmp;
31415 /* Set pendulum so that next vecload will be seen as
31416 finishing a group, not start of one. */
31417 vec_load_pendulum = 3;
31418 return cached_can_issue_more;
31420 pos--;
31424 else if (is_power9_pairable_vec_type (type))
31426 /* Issued a vector operation. */
31427 if (vec_load_pendulum == 0)
31428 /* We issued a single vec op, look for another and move it
31429 to the end of the ready list so it will be scheduled next.
31430 Set pendulum if found. */
31432 pos = lastpos;
31433 while (pos >= 0)
31435 if (recog_memoized (ready[pos]) >= 0
31436 && is_power9_pairable_vec_type (
31437 get_attr_type (ready[pos])))
31439 tmp = ready[pos];
31440 for (i = pos; i < lastpos; i++)
31441 ready[i] = ready[i + 1];
31442 ready[lastpos] = tmp;
31443 vec_load_pendulum = 1;
31444 return cached_can_issue_more;
31446 pos--;
31449 else if (vec_load_pendulum == 1)
31451 /* This is the second vec op we've issued, search the ready
31452 list for a vecload operation so we can try to schedule a
31453 pair of those next. If found move to the end of the ready
31454 list so it is scheduled next and set the pendulum. */
31455 pos = lastpos;
31456 while (pos >= 0)
31458 if (recog_memoized (ready[pos]) >= 0
31459 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
31461 tmp = ready[pos];
31462 for (i = pos; i < lastpos; i++)
31463 ready[i] = ready[i + 1];
31464 ready[lastpos] = tmp;
31465 vec_load_pendulum = 2;
31466 return cached_can_issue_more;
31468 pos--;
31471 else if (vec_load_pendulum == -2)
31473 /* Two vecload ops have been issued and we've just issued a
31474 vec op, look for another vec op and move to end of ready
31475 list if found. */
31476 pos = lastpos;
31477 while (pos >= 0)
31479 if (recog_memoized (ready[pos]) >= 0
31480 && is_power9_pairable_vec_type (
31481 get_attr_type (ready[pos])))
31483 tmp = ready[pos];
31484 for (i = pos; i < lastpos; i++)
31485 ready[i] = ready[i + 1];
31486 ready[lastpos] = tmp;
31487 /* Set pendulum so that next vec op will be seen as
31488 finishing a group, not start of one. */
31489 vec_load_pendulum = -3;
31490 return cached_can_issue_more;
31492 pos--;
31497 /* We've either finished a vec/vecload group, couldn't find an insn to
31498    continue the current group, or the last insn had nothing to do
31499    with a group.  In any case, reset the pendulum.  */
31500 vec_load_pendulum = 0;
31503 return cached_can_issue_more;
31506 /* We are about to begin issuing insns for this clock cycle. */
31508 static int
31509 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31510 rtx_insn **ready ATTRIBUTE_UNUSED,
31511 int *pn_ready ATTRIBUTE_UNUSED,
31512 int clock_var ATTRIBUTE_UNUSED)
31514 int n_ready = *pn_ready;
31516 if (sched_verbose)
31517 fprintf (dump, "// rs6000_sched_reorder :\n");
31519 /* Reorder the ready list, if the second to last ready insn
31520    is a nonpipelined insn.  */
31521 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31523 if (is_nonpipeline_insn (ready[n_ready - 1])
31524 && (recog_memoized (ready[n_ready - 2]) > 0))
31525 /* Simply swap first two insns. */
31526 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31529 if (rs6000_cpu == PROCESSOR_POWER6)
31530 load_store_pendulum = 0;
31532 return rs6000_issue_rate ();
31535 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31537 static int
31538 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31539 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31541 if (sched_verbose)
31542 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31544 /* For Power6, we need to handle some special cases to try and keep the
31545 store queue from overflowing and triggering expensive flushes.
31547 This code monitors how load and store instructions are being issued
31548 and skews the ready list one way or the other to increase the likelihood
31549 that a desired instruction is issued at the proper time.
31551 A couple of things are done. First, we maintain a "load_store_pendulum"
31552 to track the current state of load/store issue.
31554 - If the pendulum is at zero, then no loads or stores have been
31555 issued in the current cycle so we do nothing.
31557 - If the pendulum is 1, then a single load has been issued in this
31558 cycle and we attempt to locate another load in the ready list to
31559 issue with it.
31561 - If the pendulum is -2, then two stores have already been
31562 issued in this cycle, so we increase the priority of the first load
31563      in the ready list to increase its likelihood of being chosen first
31564 in the next cycle.
31566 - If the pendulum is -1, then a single store has been issued in this
31567 cycle and we attempt to locate another store in the ready list to
31568 issue with it, preferring a store to an adjacent memory location to
31569 facilitate store pairing in the store queue.
31571 - If the pendulum is 2, then two loads have already been
31572 issued in this cycle, so we increase the priority of the first store
31573      in the ready list to increase its likelihood of being chosen first
31574 in the next cycle.
31576 - If the pendulum < -2 or > 2, then do nothing.
31578      Note: This code covers the most common scenarios.  There exist
31579      non-load/store instructions which make use of the LSU and which
31580      would need to be accounted for to strictly model the behavior
31581      of the machine.  Those instructions are currently unaccounted
31582      for to help minimize compile time overhead of this code.  */
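/* Illustrative walk: after one load issues, the pendulum moves to 1 and
   the code below tries to pull a second load to the head of the ready
   list; after two stores it sits at -2 and the first ready load only
   receives a priority bump for the next cycle.  */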
31584 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31586 int pos;
31587 int i;
31588 rtx_insn *tmp;
31589 rtx load_mem, str_mem;
31591 if (is_store_insn (last_scheduled_insn, &str_mem))
31592 /* Issuing a store, swing the load_store_pendulum to the left */
31593 load_store_pendulum--;
31594 else if (is_load_insn (last_scheduled_insn, &load_mem))
31595 /* Issuing a load, swing the load_store_pendulum to the right */
31596 load_store_pendulum++;
31597 else
31598 return cached_can_issue_more;
31600 /* If the pendulum is balanced, or there is only one instruction on
31601 the ready list, then all is well, so return. */
31602 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31603 return cached_can_issue_more;
31605 if (load_store_pendulum == 1)
31607 /* A load has been issued in this cycle. Scan the ready list
31608 for another load to issue with it */
31609 pos = *pn_ready-1;
31611 while (pos >= 0)
31613 if (is_load_insn (ready[pos], &load_mem))
31615 /* Found a load. Move it to the head of the ready list,
31616                  and adjust its priority so that it is more likely to
31617 stay there */
31618 tmp = ready[pos];
31619 for (i=pos; i<*pn_ready-1; i++)
31620 ready[i] = ready[i + 1];
31621 ready[*pn_ready-1] = tmp;
31623 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31624 INSN_PRIORITY (tmp)++;
31625 break;
31627 pos--;
31630 else if (load_store_pendulum == -2)
31632 /* Two stores have been issued in this cycle. Increase the
31633 priority of the first load in the ready list to favor it for
31634 issuing in the next cycle. */
31635 pos = *pn_ready-1;
31637 while (pos >= 0)
31639 if (is_load_insn (ready[pos], &load_mem)
31640 && !sel_sched_p ()
31641 && INSN_PRIORITY_KNOWN (ready[pos]))
31643 INSN_PRIORITY (ready[pos])++;
31645 /* Adjust the pendulum to account for the fact that a load
31646 was found and increased in priority. This is to prevent
31647 increasing the priority of multiple loads */
31648 load_store_pendulum--;
31650 break;
31652 pos--;
31655 else if (load_store_pendulum == -1)
31657 /* A store has been issued in this cycle. Scan the ready list for
31658 another store to issue with it, preferring a store to an adjacent
31659 memory location */
31660 int first_store_pos = -1;
31662 pos = *pn_ready-1;
31664 while (pos >= 0)
31666 if (is_store_insn (ready[pos], &str_mem))
31668 rtx str_mem2;
31669 /* Maintain the index of the first store found on the
31670 list */
31671 if (first_store_pos == -1)
31672 first_store_pos = pos;
31674 if (is_store_insn (last_scheduled_insn, &str_mem2)
31675 && adjacent_mem_locations (str_mem, str_mem2))
31677 /* Found an adjacent store. Move it to the head of the
31678                      ready list, and adjust its priority so that it is
31679 more likely to stay there */
31680 tmp = ready[pos];
31681 for (i=pos; i<*pn_ready-1; i++)
31682 ready[i] = ready[i + 1];
31683 ready[*pn_ready-1] = tmp;
31685 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31686 INSN_PRIORITY (tmp)++;
31688 first_store_pos = -1;
31690 break;
31693 pos--;
31696 if (first_store_pos >= 0)
31698 /* An adjacent store wasn't found, but a non-adjacent store was,
31699 so move the non-adjacent store to the front of the ready
31700 list, and adjust its priority so that it is more likely to
31701 stay there. */
31702 tmp = ready[first_store_pos];
31703 for (i=first_store_pos; i<*pn_ready-1; i++)
31704 ready[i] = ready[i + 1];
31705 ready[*pn_ready-1] = tmp;
31706 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31707 INSN_PRIORITY (tmp)++;
31710 else if (load_store_pendulum == 2)
31712 /* Two loads have been issued in this cycle. Increase the priority
31713 of the first store in the ready list to favor it for issuing in
31714 the next cycle. */
31715 pos = *pn_ready-1;
31717 while (pos >= 0)
31719 if (is_store_insn (ready[pos], &str_mem)
31720 && !sel_sched_p ()
31721 && INSN_PRIORITY_KNOWN (ready[pos]))
31723 INSN_PRIORITY (ready[pos])++;
31725 /* Adjust the pendulum to account for the fact that a store
31726 was found and increased in priority. This is to prevent
31727 increasing the priority of multiple stores */
31728 load_store_pendulum++;
31730 break;
31732 pos--;
31737 /* Do Power9 dependent reordering if necessary. */
31738 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31739 && recog_memoized (last_scheduled_insn) >= 0)
31740 return power9_sched_reorder2 (ready, *pn_ready - 1);
31742 return cached_can_issue_more;
31745 /* Return whether the presence of INSN causes a dispatch group termination
31746 of group WHICH_GROUP.
31748 If WHICH_GROUP == current_group, this function will return true if INSN
31749    causes the termination of the current group (i.e., the dispatch group to
31750 which INSN belongs). This means that INSN will be the last insn in the
31751 group it belongs to.
31753 If WHICH_GROUP == previous_group, this function will return true if INSN
31754    causes the termination of the previous group (i.e., the dispatch group
31755    that precedes the group to which INSN belongs).  This means that INSN
31756    will be the first insn in the group it belongs to.  */
31758 static bool
31759 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31761 bool first, last;
31763 if (! insn)
31764 return false;
31766 first = insn_must_be_first_in_group (insn);
31767 last = insn_must_be_last_in_group (insn);
31769 if (first && last)
31770 return true;
31772 if (which_group == current_group)
31773 return last;
31774 else if (which_group == previous_group)
31775 return first;
31777 return false;
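/* Example (illustrative): on POWER6 a TYPE_SYNC insn must be both first
   and last in its dispatch group (see the tables below), so for such an
   insn this function returns true for either value of WHICH_GROUP.  */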
31781 static bool
31782 insn_must_be_first_in_group (rtx_insn *insn)
31784 enum attr_type type;
31786 if (!insn
31787 || NOTE_P (insn)
31788 || DEBUG_INSN_P (insn)
31789 || GET_CODE (PATTERN (insn)) == USE
31790 || GET_CODE (PATTERN (insn)) == CLOBBER)
31791 return false;
31793 switch (rs6000_cpu)
31795 case PROCESSOR_POWER5:
31796 if (is_cracked_insn (insn))
31797 return true;
31798 case PROCESSOR_POWER4:
31799 if (is_microcoded_insn (insn))
31800 return true;
31802 if (!rs6000_sched_groups)
31803 return false;
31805 type = get_attr_type (insn);
31807 switch (type)
31809 case TYPE_MFCR:
31810 case TYPE_MFCRF:
31811 case TYPE_MTCR:
31812 case TYPE_DELAYED_CR:
31813 case TYPE_CR_LOGICAL:
31814 case TYPE_MTJMPR:
31815 case TYPE_MFJMPR:
31816 case TYPE_DIV:
31817 case TYPE_LOAD_L:
31818 case TYPE_STORE_C:
31819 case TYPE_ISYNC:
31820 case TYPE_SYNC:
31821 return true;
31822 default:
31823 break;
31825 break;
31826 case PROCESSOR_POWER6:
31827 type = get_attr_type (insn);
31829 switch (type)
31831 case TYPE_EXTS:
31832 case TYPE_CNTLZ:
31833 case TYPE_TRAP:
31834 case TYPE_MUL:
31835 case TYPE_INSERT:
31836 case TYPE_FPCOMPARE:
31837 case TYPE_MFCR:
31838 case TYPE_MTCR:
31839 case TYPE_MFJMPR:
31840 case TYPE_MTJMPR:
31841 case TYPE_ISYNC:
31842 case TYPE_SYNC:
31843 case TYPE_LOAD_L:
31844 case TYPE_STORE_C:
31845 return true;
31846 case TYPE_SHIFT:
31847 if (get_attr_dot (insn) == DOT_NO
31848 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31849 return true;
31850 else
31851 break;
31852 case TYPE_DIV:
31853 if (get_attr_size (insn) == SIZE_32)
31854 return true;
31855 else
31856 break;
31857 case TYPE_LOAD:
31858 case TYPE_STORE:
31859 case TYPE_FPLOAD:
31860 case TYPE_FPSTORE:
31861 if (get_attr_update (insn) == UPDATE_YES)
31862 return true;
31863 else
31864 break;
31865 default:
31866 break;
31868 break;
31869 case PROCESSOR_POWER7:
31870 type = get_attr_type (insn);
31872 switch (type)
31874 case TYPE_CR_LOGICAL:
31875 case TYPE_MFCR:
31876 case TYPE_MFCRF:
31877 case TYPE_MTCR:
31878 case TYPE_DIV:
31879 case TYPE_ISYNC:
31880 case TYPE_LOAD_L:
31881 case TYPE_STORE_C:
31882 case TYPE_MFJMPR:
31883 case TYPE_MTJMPR:
31884 return true;
31885 case TYPE_MUL:
31886 case TYPE_SHIFT:
31887 case TYPE_EXTS:
31888 if (get_attr_dot (insn) == DOT_YES)
31889 return true;
31890 else
31891 break;
31892 case TYPE_LOAD:
31893 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31894 || get_attr_update (insn) == UPDATE_YES)
31895 return true;
31896 else
31897 break;
31898 case TYPE_STORE:
31899 case TYPE_FPLOAD:
31900 case TYPE_FPSTORE:
31901 if (get_attr_update (insn) == UPDATE_YES)
31902 return true;
31903 else
31904 break;
31905 default:
31906 break;
31908 break;
31909 case PROCESSOR_POWER8:
31910 type = get_attr_type (insn);
31912 switch (type)
31914 case TYPE_CR_LOGICAL:
31915 case TYPE_DELAYED_CR:
31916 case TYPE_MFCR:
31917 case TYPE_MFCRF:
31918 case TYPE_MTCR:
31919 case TYPE_SYNC:
31920 case TYPE_ISYNC:
31921 case TYPE_LOAD_L:
31922 case TYPE_STORE_C:
31923 case TYPE_VECSTORE:
31924 case TYPE_MFJMPR:
31925 case TYPE_MTJMPR:
31926 return true;
31927 case TYPE_SHIFT:
31928 case TYPE_EXTS:
31929 case TYPE_MUL:
31930 if (get_attr_dot (insn) == DOT_YES)
31931 return true;
31932 else
31933 break;
31934 case TYPE_LOAD:
31935 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31936 || get_attr_update (insn) == UPDATE_YES)
31937 return true;
31938 else
31939 break;
31940 case TYPE_STORE:
31941 if (get_attr_update (insn) == UPDATE_YES
31942 && get_attr_indexed (insn) == INDEXED_YES)
31943 return true;
31944 else
31945 break;
31946 default:
31947 break;
31949 break;
31950 default:
31951 break;
31954 return false;
31957 static bool
31958 insn_must_be_last_in_group (rtx_insn *insn)
31960 enum attr_type type;
31962 if (!insn
31963 || NOTE_P (insn)
31964 || DEBUG_INSN_P (insn)
31965 || GET_CODE (PATTERN (insn)) == USE
31966 || GET_CODE (PATTERN (insn)) == CLOBBER)
31967 return false;
31969 switch (rs6000_cpu) {
31970 case PROCESSOR_POWER4:
31971 case PROCESSOR_POWER5:
31972 if (is_microcoded_insn (insn))
31973 return true;
31975 if (is_branch_slot_insn (insn))
31976 return true;
31978 break;
31979 case PROCESSOR_POWER6:
31980 type = get_attr_type (insn);
31982 switch (type)
31984 case TYPE_EXTS:
31985 case TYPE_CNTLZ:
31986 case TYPE_TRAP:
31987 case TYPE_MUL:
31988 case TYPE_FPCOMPARE:
31989 case TYPE_MFCR:
31990 case TYPE_MTCR:
31991 case TYPE_MFJMPR:
31992 case TYPE_MTJMPR:
31993 case TYPE_ISYNC:
31994 case TYPE_SYNC:
31995 case TYPE_LOAD_L:
31996 case TYPE_STORE_C:
31997 return true;
31998 case TYPE_SHIFT:
31999 if (get_attr_dot (insn) == DOT_NO
32000 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32001 return true;
32002 else
32003 break;
32004 case TYPE_DIV:
32005 if (get_attr_size (insn) == SIZE_32)
32006 return true;
32007 else
32008 break;
32009 default:
32010 break;
32012 break;
32013 case PROCESSOR_POWER7:
32014 type = get_attr_type (insn);
32016 switch (type)
32018 case TYPE_ISYNC:
32019 case TYPE_SYNC:
32020 case TYPE_LOAD_L:
32021 case TYPE_STORE_C:
32022 return true;
32023 case TYPE_LOAD:
32024 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32025 && get_attr_update (insn) == UPDATE_YES)
32026 return true;
32027 else
32028 break;
32029 case TYPE_STORE:
32030 if (get_attr_update (insn) == UPDATE_YES
32031 && get_attr_indexed (insn) == INDEXED_YES)
32032 return true;
32033 else
32034 break;
32035 default:
32036 break;
32038 break;
32039 case PROCESSOR_POWER8:
32040 type = get_attr_type (insn);
32042 switch (type)
32044 case TYPE_MFCR:
32045 case TYPE_MTCR:
32046 case TYPE_ISYNC:
32047 case TYPE_SYNC:
32048 case TYPE_LOAD_L:
32049 case TYPE_STORE_C:
32050 return true;
32051 case TYPE_LOAD:
32052 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32053 && get_attr_update (insn) == UPDATE_YES)
32054 return true;
32055 else
32056 break;
32057 case TYPE_STORE:
32058 if (get_attr_update (insn) == UPDATE_YES
32059 && get_attr_indexed (insn) == INDEXED_YES)
32060 return true;
32061 else
32062 break;
32063 default:
32064 break;
32066 break;
32067 default:
32068 break;
32071 return false;
32074 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32075 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32077 static bool
32078 is_costly_group (rtx *group_insns, rtx next_insn)
32080 int i;
32081 int issue_rate = rs6000_issue_rate ();
32083 for (i = 0; i < issue_rate; i++)
32085 sd_iterator_def sd_it;
32086 dep_t dep;
32087 rtx insn = group_insns[i];
32089 if (!insn)
32090 continue;
32092 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32094 rtx next = DEP_CON (dep);
32096 if (next == next_insn
32097 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32098 return true;
32102 return false;
32105 /* Utility of the function redefine_groups.
32106 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32107 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32108 to keep it "far" (in a separate group) from GROUP_INSNS, following
32109 one of the following schemes, depending on the value of the flag
32110 -minsert_sched_nops = X:
32111 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32112 in order to force NEXT_INSN into a separate group.
32113 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32114 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32115 insertion (has a group just ended, how many vacant issue slots remain in the
32116 last group, and how many dispatch groups were encountered so far). */
32118 static int
32119 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32120 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32121 int *group_count)
32123 rtx nop;
32124 bool force;
32125 int issue_rate = rs6000_issue_rate ();
32126 bool end = *group_end;
32127 int i;
32129 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32130 return can_issue_more;
32132 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32133 return can_issue_more;
32135 force = is_costly_group (group_insns, next_insn);
32136 if (!force)
32137 return can_issue_more;
32139 if (sched_verbose > 6)
32140 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
32141 *group_count ,can_issue_more);
32143 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32145 if (*group_end)
32146 can_issue_more = 0;
32148 /* Since only a branch can be issued in the last issue_slot, it is
32149 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32150 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32151 in this case the last nop will start a new group and the branch
32152 will be forced to the new group. */
32153 if (can_issue_more && !is_branch_slot_insn (next_insn))
32154 can_issue_more--;
32156 /* Do we have a special group ending nop? */
32157 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
32158 || rs6000_cpu_attr == CPU_POWER8)
32160 nop = gen_group_ending_nop ();
32161 emit_insn_before (nop, next_insn);
32162 can_issue_more = 0;
32164 else
32165 while (can_issue_more > 0)
32167 nop = gen_nop ();
32168 emit_insn_before (nop, next_insn);
32169 can_issue_more--;
32172 *group_end = true;
32173 return 0;
32176 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32178 int n_nops = rs6000_sched_insert_nops;
32180 /* Nops can't be issued from the branch slot, so the effective
32181 issue_rate for nops is 'issue_rate - 1'. */
32182 if (can_issue_more == 0)
32183 can_issue_more = issue_rate;
32184 can_issue_more--;
32185 if (can_issue_more == 0)
32187 can_issue_more = issue_rate - 1;
32188 (*group_count)++;
32189 end = true;
32190 for (i = 0; i < issue_rate; i++)
32192 group_insns[i] = 0;
32196 while (n_nops > 0)
32198 nop = gen_nop ();
32199 emit_insn_before (nop, next_insn);
32200 if (can_issue_more == issue_rate - 1) /* new group begins */
32201 end = false;
32202 can_issue_more--;
32203 if (can_issue_more == 0)
32205 can_issue_more = issue_rate - 1;
32206 (*group_count)++;
32207 end = true;
32208 for (i = 0; i < issue_rate; i++)
32210 group_insns[i] = 0;
32213 n_nops--;
32216 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32217 can_issue_more++;
32219 /* Is next_insn going to start a new group? */
32220 *group_end
32221 = (end
32222 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32223 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32224 || (can_issue_more < issue_rate &&
32225 insn_terminates_group_p (next_insn, previous_group)));
32226 if (*group_end && end)
32227 (*group_count)--;
32229 if (sched_verbose > 6)
32230 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32231 *group_count, can_issue_more);
32232 return can_issue_more;
32235 return can_issue_more;
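/* Worked example (illustrative): with issue_rate 4 and two insns already
   in the current group, can_issue_more is 2.  Under
   -minsert-sched-nops=sched_finish_regroup_exact a costly non-branch
   NEXT_INSN then gets a single ordinary nop (can_issue_more - 1) on CPUs
   without a group-ending nop, whereas -minsert-sched-nops=2 emits
   exactly two nops regardless.  */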
32238 /* This function tries to synch the dispatch groups that the compiler "sees"
32239 with the dispatch groups that the processor dispatcher is expected to
32240 form in practice. It tries to achieve this synchronization by forcing the
32241 estimated processor grouping on the compiler (as opposed to the function
32242    'pad_groups' which tries to force the scheduler's grouping on the processor).
32244 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32245 examines the (estimated) dispatch groups that will be formed by the processor
32246 dispatcher. It marks these group boundaries to reflect the estimated
32247 processor grouping, overriding the grouping that the scheduler had marked.
32248 Depending on the value of the flag '-minsert-sched-nops' this function can
32249 force certain insns into separate groups or force a certain distance between
32250 them by inserting nops, for example, if there exists a "costly dependence"
32251 between the insns.
32253 The function estimates the group boundaries that the processor will form as
32254 follows: It keeps track of how many vacant issue slots are available after
32255 each insn. A subsequent insn will start a new group if one of the following
32256 4 cases applies:
32257 - no more vacant issue slots remain in the current dispatch group.
32258 - only the last issue slot, which is the branch slot, is vacant, but the next
32259 insn is not a branch.
32260    - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32261 which means that a cracked insn (which occupies two issue slots) can't be
32262 issued in this group.
32263    - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32264 start a new group. */
32266 static int
32267 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32268 rtx_insn *tail)
32270 rtx_insn *insn, *next_insn;
32271 int issue_rate;
32272 int can_issue_more;
32273 int slot, i;
32274 bool group_end;
32275 int group_count = 0;
32276 rtx *group_insns;
32278 /* Initialize. */
32279 issue_rate = rs6000_issue_rate ();
32280 group_insns = XALLOCAVEC (rtx, issue_rate);
32281 for (i = 0; i < issue_rate; i++)
32283 group_insns[i] = 0;
32285 can_issue_more = issue_rate;
32286 slot = 0;
32287 insn = get_next_active_insn (prev_head_insn, tail);
32288 group_end = false;
32290 while (insn != NULL_RTX)
32292 slot = (issue_rate - can_issue_more);
32293 group_insns[slot] = insn;
32294 can_issue_more =
32295 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32296 if (insn_terminates_group_p (insn, current_group))
32297 can_issue_more = 0;
32299 next_insn = get_next_active_insn (insn, tail);
32300 if (next_insn == NULL_RTX)
32301 return group_count + 1;
32303 /* Is next_insn going to start a new group? */
32304 group_end
32305 = (can_issue_more == 0
32306 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32307 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32308 || (can_issue_more < issue_rate &&
32309 insn_terminates_group_p (next_insn, previous_group)));
32311 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32312 next_insn, &group_end, can_issue_more,
32313 &group_count);
32315 if (group_end)
32317 group_count++;
32318 can_issue_more = 0;
32319 for (i = 0; i < issue_rate; i++)
32321 group_insns[i] = 0;
32325 if (GET_MODE (next_insn) == TImode && can_issue_more)
32326 PUT_MODE (next_insn, VOIDmode);
32327 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32328 PUT_MODE (next_insn, TImode);
32330 insn = next_insn;
32331 if (can_issue_more == 0)
32332 can_issue_more = issue_rate;
32333 } /* while */
32335 return group_count;
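/* Note (illustrative): the group boundaries computed above are recorded
   by giving the first insn of each estimated group TImode via PUT_MODE;
   pad_groups below reads the same marking back with GET_MODE.  */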
32338 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32339 dispatch group boundaries that the scheduler had marked. Pad with nops
32340 any dispatch groups which have vacant issue slots, in order to force the
32341 scheduler's grouping on the processor dispatcher. The function
32342 returns the number of dispatch groups found. */
32344 static int
32345 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32346 rtx_insn *tail)
32348 rtx_insn *insn, *next_insn;
32349 rtx nop;
32350 int issue_rate;
32351 int can_issue_more;
32352 int group_end;
32353 int group_count = 0;
32355 /* Initialize issue_rate. */
32356 issue_rate = rs6000_issue_rate ();
32357 can_issue_more = issue_rate;
32359 insn = get_next_active_insn (prev_head_insn, tail);
32360 next_insn = get_next_active_insn (insn, tail);
32362 while (insn != NULL_RTX)
32364 can_issue_more =
32365 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32367 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32369 if (next_insn == NULL_RTX)
32370 break;
32372 if (group_end)
32374 /* If the scheduler had marked group termination at this location
32375 (between insn and next_insn), and neither insn nor next_insn will
32376 force group termination, pad the group with nops to force group
32377 termination. */
32378 if (can_issue_more
32379 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32380 && !insn_terminates_group_p (insn, current_group)
32381 && !insn_terminates_group_p (next_insn, previous_group))
32383 if (!is_branch_slot_insn (next_insn))
32384 can_issue_more--;
32386 while (can_issue_more)
32388 nop = gen_nop ();
32389 emit_insn_before (nop, next_insn);
32390 can_issue_more--;
32394 can_issue_more = issue_rate;
32395 group_count++;
32398 insn = next_insn;
32399 next_insn = get_next_active_insn (insn, tail);
32402 return group_count;
32405 /* We're beginning a new block. Initialize data structures as necessary. */
32407 static void
32408 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32409 int sched_verbose ATTRIBUTE_UNUSED,
32410 int max_ready ATTRIBUTE_UNUSED)
32412 last_scheduled_insn = NULL;
32413 load_store_pendulum = 0;
32414 divide_cnt = 0;
32415 vec_load_pendulum = 0;
32418 /* The following function is called at the end of scheduling BB.
32419    After reload, it inserts nops to enforce insn group bundling.  */
32421 static void
32422 rs6000_sched_finish (FILE *dump, int sched_verbose)
32424 int n_groups;
32426 if (sched_verbose)
32427 fprintf (dump, "=== Finishing schedule.\n");
32429 if (reload_completed && rs6000_sched_groups)
32431 /* Do not run sched_finish hook when selective scheduling enabled. */
32432 if (sel_sched_p ())
32433 return;
32435 if (rs6000_sched_insert_nops == sched_finish_none)
32436 return;
32438 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32439 n_groups = pad_groups (dump, sched_verbose,
32440 current_sched_info->prev_head,
32441 current_sched_info->next_tail);
32442 else
32443 n_groups = redefine_groups (dump, sched_verbose,
32444 current_sched_info->prev_head,
32445 current_sched_info->next_tail);
32447 if (sched_verbose >= 6)
32449 fprintf (dump, "ngroups = %d\n", n_groups);
32450 print_rtl (dump, current_sched_info->prev_head);
32451 fprintf (dump, "Done finish_sched\n");
32456 struct rs6000_sched_context
32458 short cached_can_issue_more;
32459 rtx_insn *last_scheduled_insn;
32460 int load_store_pendulum;
32461 int divide_cnt;
32462 int vec_load_pendulum;
32465 typedef struct rs6000_sched_context rs6000_sched_context_def;
32466 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32468 /* Allocate store for new scheduling context. */
32469 static void *
32470 rs6000_alloc_sched_context (void)
32472 return xmalloc (sizeof (rs6000_sched_context_def));
32475 /* Initialize _SC with clean data if CLEAN_P is true,
32476    and from the global context otherwise.  */
32477 static void
32478 rs6000_init_sched_context (void *_sc, bool clean_p)
32480 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32482 if (clean_p)
32484 sc->cached_can_issue_more = 0;
32485 sc->last_scheduled_insn = NULL;
32486 sc->load_store_pendulum = 0;
32487 sc->divide_cnt = 0;
32488 sc->vec_load_pendulum = 0;
32490 else
32492 sc->cached_can_issue_more = cached_can_issue_more;
32493 sc->last_scheduled_insn = last_scheduled_insn;
32494 sc->load_store_pendulum = load_store_pendulum;
32495 sc->divide_cnt = divide_cnt;
32496 sc->vec_load_pendulum = vec_load_pendulum;
32500 /* Sets the global scheduling context to the one pointed to by _SC. */
32501 static void
32502 rs6000_set_sched_context (void *_sc)
32504 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32506 gcc_assert (sc != NULL);
32508 cached_can_issue_more = sc->cached_can_issue_more;
32509 last_scheduled_insn = sc->last_scheduled_insn;
32510 load_store_pendulum = sc->load_store_pendulum;
32511 divide_cnt = sc->divide_cnt;
32512 vec_load_pendulum = sc->vec_load_pendulum;
32515 /* Free _SC. */
32516 static void
32517 rs6000_free_sched_context (void *_sc)
32519 gcc_assert (_sc != NULL);
32521 free (_sc);
32525 /* Length in units of the trampoline for entering a nested function. */
32527 int
32528 rs6000_trampoline_size (void)
32530 int ret = 0;
32532 switch (DEFAULT_ABI)
32534 default:
32535 gcc_unreachable ();
32537 case ABI_AIX:
32538 ret = (TARGET_32BIT) ? 12 : 24;
32539 break;
32541 case ABI_ELFv2:
32542 gcc_assert (!TARGET_32BIT);
32543 ret = 32;
32544 break;
32546 case ABI_DARWIN:
32547 case ABI_V4:
32548 ret = (TARGET_32BIT) ? 40 : 48;
32549 break;
32552 return ret;
32555 /* Emit RTL insns to initialize the variable parts of a trampoline.
32556 FNADDR is an RTX for the address of the function's pure code.
32557 CXT is an RTX for the static chain value for the function. */
32559 static void
32560 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32562 int regsize = (TARGET_32BIT) ? 4 : 8;
32563 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32564 rtx ctx_reg = force_reg (Pmode, cxt);
32565 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32567 switch (DEFAULT_ABI)
32569 default:
32570 gcc_unreachable ();
32572 /* Under AIX, just build the 3 word function descriptor */
32573 case ABI_AIX:
32575 rtx fnmem, fn_reg, toc_reg;
32577 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32578 error ("You cannot take the address of a nested function if you use "
32579 "the -mno-pointers-to-nested-functions option.");
32581 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32582 fn_reg = gen_reg_rtx (Pmode);
32583 toc_reg = gen_reg_rtx (Pmode);
32585 /* Macro to shorten the code expansions below. */
32586 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32588 m_tramp = replace_equiv_address (m_tramp, addr);
32590 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32591 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32592 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32593 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32594 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32596 # undef MEM_PLUS
32598 break;
32600 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32601 case ABI_ELFv2:
32602 case ABI_DARWIN:
32603 case ABI_V4:
32604 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32605 LCT_NORMAL, VOIDmode, 4,
32606 addr, Pmode,
32607 GEN_INT (rs6000_trampoline_size ()), SImode,
32608 fnaddr, Pmode,
32609 ctx_reg, Pmode);
32610 break;
32615 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32616 identifier as an argument, so the front end shouldn't look it up. */
32618 static bool
32619 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32621 return is_attribute_p ("altivec", attr_id);
32624 /* Handle the "altivec" attribute. The attribute may have
32625 arguments as follows:
32627 __attribute__((altivec(vector__)))
32628 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32629 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32631 and may appear more than once (e.g., 'vector bool char') in a
32632 given declaration. */
32634 static tree
32635 rs6000_handle_altivec_attribute (tree *node,
32636 tree name ATTRIBUTE_UNUSED,
32637 tree args,
32638 int flags ATTRIBUTE_UNUSED,
32639 bool *no_add_attrs)
32641 tree type = *node, result = NULL_TREE;
32642 machine_mode mode;
32643 int unsigned_p;
32644 char altivec_type
32645 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32646 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32647 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32648 : '?');
32650 while (POINTER_TYPE_P (type)
32651 || TREE_CODE (type) == FUNCTION_TYPE
32652 || TREE_CODE (type) == METHOD_TYPE
32653 || TREE_CODE (type) == ARRAY_TYPE)
32654 type = TREE_TYPE (type);
32656 mode = TYPE_MODE (type);
32658 /* Check for invalid AltiVec type qualifiers. */
32659 if (type == long_double_type_node)
32660 error ("use of %<long double%> in AltiVec types is invalid");
32661 else if (type == boolean_type_node)
32662 error ("use of boolean types in AltiVec types is invalid");
32663 else if (TREE_CODE (type) == COMPLEX_TYPE)
32664 error ("use of %<complex%> in AltiVec types is invalid");
32665 else if (DECIMAL_FLOAT_MODE_P (mode))
32666 error ("use of decimal floating point types in AltiVec types is invalid");
32667 else if (!TARGET_VSX)
32669 if (type == long_unsigned_type_node || type == long_integer_type_node)
32671 if (TARGET_64BIT)
32672 error ("use of %<long%> in AltiVec types is invalid for "
32673 "64-bit code without -mvsx");
32674 else if (rs6000_warn_altivec_long)
32675 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32676 "use %<int%>");
32678 else if (type == long_long_unsigned_type_node
32679 || type == long_long_integer_type_node)
32680 error ("use of %<long long%> in AltiVec types is invalid without "
32681 "-mvsx");
32682 else if (type == double_type_node)
32683 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
32686 switch (altivec_type)
32688 case 'v':
32689 unsigned_p = TYPE_UNSIGNED (type);
32690 switch (mode)
32692 case TImode:
32693 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32694 break;
32695 case DImode:
32696 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32697 break;
32698 case SImode:
32699 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32700 break;
32701 case HImode:
32702 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32703 break;
32704 case QImode:
32705 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32706 break;
32707 case SFmode: result = V4SF_type_node; break;
32708 case DFmode: result = V2DF_type_node; break;
32709 /* If the user says 'vector int bool', we may be handed the 'bool'
32710 attribute _before_ the 'vector' attribute, and so select the
32711 proper type in the 'b' case below. */
32712 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
32713 case V2DImode: case V2DFmode:
32714 result = type;
32715 default: break;
32717 break;
32718 case 'b':
32719 switch (mode)
32721 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
32722 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
32723 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
32724 case QImode: case V16QImode: result = bool_V16QI_type_node;
32725 default: break;
32727 break;
32728 case 'p':
32729 switch (mode)
32731 case V8HImode: result = pixel_V8HI_type_node;
32732 default: break;
32734 default: break;
32737 /* Propagate qualifiers attached to the element type
32738 onto the vector type. */
32739 if (result && result != type && TYPE_QUALS (type))
32740 result = build_qualified_type (result, TYPE_QUALS (type));
32742 *no_add_attrs = true; /* No need to hang on to the attribute. */
32744 if (result)
32745 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32747 return NULL_TREE;
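/* Usage sketch (illustrative): a declaration such as
     __attribute__((altivec(vector__))) unsigned int v;
   reaches the 'v' case above with SImode and unsigned_p set, so the
   type is rewritten to unsigned_V4SI_type_node, i.e. the AltiVec
   'vector unsigned int'.  */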
32750 /* AltiVec defines four built-in scalar types that serve as vector
32751 elements; we must teach the compiler how to mangle them. */
32753 static const char *
32754 rs6000_mangle_type (const_tree type)
32756 type = TYPE_MAIN_VARIANT (type);
32758 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32759 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32760 return NULL;
32762 if (type == bool_char_type_node) return "U6__boolc";
32763 if (type == bool_short_type_node) return "U6__bools";
32764 if (type == pixel_type_node) return "u7__pixel";
32765 if (type == bool_int_type_node) return "U6__booli";
32766 if (type == bool_long_type_node) return "U6__booll";
32768 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32769 "g" for IBM extended double, no matter whether it is long double (using
32770 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32771 if (TARGET_FLOAT128)
32773 if (type == ieee128_float_type_node)
32774 return "U10__float128";
32776 if (type == ibm128_float_type_node)
32777 return "g";
32779 if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
32780 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32783 /* Mangle IBM extended float long double as `g' (__float128) on
32784 powerpc*-linux where long-double-64 previously was the default. */
32785 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32786 && TARGET_ELF
32787 && TARGET_LONG_DOUBLE_128
32788 && !TARGET_IEEEQUAD)
32789 return "g";
32791 /* For all other types, use normal C++ mangling. */
32792 return NULL;
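/* Example (illustrative): the AltiVec '__bool int' element type
   (bool_int_type_node) mangles as "U6__booli" per the table above,
   while ordinary scalar types fall through to the default C++
   mangling by returning NULL.  */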
32795 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32796 struct attribute_spec.handler. */
32798 static tree
32799 rs6000_handle_longcall_attribute (tree *node, tree name,
32800 tree args ATTRIBUTE_UNUSED,
32801 int flags ATTRIBUTE_UNUSED,
32802 bool *no_add_attrs)
32804 if (TREE_CODE (*node) != FUNCTION_TYPE
32805 && TREE_CODE (*node) != FIELD_DECL
32806 && TREE_CODE (*node) != TYPE_DECL)
32808 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32809 name);
32810 *no_add_attrs = true;
32813 return NULL_TREE;
32816 /* Set longcall attributes on all functions declared when
32817 rs6000_default_long_calls is true. */
32818 static void
32819 rs6000_set_default_type_attributes (tree type)
32821 if (rs6000_default_long_calls
32822 && (TREE_CODE (type) == FUNCTION_TYPE
32823 || TREE_CODE (type) == METHOD_TYPE))
32824 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32825 NULL_TREE,
32826 TYPE_ATTRIBUTES (type));
32828 #if TARGET_MACHO
32829 darwin_set_default_type_attributes (type);
32830 #endif
32833 /* Return a reference suitable for calling a function with the
32834 longcall attribute. */
32836 rtx
32837 rs6000_longcall_ref (rtx call_ref)
32839 const char *call_name;
32840 tree node;
32842 if (GET_CODE (call_ref) != SYMBOL_REF)
32843 return call_ref;
32845 /* System V adds '.' to the internal name, so skip them. */
32846 call_name = XSTR (call_ref, 0);
32847 if (*call_name == '.')
32849 while (*call_name == '.')
32850 call_name++;
32852 node = get_identifier (call_name);
32853 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32856 return force_reg (Pmode, call_ref);
32859 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32860 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32861 #endif
32863 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32864 struct attribute_spec.handler. */
32865 static tree
32866 rs6000_handle_struct_attribute (tree *node, tree name,
32867 tree args ATTRIBUTE_UNUSED,
32868 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32870 tree *type = NULL;
32871 if (DECL_P (*node))
32873 if (TREE_CODE (*node) == TYPE_DECL)
32874 type = &TREE_TYPE (*node);
32876 else
32877 type = node;
32879 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32880 || TREE_CODE (*type) == UNION_TYPE)))
32882 warning (OPT_Wattributes, "%qE attribute ignored", name);
32883 *no_add_attrs = true;
32886 else if ((is_attribute_p ("ms_struct", name)
32887 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32888 || ((is_attribute_p ("gcc_struct", name)
32889 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32891 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32892 name);
32893 *no_add_attrs = true;
32896 return NULL_TREE;
32899 static bool
32900 rs6000_ms_bitfield_layout_p (const_tree record_type)
32902 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32903 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32904 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32907 #ifdef USING_ELFOS_H
32909 /* A get_unnamed_section callback, used for switching to toc_section. */
32911 static void
32912 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32914 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32915 && TARGET_MINIMAL_TOC)
32917 if (!toc_initialized)
32919 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32920 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32921 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32922 fprintf (asm_out_file, "\t.tc ");
32923 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32924 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32925 fprintf (asm_out_file, "\n");
32927 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32928 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32929 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32930 fprintf (asm_out_file, " = .+32768\n");
32931 toc_initialized = 1;
32933 else
32934 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32936 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32938 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32939 if (!toc_initialized)
32941 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32942 toc_initialized = 1;
32945 else
32947 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32948 if (!toc_initialized)
32950 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32951 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32952 fprintf (asm_out_file, " = .+32768\n");
32953 toc_initialized = 1;
32958 /* Implement TARGET_ASM_INIT_SECTIONS. */
32960 static void
32961 rs6000_elf_asm_init_sections (void)
32963 toc_section
32964 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32966 sdata2_section
32967 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32968 SDATA2_SECTION_ASM_OP);
32971 /* Implement TARGET_SELECT_RTX_SECTION. */
32973 static section *
32974 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32975 unsigned HOST_WIDE_INT align)
32977 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32978 return toc_section;
32979 else
32980 return default_elf_select_rtx_section (mode, x, align);
32983 /* For a SYMBOL_REF, set generic flags and then perform some
32984 target-specific processing.
32986 When the AIX ABI is requested on a non-AIX system, replace the
32987 function name with the real name (with a leading .) rather than the
32988 function descriptor name. This saves a lot of overriding code to
32989 read the prefixes. */
32991 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32992 static void
32993 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32995 default_encode_section_info (decl, rtl, first);
32997 if (first
32998 && TREE_CODE (decl) == FUNCTION_DECL
32999 && !TARGET_AIX
33000 && DEFAULT_ABI == ABI_AIX)
33002 rtx sym_ref = XEXP (rtl, 0);
33003 size_t len = strlen (XSTR (sym_ref, 0));
33004 char *str = XALLOCAVEC (char, len + 2);
33005 str[0] = '.';
33006 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
33007 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
33008 }
33009 }
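/* For example, when the AIX-style 64-bit ABI is used on a non-AIX
   (ELF) system, a definition of "foo" has its SYMBOL_REF string
   rewritten to ".foo" here, so later code can emit the entry-point
   name directly rather than stripping a function-descriptor prefix
   each time. */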
33011 static inline bool
33012 compare_section_name (const char *section, const char *templ)
33013 {
33014 int len;
33016 len = strlen (templ);
33017 return (strncmp (section, templ, len) == 0
33018 && (section[len] == 0 || section[len] == '.'));
33019 }
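/* For example, compare_section_name (".sdata.foo", ".sdata") is true
   (exact prefix followed by '.'), while compare_section_name
   (".sdata2", ".sdata") is false because the character after the
   prefix is '2'; this keeps ".sdata" checks from also matching
   ".sdata2". */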
33021 bool
33022 rs6000_elf_in_small_data_p (const_tree decl)
33024 if (rs6000_sdata == SDATA_NONE)
33025 return false;
33027 /* We want to merge strings, so we never consider them small data. */
33028 if (TREE_CODE (decl) == STRING_CST)
33029 return false;
33031 /* Functions are never in the small data area. */
33032 if (TREE_CODE (decl) == FUNCTION_DECL)
33033 return false;
33035 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
33037 const char *section = DECL_SECTION_NAME (decl);
33038 if (compare_section_name (section, ".sdata")
33039 || compare_section_name (section, ".sdata2")
33040 || compare_section_name (section, ".gnu.linkonce.s")
33041 || compare_section_name (section, ".sbss")
33042 || compare_section_name (section, ".sbss2")
33043 || compare_section_name (section, ".gnu.linkonce.sb")
33044 || strcmp (section, ".PPC.EMB.sdata0") == 0
33045 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33046 return true;
33048 else
33050 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33052 if (size > 0
33053 && size <= g_switch_value
33054 /* If it's not public, and we're not going to reference it there,
33055 there's no need to put it in the small data section. */
33056 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33057 return true;
33060 return false;
33061 }
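/* For example, with -msdata=data and -G 8, a public "int counter;"
   (size 4 <= 8) is placed in small data, while a 64-byte array is
   not; the -G threshold is what g_switch_value holds above. */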
33063 #endif /* USING_ELFOS_H */
33065 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33067 static bool
33068 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33069 {
33070 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33071 }
33073 /* Do not place thread-local symbol refs in the object blocks. */
33075 static bool
33076 rs6000_use_blocks_for_decl_p (const_tree decl)
33077 {
33078 return !DECL_THREAD_LOCAL_P (decl);
33079 }
33081 /* Return a REG that occurs in ADDR with coefficient 1.
33082 ADDR can be effectively incremented by incrementing REG.
33084 r0 is special and we must not select it as an address
33085 register by this routine since our caller will try to
33086 increment the returned register via an "la" instruction. */
33088 rtx
33089 find_addr_reg (rtx addr)
33090 {
33091 while (GET_CODE (addr) == PLUS)
33092 {
33093 if (GET_CODE (XEXP (addr, 0)) == REG
33094 && REGNO (XEXP (addr, 0)) != 0)
33095 addr = XEXP (addr, 0);
33096 else if (GET_CODE (XEXP (addr, 1)) == REG
33097 && REGNO (XEXP (addr, 1)) != 0)
33098 addr = XEXP (addr, 1);
33099 else if (CONSTANT_P (XEXP (addr, 0)))
33100 addr = XEXP (addr, 1);
33101 else if (CONSTANT_P (XEXP (addr, 1)))
33102 addr = XEXP (addr, 0);
33103 else
33104 gcc_unreachable ();
33105 }
33106 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
33107 return addr;
33108 }
33110 void
33111 rs6000_fatal_bad_address (rtx op)
33112 {
33113 fatal_insn ("bad address", op);
33114 }
33116 #if TARGET_MACHO
33118 typedef struct branch_island_d {
33119 tree function_name;
33120 tree label_name;
33121 int line_number;
33122 } branch_island;
33125 static vec<branch_island, va_gc> *branch_islands;
33127 /* Remember to generate a branch island for far calls to the given
33128 function. */
33130 static void
33131 add_compiler_branch_island (tree label_name, tree function_name,
33132 int line_number)
33134 branch_island bi = {function_name, label_name, line_number};
33135 vec_safe_push (branch_islands, bi);
33138 /* Generate far-jump branch islands for everything recorded in
33139 branch_islands. Invoked immediately after the last instruction of
33140 the epilogue has been emitted; the branch islands must be appended
33141 to, and contiguous with, the function body. Mach-O stubs are
33142 generated in machopic_output_stub(). */
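/* For reference, a non-PIC island for "foo" expands (per the code
   below) to roughly:

     L42:
       lis r12,hi16(_foo)
       ori r12,r12,lo16(_foo)
       mtctr r12
       bctr

   i.e. the full 32-bit address is materialized in r12 and the island
   branches through CTR, which is not limited by the +/-32MB "bl"
   displacement. */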
33144 static void
33145 macho_branch_islands (void)
33147 char tmp_buf[512];
33149 while (!vec_safe_is_empty (branch_islands))
33151 branch_island *bi = &branch_islands->last ();
33152 const char *label = IDENTIFIER_POINTER (bi->label_name);
33153 const char *name = IDENTIFIER_POINTER (bi->function_name);
33154 char name_buf[512];
33155 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33156 if (name[0] == '*' || name[0] == '&')
33157 strcpy (name_buf, name+1);
33158 else
33160 name_buf[0] = '_';
33161 strcpy (name_buf+1, name);
33163 strcpy (tmp_buf, "\n");
33164 strcat (tmp_buf, label);
33165 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33166 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33167 dbxout_stabd (N_SLINE, bi->line_number);
33168 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33169 if (flag_pic)
33171 if (TARGET_LINK_STACK)
33173 char name[32];
33174 get_ppc476_thunk_name (name);
33175 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33176 strcat (tmp_buf, name);
33177 strcat (tmp_buf, "\n");
33178 strcat (tmp_buf, label);
33179 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33181 else
33183 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33184 strcat (tmp_buf, label);
33185 strcat (tmp_buf, "_pic\n");
33186 strcat (tmp_buf, label);
33187 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33190 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33191 strcat (tmp_buf, name_buf);
33192 strcat (tmp_buf, " - ");
33193 strcat (tmp_buf, label);
33194 strcat (tmp_buf, "_pic)\n");
33196 strcat (tmp_buf, "\tmtlr r0\n");
33198 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33199 strcat (tmp_buf, name_buf);
33200 strcat (tmp_buf, " - ");
33201 strcat (tmp_buf, label);
33202 strcat (tmp_buf, "_pic)\n");
33204 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33206 else
33208 strcat (tmp_buf, ":\nlis r12,hi16(");
33209 strcat (tmp_buf, name_buf);
33210 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33211 strcat (tmp_buf, name_buf);
33212 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33214 output_asm_insn (tmp_buf, 0);
33215 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33216 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33217 dbxout_stabd (N_SLINE, bi->line_number);
33218 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33219 branch_islands->pop ();
33223 /* NO_PREVIOUS_DEF checks whether the function name already has a
33224 branch island recorded for it. */
33226 static int
33227 no_previous_def (tree function_name)
33229 branch_island *bi;
33230 unsigned ix;
33232 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33233 if (function_name == bi->function_name)
33234 return 0;
33235 return 1;
33238 /* GET_PREV_LABEL gets the label name from the previous definition of
33239 the function. */
33241 static tree
33242 get_prev_label (tree function_name)
33244 branch_island *bi;
33245 unsigned ix;
33247 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33248 if (function_name == bi->function_name)
33249 return bi->label_name;
33250 return NULL_TREE;
33253 /* INSN is either a function call or a millicode call. It may have an
33254 unconditional jump in its delay slot.
33256 CALL_DEST is the routine we are calling. */
33258 char *
33259 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
33260 int cookie_operand_number)
33262 static char buf[256];
33263 if (darwin_emit_branch_islands
33264 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33265 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33267 tree labelname;
33268 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33270 if (no_previous_def (funname))
33272 rtx label_rtx = gen_label_rtx ();
33273 char *label_buf, temp_buf[256];
33274 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33275 CODE_LABEL_NUMBER (label_rtx));
33276 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33277 labelname = get_identifier (label_buf);
33278 add_compiler_branch_island (labelname, funname, insn_line (insn));
33280 else
33281 labelname = get_prev_label (funname);
33283 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33284 instruction will reach 'foo', otherwise link as 'bl L42'".
33285 "L42" should be a 'branch island', that will do a far jump to
33286 'foo'. Branch islands are generated in
33287 macho_branch_islands(). */
33288 sprintf (buf, "jbsr %%z%d,%.246s",
33289 dest_operand_number, IDENTIFIER_POINTER (labelname));
33291 else
33292 sprintf (buf, "bl %%z%d", dest_operand_number);
33293 return buf;
33296 /* Generate PIC and indirect symbol stubs. */
33298 void
33299 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33301 unsigned int length;
33302 char *symbol_name, *lazy_ptr_name;
33303 char *local_label_0;
33304 static int label = 0;
33306 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33307 symb = (*targetm.strip_name_encoding) (symb);
33310 length = strlen (symb);
33311 symbol_name = XALLOCAVEC (char, length + 32);
33312 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33314 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33315 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33317 if (flag_pic == 2)
33318 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33319 else
33320 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33322 if (flag_pic == 2)
33324 fprintf (file, "\t.align 5\n");
33326 fprintf (file, "%s:\n", stub);
33327 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33329 label++;
33330 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33331 sprintf (local_label_0, "\"L%011d$spb\"", label);
33333 fprintf (file, "\tmflr r0\n");
33334 if (TARGET_LINK_STACK)
33336 char name[32];
33337 get_ppc476_thunk_name (name);
33338 fprintf (file, "\tbl %s\n", name);
33339 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33341 else
33343 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33344 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33346 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33347 lazy_ptr_name, local_label_0);
33348 fprintf (file, "\tmtlr r0\n");
33349 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33350 (TARGET_64BIT ? "ldu" : "lwzu"),
33351 lazy_ptr_name, local_label_0);
33352 fprintf (file, "\tmtctr r12\n");
33353 fprintf (file, "\tbctr\n");
33355 else
33357 fprintf (file, "\t.align 4\n");
33359 fprintf (file, "%s:\n", stub);
33360 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33362 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33363 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33364 (TARGET_64BIT ? "ldu" : "lwzu"),
33365 lazy_ptr_name);
33366 fprintf (file, "\tmtctr r12\n");
33367 fprintf (file, "\tbctr\n");
33370 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33371 fprintf (file, "%s:\n", lazy_ptr_name);
33372 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33373 fprintf (file, "%sdyld_stub_binding_helper\n",
33374 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33377 /* Legitimize PIC addresses. If the address is already
33378 position-independent, we return ORIG. Newly generated
33379 position-independent addresses go into a reg. This is REG if
33380 nonzero, otherwise we allocate register(s) as necessary. */
33382 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
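/* For example, legitimizing (const (plus (symbol_ref "x") (const_int 4)))
   can keep the offset folded via plus_constant, since 4 satisfies
   SMALL_INT; an offset such as 0x12345 fails the test and is forced
   into a register (or pushed to the constant pool during reload)
   before the PLUS is rebuilt below. */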
33384 rtx
33385 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33386 rtx reg)
33388 rtx base, offset;
33390 if (reg == NULL && ! reload_in_progress && ! reload_completed)
33391 reg = gen_reg_rtx (Pmode);
33393 if (GET_CODE (orig) == CONST)
33395 rtx reg_temp;
33397 if (GET_CODE (XEXP (orig, 0)) == PLUS
33398 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33399 return orig;
33401 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33403 /* Use a different reg for the intermediate value, as
33404 it will be marked UNCHANGING. */
33405 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33406 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33407 Pmode, reg_temp);
33408 offset =
33409 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33410 Pmode, reg);
33412 if (GET_CODE (offset) == CONST_INT)
33414 if (SMALL_INT (offset))
33415 return plus_constant (Pmode, base, INTVAL (offset));
33416 else if (! reload_in_progress && ! reload_completed)
33417 offset = force_reg (Pmode, offset);
33418 else
33420 rtx mem = force_const_mem (Pmode, orig);
33421 return machopic_legitimize_pic_address (mem, Pmode, reg);
33424 return gen_rtx_PLUS (Pmode, base, offset);
33427 /* Fall back on generic machopic code. */
33428 return machopic_legitimize_pic_address (orig, mode, reg);
33431 /* Output a .machine directive for the Darwin assembler, and call
33432 the generic start_file routine. */
33434 static void
33435 rs6000_darwin_file_start (void)
33437 static const struct
33439 const char *arg;
33440 const char *name;
33441 HOST_WIDE_INT if_set;
33442 } mapping[] = {
33443 { "ppc64", "ppc64", MASK_64BIT },
33444 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33445 { "power4", "ppc970", 0 },
33446 { "G5", "ppc970", 0 },
33447 { "7450", "ppc7450", 0 },
33448 { "7400", "ppc7400", MASK_ALTIVEC },
33449 { "G4", "ppc7400", 0 },
33450 { "750", "ppc750", 0 },
33451 { "740", "ppc750", 0 },
33452 { "G3", "ppc750", 0 },
33453 { "604e", "ppc604e", 0 },
33454 { "604", "ppc604", 0 },
33455 { "603e", "ppc603", 0 },
33456 { "603", "ppc603", 0 },
33457 { "601", "ppc601", 0 },
33458 { NULL, "ppc", 0 } };
33459 const char *cpu_id = "";
33460 size_t i;
33462 rs6000_file_start ();
33463 darwin_file_start ();
33465 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33467 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33468 cpu_id = rs6000_default_cpu;
33470 if (global_options_set.x_rs6000_cpu_index)
33471 cpu_id = processor_target_table[rs6000_cpu_index].name;
33473 /* Look through the mapping array. Pick the first name that either
33474 matches the argument, has a bit set in IF_SET that is also set
33475 in the target flags, or has a NULL name. */
33477 i = 0;
33478 while (mapping[i].arg != NULL
33479 && strcmp (mapping[i].arg, cpu_id) != 0
33480 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33481 i++;
33483 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33484 }
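/* For example, -mcpu=G5 matches the "G5" row and emits
   ".machine ppc970"; with no CPU given and no IF_SET bit present in
   the target flags, the terminating { NULL, "ppc", 0 } entry yields
   ".machine ppc". */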
33486 #endif /* TARGET_MACHO */
33488 #if TARGET_ELF
33489 static int
33490 rs6000_elf_reloc_rw_mask (void)
33491 {
33492 if (flag_pic)
33493 return 3;
33494 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33495 return 2;
33496 else
33497 return 0;
33498 }
33500 /* Record an element in the table of global constructors. SYMBOL is
33501 a SYMBOL_REF of the function to be called; PRIORITY is a number
33502 between 0 and MAX_INIT_PRIORITY.
33504 This differs from default_named_section_asm_out_constructor in
33505 that we have special handling for -mrelocatable. */
33507 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33508 static void
33509 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33511 const char *section = ".ctors";
33512 char buf[16];
33514 if (priority != DEFAULT_INIT_PRIORITY)
33516 sprintf (buf, ".ctors.%.5u",
33517 /* Invert the numbering so the linker puts us in the proper
33518 order; constructors are run from right to left, and the
33519 linker sorts in increasing order. */
33520 MAX_INIT_PRIORITY - priority);
33521 section = buf;
33524 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33525 assemble_align (POINTER_SIZE);
33527 if (DEFAULT_ABI == ABI_V4
33528 && (TARGET_RELOCATABLE || flag_pic > 1))
33530 fputs ("\t.long (", asm_out_file);
33531 output_addr_const (asm_out_file, symbol);
33532 fputs (")@fixup\n", asm_out_file);
33534 else
33535 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33536 }
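/* For example, the inversion above maps priority 101 to
   ".ctors.65434" (65535 - 101), so the linker's ascending sort plus
   the right-to-left execution of .ctors reproduces init_priority
   order. */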
33538 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33539 static void
33540 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33542 const char *section = ".dtors";
33543 char buf[16];
33545 if (priority != DEFAULT_INIT_PRIORITY)
33547 sprintf (buf, ".dtors.%.5u",
33548 /* Invert the numbering so the linker puts us in the proper
33549 order; constructors are run from right to left, and the
33550 linker sorts in increasing order. */
33551 MAX_INIT_PRIORITY - priority);
33552 section = buf;
33555 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33556 assemble_align (POINTER_SIZE);
33558 if (DEFAULT_ABI == ABI_V4
33559 && (TARGET_RELOCATABLE || flag_pic > 1))
33561 fputs ("\t.long (", asm_out_file);
33562 output_addr_const (asm_out_file, symbol);
33563 fputs (")@fixup\n", asm_out_file);
33565 else
33566 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33569 void
33570 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33572 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33574 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33575 ASM_OUTPUT_LABEL (file, name);
33576 fputs (DOUBLE_INT_ASM_OP, file);
33577 rs6000_output_function_entry (file, name);
33578 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33579 if (DOT_SYMBOLS)
33581 fputs ("\t.size\t", file);
33582 assemble_name (file, name);
33583 fputs (",24\n\t.type\t.", file);
33584 assemble_name (file, name);
33585 fputs (",@function\n", file);
33586 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33588 fputs ("\t.globl\t.", file);
33589 assemble_name (file, name);
33590 putc ('\n', file);
33593 else
33594 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33595 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33596 rs6000_output_function_entry (file, name);
33597 fputs (":\n", file);
33598 return;
33601 if (DEFAULT_ABI == ABI_V4
33602 && (TARGET_RELOCATABLE || flag_pic > 1)
33603 && !TARGET_SECURE_PLT
33604 && (get_pool_size () != 0 || crtl->profile)
33605 && uses_TOC ())
33607 char buf[256];
33609 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33611 fprintf (file, "\t.long ");
33612 assemble_name (file, toc_label_name);
33613 need_toc_init = 1;
33614 putc ('-', file);
33615 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33616 assemble_name (file, buf);
33617 putc ('\n', file);
33620 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33621 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33623 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33625 char buf[256];
33627 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33629 fprintf (file, "\t.quad .TOC.-");
33630 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33631 assemble_name (file, buf);
33632 putc ('\n', file);
33635 if (DEFAULT_ABI == ABI_AIX)
33637 const char *desc_name, *orig_name;
33639 orig_name = (*targetm.strip_name_encoding) (name);
33640 desc_name = orig_name;
33641 while (*desc_name == '.')
33642 desc_name++;
33644 if (TREE_PUBLIC (decl))
33645 fprintf (file, "\t.globl %s\n", desc_name);
33647 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33648 fprintf (file, "%s:\n", desc_name);
33649 fprintf (file, "\t.long %s\n", orig_name);
33650 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33651 fputs ("\t.long 0\n", file);
33652 fprintf (file, "\t.previous\n");
33654 ASM_OUTPUT_LABEL (file, name);
33657 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33658 static void
33659 rs6000_elf_file_end (void)
33661 #ifdef HAVE_AS_GNU_ATTRIBUTE
33662 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33664 if (rs6000_passes_float)
33665 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
33666 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
33667 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
33668 : 2));
33669 if (rs6000_passes_vector)
33670 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33671 (TARGET_ALTIVEC_ABI ? 2
33672 : TARGET_SPE_ABI ? 3
33673 : 1));
33674 if (rs6000_returns_struct)
33675 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33676 aix_struct_return ? 2 : 1);
33678 #endif
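/* For example, a 32-bit SysV hard-float compile that passes doubles
   in FPRs emits ".gnu_attribute 4, 1", while a soft-float compile
   emits ".gnu_attribute 4, 2", letting the linker diagnose
   mixed-ABI links. */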
33679 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33680 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33681 file_end_indicate_exec_stack ();
33682 #endif
33684 if (flag_split_stack)
33685 file_end_indicate_split_stack ();
33687 if (cpu_builtin_p)
33689 /* We have expanded a CPU builtin, so we need to emit a reference to
33690 the special symbol that LIBC uses to declare it supports the
33691 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33692 switch_to_section (data_section);
33693 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33694 fprintf (asm_out_file, "\t%s %s\n",
33695 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33698 #endif
33700 #if TARGET_XCOFF
33702 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33703 #define HAVE_XCOFF_DWARF_EXTRAS 0
33704 #endif
33706 static enum unwind_info_type
33707 rs6000_xcoff_debug_unwind_info (void)
33708 {
33709 return UI_NONE;
33710 }
33712 static void
33713 rs6000_xcoff_asm_output_anchor (rtx symbol)
33715 char buffer[100];
33717 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33718 SYMBOL_REF_BLOCK_OFFSET (symbol));
33719 fprintf (asm_out_file, "%s", SET_ASM_OP);
33720 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33721 fprintf (asm_out_file, ",");
33722 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33723 fprintf (asm_out_file, "\n");
33726 static void
33727 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33729 fputs (GLOBAL_ASM_OP, stream);
33730 RS6000_OUTPUT_BASENAME (stream, name);
33731 putc ('\n', stream);
33734 /* A get_unnamed_section callback, used for read-only sections.
33735 DIRECTIVE points to the section name string variable. */
33737 static void
33738 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33740 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33741 *(const char *const *) directive,
33742 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33745 /* Likewise for read-write sections. */
33747 static void
33748 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33750 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33751 *(const char *const *) directive,
33752 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33755 static void
33756 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33758 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33759 *(const char *const *) directive,
33760 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33761 }
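/* With xcoff_read_only_section_name set to, say, "foo.ro_", the
   read-only callback above emits

     .csect foo.ro_[RO],3

   assuming XCOFF_CSECT_DEFAULT_ALIGNMENT_STR is "3" (2^3 = 8-byte
   alignment); the [RW] and [TL] variants differ only in the XCOFF
   storage-mapping class. */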
33763 /* A get_unnamed_section callback, used for switching to toc_section. */
33765 static void
33766 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33768 if (TARGET_MINIMAL_TOC)
33770 /* toc_section is always selected at least once from
33771 rs6000_xcoff_file_start, so this is guaranteed to be
33772 defined exactly once in each file. */
33773 if (!toc_initialized)
33775 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33776 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33777 toc_initialized = 1;
33779 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33780 (TARGET_32BIT ? "" : ",3"));
33782 else
33783 fputs ("\t.toc\n", asm_out_file);
33786 /* Implement TARGET_ASM_INIT_SECTIONS. */
33788 static void
33789 rs6000_xcoff_asm_init_sections (void)
33791 read_only_data_section
33792 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33793 &xcoff_read_only_section_name);
33795 private_data_section
33796 = get_unnamed_section (SECTION_WRITE,
33797 rs6000_xcoff_output_readwrite_section_asm_op,
33798 &xcoff_private_data_section_name);
33800 tls_data_section
33801 = get_unnamed_section (SECTION_TLS,
33802 rs6000_xcoff_output_tls_section_asm_op,
33803 &xcoff_tls_data_section_name);
33805 tls_private_data_section
33806 = get_unnamed_section (SECTION_TLS,
33807 rs6000_xcoff_output_tls_section_asm_op,
33808 &xcoff_private_data_section_name);
33810 read_only_private_data_section
33811 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33812 &xcoff_private_data_section_name);
33814 toc_section
33815 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33817 readonly_data_section = read_only_data_section;
33820 static int
33821 rs6000_xcoff_reloc_rw_mask (void)
33822 {
33823 return 3;
33824 }
33826 static void
33827 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33828 tree decl ATTRIBUTE_UNUSED)
33830 int smclass;
33831 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33833 if (flags & SECTION_EXCLUDE)
33834 smclass = 4;
33835 else if (flags & SECTION_DEBUG)
33837 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33838 return;
33840 else if (flags & SECTION_CODE)
33841 smclass = 0;
33842 else if (flags & SECTION_TLS)
33843 smclass = 3;
33844 else if (flags & SECTION_WRITE)
33845 smclass = 2;
33846 else
33847 smclass = 1;
33849 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33850 (flags & SECTION_CODE) ? "." : "",
33851 name, suffix[smclass], flags & SECTION_ENTSIZE);
33852 }
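/* For example, a code section named "hot" comes out roughly as
   ".csect .hot[PR],2", where the "." prefix marks code and the
   trailing number is the encoded alignment (exact_log2) supplied by
   rs6000_xcoff_section_type_flags below; writable data would use the
   [RW] storage-mapping class instead. */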
33854 #define IN_NAMED_SECTION(DECL) \
33855 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33856 && DECL_SECTION_NAME (DECL) != NULL)
33858 static section *
33859 rs6000_xcoff_select_section (tree decl, int reloc,
33860 unsigned HOST_WIDE_INT align)
33862 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33863 named section. */
33864 if (align > BIGGEST_ALIGNMENT)
33866 resolve_unique_section (decl, reloc, true);
33867 if (IN_NAMED_SECTION (decl))
33868 return get_named_section (decl, NULL, reloc);
33871 if (decl_readonly_section (decl, reloc))
33873 if (TREE_PUBLIC (decl))
33874 return read_only_data_section;
33875 else
33876 return read_only_private_data_section;
33878 else
33880 #if HAVE_AS_TLS
33881 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33883 if (TREE_PUBLIC (decl))
33884 return tls_data_section;
33885 else if (bss_initializer_p (decl))
33887 /* Convert to COMMON to emit in BSS. */
33888 DECL_COMMON (decl) = 1;
33889 return tls_comm_section;
33891 else
33892 return tls_private_data_section;
33894 else
33895 #endif
33896 if (TREE_PUBLIC (decl))
33897 return data_section;
33898 else
33899 return private_data_section;
33903 static void
33904 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33906 const char *name;
33908 /* Use select_section for private data and uninitialized data with
33909 alignment <= BIGGEST_ALIGNMENT. */
33910 if (!TREE_PUBLIC (decl)
33911 || DECL_COMMON (decl)
33912 || (DECL_INITIAL (decl) == NULL_TREE
33913 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33914 || DECL_INITIAL (decl) == error_mark_node
33915 || (flag_zero_initialized_in_bss
33916 && initializer_zerop (DECL_INITIAL (decl))))
33917 return;
33919 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33920 name = (*targetm.strip_name_encoding) (name);
33921 set_decl_section_name (decl, name);
33924 /* Select section for constant in constant pool.
33926 On RS/6000, all constants are in the private read-only data area.
33927 However, if this is being placed in the TOC it must be output as a
33928 toc entry. */
33930 static section *
33931 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33932 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33934 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33935 return toc_section;
33936 else
33937 return read_only_private_data_section;
33940 /* Remove any trailing [DS] or the like from the symbol name. */
33942 static const char *
33943 rs6000_xcoff_strip_name_encoding (const char *name)
33944 {
33945 size_t len;
33946 if (*name == '*')
33947 name++;
33948 len = strlen (name);
33949 if (name[len - 1] == ']')
33950 return ggc_alloc_string (name, len - 4);
33951 else
33952 return name;
33953 }
33955 /* Section attributes. AIX is always PIC. */
33957 static unsigned int
33958 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33960 unsigned int align;
33961 unsigned int flags = default_section_type_flags (decl, name, reloc);
33963 /* Align to at least UNIT size. */
33964 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33965 align = MIN_UNITS_PER_WORD;
33966 else
33967 /* Increase alignment of large objects if not already stricter. */
33968 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33969 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33970 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33972 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33975 /* Output at beginning of assembler file.
33977 Initialize the section names for the RS/6000 at this point.
33979 Specify filename, including full path, to assembler.
33981 We want to go into the TOC section so at least one .toc will be emitted.
33982 Also, in order to output proper .bs/.es pairs, we need at least one static
33983 [RW] section emitted.
33985 Finally, declare mcount when profiling to make the assembler happy. */
33987 static void
33988 rs6000_xcoff_file_start (void)
33990 rs6000_gen_section_name (&xcoff_bss_section_name,
33991 main_input_filename, ".bss_");
33992 rs6000_gen_section_name (&xcoff_private_data_section_name,
33993 main_input_filename, ".rw_");
33994 rs6000_gen_section_name (&xcoff_read_only_section_name,
33995 main_input_filename, ".ro_");
33996 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33997 main_input_filename, ".tls_");
33998 rs6000_gen_section_name (&xcoff_tbss_section_name,
33999 main_input_filename, ".tbss_[UL]");
34001 fputs ("\t.file\t", asm_out_file);
34002 output_quoted_string (asm_out_file, main_input_filename);
34003 fputc ('\n', asm_out_file);
34004 if (write_symbols != NO_DEBUG)
34005 switch_to_section (private_data_section);
34006 switch_to_section (toc_section);
34007 switch_to_section (text_section);
34008 if (profile_flag)
34009 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
34010 rs6000_file_start ();
34013 /* Output at end of assembler file.
34014 On the RS/6000, referencing data should automatically pull in text. */
34016 static void
34017 rs6000_xcoff_file_end (void)
34019 switch_to_section (text_section);
34020 fputs ("_section_.text:\n", asm_out_file);
34021 switch_to_section (data_section);
34022 fputs (TARGET_32BIT
34023 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34024 asm_out_file);
34027 struct declare_alias_data
34029 FILE *file;
34030 bool function_descriptor;
34033 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
34035 static bool
34036 rs6000_declare_alias (struct symtab_node *n, void *d)
34038 struct declare_alias_data *data = (struct declare_alias_data *)d;
34039 /* Main symbol is output specially, because varasm machinery does part of
34040 the job for us - we do not need to declare .globl/lglobs and such. */
34041 if (!n->alias || n->weakref)
34042 return false;
34044 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34045 return false;
34047 /* Prevent assemble_alias from trying to use .set pseudo operation
34048 that does not behave as expected by the middle-end. */
34049 TREE_ASM_WRITTEN (n->decl) = true;
34051 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34052 char *buffer = (char *) alloca (strlen (name) + 2);
34053 char *p;
34054 int dollar_inside = 0;
34056 strcpy (buffer, name);
34057 p = strchr (buffer, '$');
34058 while (p) {
34059 *p = '_';
34060 dollar_inside++;
34061 p = strchr (p + 1, '$');
34063 if (TREE_PUBLIC (n->decl))
34065 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34067 if (dollar_inside) {
34068 if (data->function_descriptor)
34069 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34070 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34072 if (data->function_descriptor)
34074 fputs ("\t.globl .", data->file);
34075 RS6000_OUTPUT_BASENAME (data->file, buffer);
34076 putc ('\n', data->file);
34078 fputs ("\t.globl ", data->file);
34079 RS6000_OUTPUT_BASENAME (data->file, buffer);
34080 putc ('\n', data->file);
34082 #ifdef ASM_WEAKEN_DECL
34083 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34084 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34085 #endif
34087 else
34089 if (dollar_inside)
34091 if (data->function_descriptor)
34092 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34093 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34095 if (data->function_descriptor)
34097 fputs ("\t.lglobl .", data->file);
34098 RS6000_OUTPUT_BASENAME (data->file, buffer);
34099 putc ('\n', data->file);
34101 fputs ("\t.lglobl ", data->file);
34102 RS6000_OUTPUT_BASENAME (data->file, buffer);
34103 putc ('\n', data->file);
34105 if (data->function_descriptor)
34106 fputs (".", data->file);
34107 RS6000_OUTPUT_BASENAME (data->file, buffer);
34108 fputs (":\n", data->file);
34109 return false;
34112 /* This macro produces the initial definition of a function name.
34113 On the RS/6000, we need to place an extra '.' in the function name and
34114 output the function descriptor.
34115 Dollar signs are converted to underscores.
34117 The csect for the function will have already been created when
34118 text_section was selected. We do have to go back to that csect, however.
34120 The third and fourth parameters to the .function pseudo-op (16 and 044)
34121 are placeholders which no longer have any use.
34123 Because AIX assembler's .set command has unexpected semantics, we output
34124 all aliases as alternative labels in front of the definition. */
34126 void
34127 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34129 char *buffer = (char *) alloca (strlen (name) + 1);
34130 char *p;
34131 int dollar_inside = 0;
34132 struct declare_alias_data data = {file, false};
34134 strcpy (buffer, name);
34135 p = strchr (buffer, '$');
34136 while (p) {
34137 *p = '_';
34138 dollar_inside++;
34139 p = strchr (p + 1, '$');
34141 if (TREE_PUBLIC (decl))
34143 if (!RS6000_WEAK || !DECL_WEAK (decl))
34145 if (dollar_inside) {
34146 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34147 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34149 fputs ("\t.globl .", file);
34150 RS6000_OUTPUT_BASENAME (file, buffer);
34151 putc ('\n', file);
34154 else
34156 if (dollar_inside) {
34157 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34158 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34160 fputs ("\t.lglobl .", file);
34161 RS6000_OUTPUT_BASENAME (file, buffer);
34162 putc ('\n', file);
34164 fputs ("\t.csect ", file);
34165 RS6000_OUTPUT_BASENAME (file, buffer);
34166 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34167 RS6000_OUTPUT_BASENAME (file, buffer);
34168 fputs (":\n", file);
34169 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
34170 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34171 RS6000_OUTPUT_BASENAME (file, buffer);
34172 fputs (", TOC[tc0], 0\n", file);
34173 in_section = NULL;
34174 switch_to_section (function_section (decl));
34175 putc ('.', file);
34176 RS6000_OUTPUT_BASENAME (file, buffer);
34177 fputs (":\n", file);
34178 data.function_descriptor = true;
34179 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
34180 if (!DECL_IGNORED_P (decl))
34182 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34183 xcoffout_declare_function (file, decl, buffer);
34184 else if (write_symbols == DWARF2_DEBUG)
34186 name = (*targetm.strip_name_encoding) (name);
34187 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34190 return;
34193 /* This macro produces the initial definition of an object (variable) name.
34194 Because AIX assembler's .set command has unexpected semantics, we output
34195 all aliases as alternative labels in front of the definition. */
34197 void
34198 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34200 struct declare_alias_data data = {file, false};
34201 RS6000_OUTPUT_BASENAME (file, name);
34202 fputs (":\n", file);
34203 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
34206 /* Override the default 'SYMBOL-.' syntax with AIX-compatible 'SYMBOL-$'. */
34208 void
34209 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34211 fputs (integer_asm_op (size, FALSE), file);
34212 assemble_name (file, label);
34213 fputs ("-$", file);
34216 /* Output a symbol offset relative to the dbase for the current object.
34217 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34218 signed offsets.
34220 __gcc_unwind_dbase is embedded in all executables/libraries through
34221 libgcc/config/rs6000/crtdbase.S. */
34223 void
34224 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34226 fputs (integer_asm_op (size, FALSE), file);
34227 assemble_name (file, label);
34228 fputs("-__gcc_unwind_dbase", file);
34231 #ifdef HAVE_AS_TLS
34232 static void
34233 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34235 rtx symbol;
34236 int flags;
34238 default_encode_section_info (decl, rtl, first);
34240 /* Careful not to prod global register variables. */
34241 if (!MEM_P (rtl))
34242 return;
34243 symbol = XEXP (rtl, 0);
34244 if (GET_CODE (symbol) != SYMBOL_REF)
34245 return;
34247 flags = SYMBOL_REF_FLAGS (symbol);
34249 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34250 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34252 SYMBOL_REF_FLAGS (symbol) = flags;
34254 #endif /* HAVE_AS_TLS */
34255 #endif /* TARGET_XCOFF */
34257 /* Return true if INSN should not be copied. */
34259 static bool
34260 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34261 {
34262 return recog_memoized (insn) >= 0
34263 && get_attr_cannot_copy (insn);
34264 }
34266 /* Compute a (partial) cost for rtx X. Return true if the complete
34267 cost has been computed, and false if subexpressions should be
34268 scanned. In either case, *TOTAL contains the cost result. */
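/* Two worked examples against the cases below: for
   (set (reg) (const_int 5)) the CONST_INT case reports a cost of 0,
   since 5 satisfies constraint "I" and rides inside an addi/li; for
   (set (reg) (const_int 0x12345678)) the masked test reports
   COSTS_N_INSNS (1). A MEM costs COSTS_N_INSNS (2) when optimizing
   for speed, reflecting roughly two instructions of L1 latency. */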
34270 static bool
34271 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34272 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34274 int code = GET_CODE (x);
34276 switch (code)
34278 /* On the RS/6000, if it is valid in the insn, it is free. */
34279 case CONST_INT:
34280 if (((outer_code == SET
34281 || outer_code == PLUS
34282 || outer_code == MINUS)
34283 && (satisfies_constraint_I (x)
34284 || satisfies_constraint_L (x)))
34285 || (outer_code == AND
34286 && (satisfies_constraint_K (x)
34287 || (mode == SImode
34288 ? satisfies_constraint_L (x)
34289 : satisfies_constraint_J (x))))
34290 || ((outer_code == IOR || outer_code == XOR)
34291 && (satisfies_constraint_K (x)
34292 || (mode == SImode
34293 ? satisfies_constraint_L (x)
34294 : satisfies_constraint_J (x))))
34295 || outer_code == ASHIFT
34296 || outer_code == ASHIFTRT
34297 || outer_code == LSHIFTRT
34298 || outer_code == ROTATE
34299 || outer_code == ROTATERT
34300 || outer_code == ZERO_EXTRACT
34301 || (outer_code == MULT
34302 && satisfies_constraint_I (x))
34303 || ((outer_code == DIV || outer_code == UDIV
34304 || outer_code == MOD || outer_code == UMOD)
34305 && exact_log2 (INTVAL (x)) >= 0)
34306 || (outer_code == COMPARE
34307 && (satisfies_constraint_I (x)
34308 || satisfies_constraint_K (x)))
34309 || ((outer_code == EQ || outer_code == NE)
34310 && (satisfies_constraint_I (x)
34311 || satisfies_constraint_K (x)
34312 || (mode == SImode
34313 ? satisfies_constraint_L (x)
34314 : satisfies_constraint_J (x))))
34315 || (outer_code == GTU
34316 && satisfies_constraint_I (x))
34317 || (outer_code == LTU
34318 && satisfies_constraint_P (x)))
34320 *total = 0;
34321 return true;
34323 else if ((outer_code == PLUS
34324 && reg_or_add_cint_operand (x, VOIDmode))
34325 || (outer_code == MINUS
34326 && reg_or_sub_cint_operand (x, VOIDmode))
34327 || ((outer_code == SET
34328 || outer_code == IOR
34329 || outer_code == XOR)
34330 && (INTVAL (x)
34331 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34333 *total = COSTS_N_INSNS (1);
34334 return true;
34336 /* FALLTHRU */
34338 case CONST_DOUBLE:
34339 case CONST_WIDE_INT:
34340 case CONST:
34341 case HIGH:
34342 case SYMBOL_REF:
34343 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34344 return true;
34346 case MEM:
34347 /* When optimizing for size, MEM should be slightly more expensive
34348 than generating an address, e.g., (plus (reg) (const)).
34349 L1 cache latency is about two instructions. */
34350 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34351 if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
34352 *total += COSTS_N_INSNS (100);
34353 return true;
34355 case LABEL_REF:
34356 *total = 0;
34357 return true;
34359 case PLUS:
34360 case MINUS:
34361 if (FLOAT_MODE_P (mode))
34362 *total = rs6000_cost->fp;
34363 else
34364 *total = COSTS_N_INSNS (1);
34365 return false;
34367 case MULT:
34368 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34369 && satisfies_constraint_I (XEXP (x, 1)))
34371 if (INTVAL (XEXP (x, 1)) >= -256
34372 && INTVAL (XEXP (x, 1)) <= 255)
34373 *total = rs6000_cost->mulsi_const9;
34374 else
34375 *total = rs6000_cost->mulsi_const;
34377 else if (mode == SFmode)
34378 *total = rs6000_cost->fp;
34379 else if (FLOAT_MODE_P (mode))
34380 *total = rs6000_cost->dmul;
34381 else if (mode == DImode)
34382 *total = rs6000_cost->muldi;
34383 else
34384 *total = rs6000_cost->mulsi;
34385 return false;
34387 case FMA:
34388 if (mode == SFmode)
34389 *total = rs6000_cost->fp;
34390 else
34391 *total = rs6000_cost->dmul;
34392 break;
34394 case DIV:
34395 case MOD:
34396 if (FLOAT_MODE_P (mode))
34398 *total = mode == DFmode ? rs6000_cost->ddiv
34399 : rs6000_cost->sdiv;
34400 return false;
34402 /* FALLTHRU */
34404 case UDIV:
34405 case UMOD:
34406 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34407 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34409 if (code == DIV || code == MOD)
34410 /* Shift, addze */
34411 *total = COSTS_N_INSNS (2);
34412 else
34413 /* Shift */
34414 *total = COSTS_N_INSNS (1);
34416 else
34418 if (GET_MODE (XEXP (x, 1)) == DImode)
34419 *total = rs6000_cost->divdi;
34420 else
34421 *total = rs6000_cost->divsi;
34423 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34424 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34425 *total += COSTS_N_INSNS (2);
34426 return false;
34428 case CTZ:
34429 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34430 return false;
34432 case FFS:
34433 *total = COSTS_N_INSNS (4);
34434 return false;
34436 case POPCOUNT:
34437 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34438 return false;
34440 case PARITY:
34441 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34442 return false;
34444 case NOT:
34445 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34446 *total = 0;
34447 else
34448 *total = COSTS_N_INSNS (1);
34449 return false;
34451 case AND:
34452 if (CONST_INT_P (XEXP (x, 1)))
34454 rtx left = XEXP (x, 0);
34455 rtx_code left_code = GET_CODE (left);
34457 /* rotate-and-mask: 1 insn. */
34458 if ((left_code == ROTATE
34459 || left_code == ASHIFT
34460 || left_code == LSHIFTRT)
34461 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34463 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34464 if (!CONST_INT_P (XEXP (left, 1)))
34465 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34466 *total += COSTS_N_INSNS (1);
34467 return true;
34470 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34471 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34472 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34473 || (val & 0xffff) == val
34474 || (val & 0xffff0000) == val
34475 || ((val & 0xffff) == 0 && mode == SImode))
34477 *total = rtx_cost (left, mode, AND, 0, speed);
34478 *total += COSTS_N_INSNS (1);
34479 return true;
34482 /* 2 insns. */
34483 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34485 *total = rtx_cost (left, mode, AND, 0, speed);
34486 *total += COSTS_N_INSNS (2);
34487 return true;
34491 *total = COSTS_N_INSNS (1);
34492 return false;
34494 case IOR:
34495 /* FIXME */
34496 *total = COSTS_N_INSNS (1);
34497 return true;
34499 case CLZ:
34500 case XOR:
34501 case ZERO_EXTRACT:
34502 *total = COSTS_N_INSNS (1);
34503 return false;
34505 case ASHIFT:
34506 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34507 the sign extend and shift separately within the insn. */
34508 if (TARGET_EXTSWSLI && mode == DImode
34509 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34510 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34512 *total = 0;
34513 return false;
34515 /* fall through */
34517 case ASHIFTRT:
34518 case LSHIFTRT:
34519 case ROTATE:
34520 case ROTATERT:
34521 /* Handle mul_highpart. */
34522 if (outer_code == TRUNCATE
34523 && GET_CODE (XEXP (x, 0)) == MULT)
34525 if (mode == DImode)
34526 *total = rs6000_cost->muldi;
34527 else
34528 *total = rs6000_cost->mulsi;
34529 return true;
34531 else if (outer_code == AND)
34532 *total = 0;
34533 else
34534 *total = COSTS_N_INSNS (1);
34535 return false;
34537 case SIGN_EXTEND:
34538 case ZERO_EXTEND:
34539 if (GET_CODE (XEXP (x, 0)) == MEM)
34540 *total = 0;
34541 else
34542 *total = COSTS_N_INSNS (1);
34543 return false;
34545 case COMPARE:
34546 case NEG:
34547 case ABS:
34548 if (!FLOAT_MODE_P (mode))
34550 *total = COSTS_N_INSNS (1);
34551 return false;
34553 /* FALLTHRU */
34555 case FLOAT:
34556 case UNSIGNED_FLOAT:
34557 case FIX:
34558 case UNSIGNED_FIX:
34559 case FLOAT_TRUNCATE:
34560 *total = rs6000_cost->fp;
34561 return false;
34563 case FLOAT_EXTEND:
34564 if (mode == DFmode)
34565 *total = rs6000_cost->sfdf_convert;
34566 else
34567 *total = rs6000_cost->fp;
34568 return false;
34570 case UNSPEC:
34571 switch (XINT (x, 1))
34573 case UNSPEC_FRSP:
34574 *total = rs6000_cost->fp;
34575 return true;
34577 default:
34578 break;
34580 break;
34582 case CALL:
34583 case IF_THEN_ELSE:
34584 if (!speed)
34586 *total = COSTS_N_INSNS (1);
34587 return true;
34589 else if (FLOAT_MODE_P (mode)
34590 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
34592 *total = rs6000_cost->fp;
34593 return false;
34595 break;
34597 case NE:
34598 case EQ:
34599 case GTU:
34600 case LTU:
34601 /* Carry bit requires mode == Pmode.
34602 NEG or PLUS already counted so only add one. */
34603 if (mode == Pmode
34604 && (outer_code == NEG || outer_code == PLUS))
34606 *total = COSTS_N_INSNS (1);
34607 return true;
34609 if (outer_code == SET)
34611 if (XEXP (x, 1) == const0_rtx)
34613 if (TARGET_ISEL && !TARGET_MFCRF)
34614 *total = COSTS_N_INSNS (8);
34615 else
34616 *total = COSTS_N_INSNS (2);
34617 return true;
34619 else
34621 *total = COSTS_N_INSNS (3);
34622 return false;
34625 /* FALLTHRU */
34627 case GT:
34628 case LT:
34629 case UNORDERED:
34630 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
34632 if (TARGET_ISEL && !TARGET_MFCRF)
34633 *total = COSTS_N_INSNS (8);
34634 else
34635 *total = COSTS_N_INSNS (2);
34636 return true;
34638 /* CC COMPARE. */
34639 if (outer_code == COMPARE)
34641 *total = 0;
34642 return true;
34644 break;
34646 default:
34647 break;
34650 return false;
34653 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34655 static bool
34656 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34657 int opno, int *total, bool speed)
34659 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34661 fprintf (stderr,
34662 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34663 "opno = %d, total = %d, speed = %s, x:\n",
34664 ret ? "complete" : "scan inner",
34665 GET_MODE_NAME (mode),
34666 GET_RTX_NAME (outer_code),
34667 opno,
34668 *total,
34669 speed ? "true" : "false");
34671 debug_rtx (x);
34673 return ret;
34676 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34678 static int
34679 rs6000_debug_address_cost (rtx x, machine_mode mode,
34680 addr_space_t as, bool speed)
34682 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34684 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34685 ret, speed ? "true" : "false");
34686 debug_rtx (x);
34688 return ret;
34692 /* A C expression returning the cost of moving data from a register of class
34693 FROM to one of class TO. */
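/* For example, a single-register GPR-to-GPR move costs 2 (one
   instruction), a CR_REGS move costs 4 because of the extra shift,
   and on POWER6 and later the LR/CTR classes are priced above memory
   so spills prefer the stack over those registers. */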
34695 static int
34696 rs6000_register_move_cost (machine_mode mode,
34697 reg_class_t from, reg_class_t to)
34699 int ret;
34701 if (TARGET_DEBUG_COST)
34702 dbg_cost_ctrl++;
34704 /* Moves from/to GENERAL_REGS. */
34705 if (reg_classes_intersect_p (to, GENERAL_REGS)
34706 || reg_classes_intersect_p (from, GENERAL_REGS))
34708 reg_class_t rclass = from;
34710 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34711 rclass = to;
34713 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34714 ret = (rs6000_memory_move_cost (mode, rclass, false)
34715 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34717 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34718 shift. */
34719 else if (rclass == CR_REGS)
34720 ret = 4;
34722 /* For those processors that have slow LR/CTR moves, make them more
34723 expensive than memory in order to bias spills to memory. */
34724 else if ((rs6000_cpu == PROCESSOR_POWER6
34725 || rs6000_cpu == PROCESSOR_POWER7
34726 || rs6000_cpu == PROCESSOR_POWER8
34727 || rs6000_cpu == PROCESSOR_POWER9)
34728 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34729 ret = 6 * hard_regno_nregs[0][mode];
34731 else
34732 /* A move will cost one instruction per GPR moved. */
34733 ret = 2 * hard_regno_nregs[0][mode];
34736 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34737 else if (VECTOR_MEM_VSX_P (mode)
34738 && reg_classes_intersect_p (to, VSX_REGS)
34739 && reg_classes_intersect_p (from, VSX_REGS))
34740 ret = 2 * hard_regno_nregs[32][mode];
34742 /* Moving between two similar registers is just one instruction. */
34743 else if (reg_classes_intersect_p (to, from))
34744 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34746 /* Everything else has to go through GENERAL_REGS. */
34747 else
34748 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34749 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34751 if (TARGET_DEBUG_COST)
34753 if (dbg_cost_ctrl == 1)
34754 fprintf (stderr,
34755 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34756 ret, GET_MODE_NAME (mode), reg_class_names[from],
34757 reg_class_names[to]);
34758 dbg_cost_ctrl--;
34761 return ret;
34764 /* A C expression returning the cost of moving data of MODE from a register to
34765 or from memory. */
34767 static int
34768 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34769 bool in ATTRIBUTE_UNUSED)
34771 int ret;
34773 if (TARGET_DEBUG_COST)
34774 dbg_cost_ctrl++;
34776 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34777 ret = 4 * hard_regno_nregs[0][mode];
34778 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34779 || reg_classes_intersect_p (rclass, VSX_REGS)))
34780 ret = 4 * hard_regno_nregs[32][mode];
34781 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34782 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
34783 else
34784 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34786 if (TARGET_DEBUG_COST)
34788 if (dbg_cost_ctrl == 1)
34789 fprintf (stderr,
34790 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34791 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34792 dbg_cost_ctrl--;
34795 return ret;
34798 /* Returns a code for a target-specific builtin that implements
34799 reciprocal of the function, or NULL_TREE if not available. */
34801 static tree
34802 rs6000_builtin_reciprocal (tree fndecl)
34804 switch (DECL_FUNCTION_CODE (fndecl))
34806 case VSX_BUILTIN_XVSQRTDP:
34807 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34808 return NULL_TREE;
34810 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34812 case VSX_BUILTIN_XVSQRTSP:
34813 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34814 return NULL_TREE;
34816 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34818 default:
34819 return NULL_TREE;
34823 /* Load up a constant. If the mode is a vector mode, splat the value across
34824 all of the vector elements. */
34826 static rtx
34827 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34829 rtx reg;
34831 if (mode == SFmode || mode == DFmode)
34833 rtx d = const_double_from_real_value (dconst, mode);
34834 reg = force_reg (mode, d);
34836 else if (mode == V4SFmode)
34838 rtx d = const_double_from_real_value (dconst, SFmode);
34839 rtvec v = gen_rtvec (4, d, d, d, d);
34840 reg = gen_reg_rtx (mode);
34841 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34843 else if (mode == V2DFmode)
34845 rtx d = const_double_from_real_value (dconst, DFmode);
34846 rtvec v = gen_rtvec (2, d, d);
34847 reg = gen_reg_rtx (mode);
34848 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34850 else
34851 gcc_unreachable ();
34853 return reg;
34856 /* Generate an FMA instruction. */
34858 static void
34859 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34861 machine_mode mode = GET_MODE (target);
34862 rtx dst;
34864 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34865 gcc_assert (dst != NULL);
34867 if (dst != target)
34868 emit_move_insn (target, dst);
34871 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34873 static void
34874 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34876 machine_mode mode = GET_MODE (dst);
34877 rtx r;
34879 /* This is a tad more complicated, since the fnma_optab is for
34880 a different expression: fma(-m1, m2, a), which is the same
34881 thing except in the case of signed zeros.
34883 Fortunately we know that if FMA is supported, FNMSUB is
34884 also supported in the ISA. Just expand it directly. */
34886 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34888 r = gen_rtx_NEG (mode, a);
34889 r = gen_rtx_FMA (mode, m1, m2, r);
34890 r = gen_rtx_NEG (mode, r);
34891 emit_insn (gen_rtx_SET (dst, r));
34894 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34895 add a reg_note saying that this was a division. Support both scalar and
34896 vector divide. Assumes no trapping math and finite arguments. */
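/* A sketch of the expansion for SFmode with -mrecip-precision, where
   a single refinement pass suffices:

     x0  = fres (d)        ; hardware 1/d estimate
     u   = n * x0
     v   = n - d * u       ; via FNMSUB
     dst = v * x0 + u      ; via FMA

   Each additional pass squares the error term, roughly doubling the
   number of correct bits in the estimate. */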
34898 void
34899 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34901 machine_mode mode = GET_MODE (dst);
34902 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34903 int i;
34905 /* Low precision estimates guarantee 5 bits of accuracy. High
34906 precision estimates guarantee 14 bits of accuracy. SFmode
34907 requires 23 bits of accuracy. DFmode requires 52 bits of
34908 accuracy. Each pass at least doubles the accuracy, leading
34909 to the following. */
34910 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34911 if (mode == DFmode || mode == V2DFmode)
34912 passes++;
34914 enum insn_code code = optab_handler (smul_optab, mode);
34915 insn_gen_fn gen_mul = GEN_FCN (code);
34917 gcc_assert (code != CODE_FOR_nothing);
34919 one = rs6000_load_constant_and_splat (mode, dconst1);
34921 /* x0 = 1./d estimate */
34922 x0 = gen_reg_rtx (mode);
34923 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34924 UNSPEC_FRES)));
34926 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34927 if (passes > 1) {
34929 /* e0 = 1. - d * x0 */
34930 e0 = gen_reg_rtx (mode);
34931 rs6000_emit_nmsub (e0, d, x0, one);
34933 /* x1 = x0 + e0 * x0 */
34934 x1 = gen_reg_rtx (mode);
34935 rs6000_emit_madd (x1, e0, x0, x0);
34937 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34938 ++i, xprev = xnext, eprev = enext) {
34940 /* enext = eprev * eprev */
34941 enext = gen_reg_rtx (mode);
34942 emit_insn (gen_mul (enext, eprev, eprev));
34944 /* xnext = xprev + enext * xprev */
34945 xnext = gen_reg_rtx (mode);
34946 rs6000_emit_madd (xnext, enext, xprev, xprev);
34949 } else
34950 xprev = x0;
34952 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34954 /* u = n * xprev */
34955 u = gen_reg_rtx (mode);
34956 emit_insn (gen_mul (u, n, xprev));
34958 /* v = n - (d * u) */
34959 v = gen_reg_rtx (mode);
34960 rs6000_emit_nmsub (v, d, u, n);
34962 /* dst = (v * xprev) + u */
34963 rs6000_emit_madd (dst, v, xprev, u);
34965 if (note_p)
34966 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34969 /* Goldschmidt's Algorithm for single/double-precision floating point
34970 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
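/* A sketch of the single-pass sqrt (non-reciprocal) case below:

     e   = frsqrte (src)   ; 1/sqrt estimate
     g   = e * src         ; sqrt estimate
     h   = e * 0.5         ; 1/(2*sqrt) estimate
     t   = 0.5 - g * h     ; via FNMSUB
     dst = g * t + g       ; via FMA

   G converges to sqrt(src) and H to 1/(2*sqrt(src)); the zero filter
   below keeps the infinite estimate for src == 0.0 from turning
   0 * inf into a NaN. */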
34972 void
34973 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34975 machine_mode mode = GET_MODE (src);
34976 rtx e = gen_reg_rtx (mode);
34977 rtx g = gen_reg_rtx (mode);
34978 rtx h = gen_reg_rtx (mode);
34980 /* Low precision estimates guarantee 5 bits of accuracy. High
34981 precision estimates guarantee 14 bits of accuracy. SFmode
34982 requires 23 bits of accuracy. DFmode requires 52 bits of
34983 accuracy. Each pass at least doubles the accuracy, leading
34984 to the following. */
34985 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34986 if (mode == DFmode || mode == V2DFmode)
34987 passes++;
34989 int i;
34990 rtx mhalf;
34991 enum insn_code code = optab_handler (smul_optab, mode);
34992 insn_gen_fn gen_mul = GEN_FCN (code);
34994 gcc_assert (code != CODE_FOR_nothing);
34996 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34998 /* e = rsqrt estimate */
34999 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35000 UNSPEC_RSQRT)));
35002 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35003 if (!recip)
35005 rtx zero = force_reg (mode, CONST0_RTX (mode));
35007 if (mode == SFmode)
35009 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35010 e, zero, mode, 0);
35011 if (target != e)
35012 emit_move_insn (e, target);
35014 else
35016 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35017 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35021 /* g = sqrt estimate. */
35022 emit_insn (gen_mul (g, e, src));
35023 /* h = 1/(2*sqrt) estimate. */
35024 emit_insn (gen_mul (h, e, mhalf));
35026 if (recip)
35028 if (passes == 1)
35030 rtx t = gen_reg_rtx (mode);
35031 rs6000_emit_nmsub (t, g, h, mhalf);
35032 /* Apply correction directly to 1/rsqrt estimate. */
35033 rs6000_emit_madd (dst, e, t, e);
35035 else
35037 for (i = 0; i < passes; i++)
35039 rtx t1 = gen_reg_rtx (mode);
35040 rtx g1 = gen_reg_rtx (mode);
35041 rtx h1 = gen_reg_rtx (mode);
35043 rs6000_emit_nmsub (t1, g, h, mhalf);
35044 rs6000_emit_madd (g1, g, t1, g);
35045 rs6000_emit_madd (h1, h, t1, h);
35047 g = g1;
35048 h = h1;
35050 /* Multiply by 2 for 1/rsqrt. */
35051 emit_insn (gen_add3_insn (dst, h, h));
35054 else
35056 rtx t = gen_reg_rtx (mode);
35057 rs6000_emit_nmsub (t, g, h, mhalf);
35058 rs6000_emit_madd (dst, g, t, g);
35061 return;
35064 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35065 (Power7) targets. DST is the target, and SRC is the argument operand. */
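/* In the popcntb fallback below, popcntb leaves a per-byte population
   count in each byte; multiplying by 0x01010101 (or its 64-bit analog)
   accumulates every byte count into the top byte.  For example, in
   SImode, src = 0xf0f0f0f0 yields tmp1 = 0x04040404, the multiply
   yields 0x100c0804, and the shift by 24 extracts 0x10 = 16, the
   correct bit count.  */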
35067 void
35068 rs6000_emit_popcount (rtx dst, rtx src)
35070 machine_mode mode = GET_MODE (dst);
35071 rtx tmp1, tmp2;
35073 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35074 if (TARGET_POPCNTD)
35076 if (mode == SImode)
35077 emit_insn (gen_popcntdsi2 (dst, src));
35078 else
35079 emit_insn (gen_popcntddi2 (dst, src));
35080 return;
35083 tmp1 = gen_reg_rtx (mode);
35085 if (mode == SImode)
35087 emit_insn (gen_popcntbsi2 (tmp1, src));
35088 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35089 NULL_RTX, 0);
35090 tmp2 = force_reg (SImode, tmp2);
35091 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35093 else
35095 emit_insn (gen_popcntbdi2 (tmp1, src));
35096 tmp2 = expand_mult (DImode, tmp1,
35097 GEN_INT ((HOST_WIDE_INT)
35098 0x01010101 << 32 | 0x01010101),
35099 NULL_RTX, 0);
35100 tmp2 = force_reg (DImode, tmp2);
35101 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35106 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35107 target, and SRC is the argument operand. */
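/* Both strategies below extract the low bit of the population count:
   either compute popcount directly and mask with 1, or fold the
   popcntb byte counts together with shift/xor pairs (xor-ing two
   halves preserves the parity of their sum) before masking with 1.  */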
35109 void
35110 rs6000_emit_parity (rtx dst, rtx src)
35112 machine_mode mode = GET_MODE (dst);
35113 rtx tmp;
35115 tmp = gen_reg_rtx (mode);
35117 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35118 if (TARGET_CMPB)
35120 if (mode == SImode)
35122 emit_insn (gen_popcntbsi2 (tmp, src));
35123 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35125 else
35127 emit_insn (gen_popcntbdi2 (tmp, src));
35128 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35130 return;
35133 if (mode == SImode)
35135 /* Is mult+shift >= shift+xor+shift+xor? */
35136 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35138 rtx tmp1, tmp2, tmp3, tmp4;
35140 tmp1 = gen_reg_rtx (SImode);
35141 emit_insn (gen_popcntbsi2 (tmp1, src));
35143 tmp2 = gen_reg_rtx (SImode);
35144 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35145 tmp3 = gen_reg_rtx (SImode);
35146 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35148 tmp4 = gen_reg_rtx (SImode);
35149 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35150 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35152 else
35153 rs6000_emit_popcount (tmp, src);
35154 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35156 else
35158 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35159 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35161 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35163 tmp1 = gen_reg_rtx (DImode);
35164 emit_insn (gen_popcntbdi2 (tmp1, src));
35166 tmp2 = gen_reg_rtx (DImode);
35167 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35168 tmp3 = gen_reg_rtx (DImode);
35169 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35171 tmp4 = gen_reg_rtx (DImode);
35172 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35173 tmp5 = gen_reg_rtx (DImode);
35174 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35176 tmp6 = gen_reg_rtx (DImode);
35177 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35178 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35180 else
35181 rs6000_emit_popcount (tmp, src);
35182 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35186 /* Expand an Altivec constant permutation for little endian mode.
35187 There are two issues: First, the two input operands must be
35188 swapped so that together they form a double-wide array in LE
35189 order. Second, the vperm instruction has surprising behavior
35190 in LE mode: it interprets the elements of the source vectors
35191 in BE mode ("left to right") and interprets the elements of
35192 the destination vector in LE mode ("right to left"). To
35193 correct for this, we must subtract each element of the permute
35194 control vector from 31.
35196 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35197 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35198 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35199 serve as the permute control vector. Then, in BE mode,
35201 vperm 9,10,11,12
35203 places the desired result in vr9. However, in LE mode the
35204 vector contents will be
35206 vr10 = 00000003 00000002 00000001 00000000
35207 vr11 = 00000007 00000006 00000005 00000004
35209 The result of the vperm using the same permute control vector is
35211 vr9 = 05000000 07000000 01000000 03000000
35213 That is, the leftmost 4 bytes of vr10 are interpreted as the
35214 source for the rightmost 4 bytes of vr9, and so on.
35216 If we change the permute control vector to
35218 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35220 and issue
35222 vperm 9,11,10,12
35224 we get the desired
35226 vr9 = 00000006 00000004 00000002 00000000. */
35228 void
35229 altivec_expand_vec_perm_const_le (rtx operands[4])
35231 unsigned int i;
35232 rtx perm[16];
35233 rtx constv, unspec;
35234 rtx target = operands[0];
35235 rtx op0 = operands[1];
35236 rtx op1 = operands[2];
35237 rtx sel = operands[3];
35239 /* Unpack and adjust the constant selector. */
35240 for (i = 0; i < 16; ++i)
35242 rtx e = XVECEXP (sel, 0, i);
35243 unsigned int elt = 31 - (INTVAL (e) & 31);
35244 perm[i] = GEN_INT (elt);
35247 /* Expand to a permute, swapping the inputs and using the
35248 adjusted selector. */
35249 if (!REG_P (op0))
35250 op0 = force_reg (V16QImode, op0);
35251 if (!REG_P (op1))
35252 op1 = force_reg (V16QImode, op1);
35254 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35255 constv = force_reg (V16QImode, constv);
35256 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35257 UNSPEC_VPERM);
35258 if (!REG_P (target))
35260 rtx tmp = gen_reg_rtx (V16QImode);
35261 emit_move_insn (tmp, unspec);
35262 unspec = tmp;
35265 emit_move_insn (target, unspec);
35268 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35269 permute control vector. But here it's not a constant, so we must
35270 generate a vector NAND or NOR to do the adjustment. */
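/* Why a plain bitwise complement suffices here: vperm uses only the
   low five bits of each selector byte, and ~e = 255 - e agrees with
   31 - e modulo 32, so a NAND/NOR of the selector with itself matches
   the constant-case subtraction from 31.  */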
35272 void
35273 altivec_expand_vec_perm_le (rtx operands[4])
35275 rtx notx, iorx, unspec;
35276 rtx target = operands[0];
35277 rtx op0 = operands[1];
35278 rtx op1 = operands[2];
35279 rtx sel = operands[3];
35280 rtx tmp = target;
35281 rtx norreg = gen_reg_rtx (V16QImode);
35282 machine_mode mode = GET_MODE (target);
35284 /* Get everything in regs so the pattern matches. */
35285 if (!REG_P (op0))
35286 op0 = force_reg (mode, op0);
35287 if (!REG_P (op1))
35288 op1 = force_reg (mode, op1);
35289 if (!REG_P (sel))
35290 sel = force_reg (V16QImode, sel);
35291 if (!REG_P (target))
35292 tmp = gen_reg_rtx (mode);
35294 if (TARGET_P9_VECTOR)
35296 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
35297 UNSPEC_VPERMR);
35299 else
35301 /* Invert the selector with a VNAND if available, else a VNOR.
35302 The VNAND is preferred for future fusion opportunities. */
35303 notx = gen_rtx_NOT (V16QImode, sel);
35304 iorx = (TARGET_P8_VECTOR
35305 ? gen_rtx_IOR (V16QImode, notx, notx)
35306 : gen_rtx_AND (V16QImode, notx, notx));
35307 emit_insn (gen_rtx_SET (norreg, iorx));
35309 /* Permute with operands reversed and adjusted selector. */
35310 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35311 UNSPEC_VPERM);
35314 /* Copy into target, possibly by way of a register. */
35315 if (!REG_P (target))
35317 emit_move_insn (tmp, unspec);
35318 unspec = tmp;
35321 emit_move_insn (target, unspec);
35324 /* Expand an Altivec constant permutation. Return true if we match
35325 an efficient implementation; false to fall back to VPERM. */
35327 bool
35328 altivec_expand_vec_perm_const (rtx operands[4])
35330 struct altivec_perm_insn {
35331 HOST_WIDE_INT mask;
35332 enum insn_code impl;
35333 unsigned char perm[16];
35335 static const struct altivec_perm_insn patterns[] = {
35336 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35337 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35338 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35339 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35340 { OPTION_MASK_ALTIVEC,
35341 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35342 : CODE_FOR_altivec_vmrglb_direct),
35343 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35344 { OPTION_MASK_ALTIVEC,
35345 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35346 : CODE_FOR_altivec_vmrglh_direct),
35347 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35348 { OPTION_MASK_ALTIVEC,
35349 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35350 : CODE_FOR_altivec_vmrglw_direct),
35351 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35352 { OPTION_MASK_ALTIVEC,
35353 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35354 : CODE_FOR_altivec_vmrghb_direct),
35355 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35356 { OPTION_MASK_ALTIVEC,
35357 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35358 : CODE_FOR_altivec_vmrghh_direct),
35359 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35360 { OPTION_MASK_ALTIVEC,
35361 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35362 : CODE_FOR_altivec_vmrghw_direct),
35363 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35364 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
35365 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35366 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
35367 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35370 unsigned int i, j, elt, which;
35371 unsigned char perm[16];
35372 rtx target, op0, op1, sel, x;
35373 bool one_vec;
35375 target = operands[0];
35376 op0 = operands[1];
35377 op1 = operands[2];
35378 sel = operands[3];
35380 /* Unpack the constant selector. */
35381 for (i = which = 0; i < 16; ++i)
35383 rtx e = XVECEXP (sel, 0, i);
35384 elt = INTVAL (e) & 31;
35385 which |= (elt < 16 ? 1 : 2);
35386 perm[i] = elt;
35389 /* Simplify the constant selector based on operands. */
35390 switch (which)
35392 default:
35393 gcc_unreachable ();
35395 case 3:
35396 one_vec = false;
35397 if (!rtx_equal_p (op0, op1))
35398 break;
35399 /* FALLTHRU */
35401 case 2:
35402 for (i = 0; i < 16; ++i)
35403 perm[i] &= 15;
35404 op0 = op1;
35405 one_vec = true;
35406 break;
35408 case 1:
35409 op1 = op0;
35410 one_vec = true;
35411 break;
35414 /* Look for splat patterns. */
35415 if (one_vec)
35417 elt = perm[0];
35419 for (i = 0; i < 16; ++i)
35420 if (perm[i] != elt)
35421 break;
35422 if (i == 16)
35424 if (!BYTES_BIG_ENDIAN)
35425 elt = 15 - elt;
35426 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35427 return true;
35430 if (elt % 2 == 0)
35432 for (i = 0; i < 16; i += 2)
35433 if (perm[i] != elt || perm[i + 1] != elt + 1)
35434 break;
35435 if (i == 16)
35437 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35438 x = gen_reg_rtx (V8HImode);
35439 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35440 GEN_INT (field)));
35441 emit_move_insn (target, gen_lowpart (V16QImode, x));
35442 return true;
35446 if (elt % 4 == 0)
35448 for (i = 0; i < 16; i += 4)
35449 if (perm[i] != elt
35450 || perm[i + 1] != elt + 1
35451 || perm[i + 2] != elt + 2
35452 || perm[i + 3] != elt + 3)
35453 break;
35454 if (i == 16)
35456 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35457 x = gen_reg_rtx (V4SImode);
35458 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35459 GEN_INT (field)));
35460 emit_move_insn (target, gen_lowpart (V16QImode, x));
35461 return true;
35466 /* Look for merge and pack patterns. */
35467 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35469 bool swapped;
35471 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35472 continue;
35474 elt = patterns[j].perm[0];
35475 if (perm[0] == elt)
35476 swapped = false;
35477 else if (perm[0] == elt + 16)
35478 swapped = true;
35479 else
35480 continue;
35481 for (i = 1; i < 16; ++i)
35483 elt = patterns[j].perm[i];
35484 if (swapped)
35485 elt = (elt >= 16 ? elt - 16 : elt + 16);
35486 else if (one_vec && elt >= 16)
35487 elt -= 16;
35488 if (perm[i] != elt)
35489 break;
35491 if (i == 16)
35493 enum insn_code icode = patterns[j].impl;
35494 machine_mode omode = insn_data[icode].operand[0].mode;
35495 machine_mode imode = insn_data[icode].operand[1].mode;
35497 /* For little-endian, don't use vpkuwum and vpkuhum if the
35498 underlying vector type is not V4SI or V8HI, respectively.
35499 For example, using vpkuwum with a V8HI picks up the even
35500 halfwords (BE numbering) when the even halfwords (LE
35501 numbering) are what we need. */
35502 if (!BYTES_BIG_ENDIAN
35503 && icode == CODE_FOR_altivec_vpkuwum_direct
35504 && ((GET_CODE (op0) == REG
35505 && GET_MODE (op0) != V4SImode)
35506 || (GET_CODE (op0) == SUBREG
35507 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35508 continue;
35509 if (!BYTES_BIG_ENDIAN
35510 && icode == CODE_FOR_altivec_vpkuhum_direct
35511 && ((GET_CODE (op0) == REG
35512 && GET_MODE (op0) != V8HImode)
35513 || (GET_CODE (op0) == SUBREG
35514 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35515 continue;
35517 /* For little-endian, the two input operands must be swapped
35518 (or swapped back) to ensure proper right-to-left numbering
35519 from 0 to 2N-1. */
35520 if (swapped ^ !BYTES_BIG_ENDIAN)
35521 std::swap (op0, op1);
35522 if (imode != V16QImode)
35524 op0 = gen_lowpart (imode, op0);
35525 op1 = gen_lowpart (imode, op1);
35527 if (omode == V16QImode)
35528 x = target;
35529 else
35530 x = gen_reg_rtx (omode);
35531 emit_insn (GEN_FCN (icode) (x, op0, op1));
35532 if (omode != V16QImode)
35533 emit_move_insn (target, gen_lowpart (V16QImode, x));
35534 return true;
35538 if (!BYTES_BIG_ENDIAN)
35540 altivec_expand_vec_perm_const_le (operands);
35541 return true;
35544 return false;
35547 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
35548 Return true if we match an efficient implementation. */
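/* Selector encoding for this two-element case: bit 1 of each selector
   picks the operand and bit 0 picks the element within it, so e.g.
   perm0 = 0 and perm1 = 3 selects { op0[0], op1[1] }.  */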
35550 static bool
35551 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35552 unsigned char perm0, unsigned char perm1)
35554 rtx x;
35556 /* If both selectors come from the same operand, fold to single op. */
35557 if ((perm0 & 2) == (perm1 & 2))
35559 if (perm0 & 2)
35560 op0 = op1;
35561 else
35562 op1 = op0;
35564 /* If both operands are equal, fold to simpler permutation. */
35565 if (rtx_equal_p (op0, op1))
35567 perm0 = perm0 & 1;
35568 perm1 = (perm1 & 1) + 2;
35570 /* If the first selector comes from the second operand, swap. */
35571 else if (perm0 & 2)
35573 if (perm1 & 2)
35574 return false;
35575 perm0 -= 2;
35576 perm1 += 2;
35577 std::swap (op0, op1);
35579 /* If the second selector does not come from the second operand, fail. */
35580 else if ((perm1 & 2) == 0)
35581 return false;
35583 /* Success! */
35584 if (target != NULL)
35586 machine_mode vmode, dmode;
35587 rtvec v;
35589 vmode = GET_MODE (target);
35590 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35591 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
35592 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35593 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35594 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35595 emit_insn (gen_rtx_SET (target, x));
35597 return true;
35600 bool
35601 rs6000_expand_vec_perm_const (rtx operands[4])
35603 rtx target, op0, op1, sel;
35604 unsigned char perm0, perm1;
35606 target = operands[0];
35607 op0 = operands[1];
35608 op1 = operands[2];
35609 sel = operands[3];
35611 /* Unpack the constant selector. */
35612 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
35613 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
35615 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
35618 /* Test whether a constant permutation is supported. */
35620 static bool
35621 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
35622 const unsigned char *sel)
35624 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35625 if (TARGET_ALTIVEC)
35626 return true;
35628 /* Check for ps_merge* or evmerge* insns. */
35629 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
35630 || (TARGET_SPE && vmode == V2SImode))
35632 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35633 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35634 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
35637 return false;
35640 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35642 static void
35643 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35644 machine_mode vmode, unsigned nelt, rtx perm[])
35646 machine_mode imode;
35647 rtx x;
35649 imode = vmode;
35650 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
35652 imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
35653 imode = mode_for_vector (imode, nelt);
35656 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
35657 x = expand_vec_perm (vmode, op0, op1, x, target);
35658 if (x != target)
35659 emit_move_insn (target, x);
35662 /* Expand an extract even operation. */
35664 void
35665 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35667 machine_mode vmode = GET_MODE (target);
35668 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35669 rtx perm[16];
35671 for (i = 0; i < nelt; i++)
35672 perm[i] = GEN_INT (i * 2);
35674 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
35677 /* Expand a vector interleave operation. */
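/* For instance, with V4SI operands, highp selects elements {0,4,1,5}
   (the high halves in big-endian element numbering) and !highp
   selects {2,6,3,7}.  */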
35679 void
35680 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35682 machine_mode vmode = GET_MODE (target);
35683 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35684 rtx perm[16];
35686 high = (highp ? 0 : nelt / 2);
35687 for (i = 0; i < nelt / 2; i++)
35689 perm[i * 2] = GEN_INT (i + high);
35690 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
35693 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
35696 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
35697 void
35698 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35700 HOST_WIDE_INT hwi_scale (scale);
35701 REAL_VALUE_TYPE r_pow;
35702 rtvec v = rtvec_alloc (2);
35703 rtx elt;
35704 rtx scale_vec = gen_reg_rtx (V2DFmode);
35705 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35706 elt = const_double_from_real_value (r_pow, DFmode);
35707 RTVEC_ELT (v, 0) = elt;
35708 RTVEC_ELT (v, 1) = elt;
35709 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35710 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35713 /* Return an RTX representing where to find the function value of a
35714 function returning MODE. */
35715 static rtx
35716 rs6000_complex_function_value (machine_mode mode)
35718 unsigned int regno;
35719 rtx r1, r2;
35720 machine_mode inner = GET_MODE_INNER (mode);
35721 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35723 if (TARGET_FLOAT128
35724 && (mode == KCmode
35725 || (mode == TCmode && TARGET_IEEEQUAD)))
35726 regno = ALTIVEC_ARG_RETURN;
35728 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35729 regno = FP_ARG_RETURN;
35731 else
35733 regno = GP_ARG_RETURN;
35735 /* 32-bit is OK since it'll go in r3/r4. */
35736 if (TARGET_32BIT && inner_bytes >= 4)
35737 return gen_rtx_REG (mode, regno);
35740 if (inner_bytes >= 8)
35741 return gen_rtx_REG (mode, regno);
35743 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35744 const0_rtx);
35745 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35746 GEN_INT (inner_bytes));
35747 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35750 /* Return an rtx describing a return value of MODE as a PARALLEL
35751 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35752 stride REG_STRIDE. */
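/* For example, a DImode value split across 32-bit GPRs r3/r4 comes
   back as
     (parallel [(expr_list (reg:SI 3) (const_int 0))
                (expr_list (reg:SI 4) (const_int 4))]).  */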
35754 static rtx
35755 rs6000_parallel_return (machine_mode mode,
35756 int n_elts, machine_mode elt_mode,
35757 unsigned int regno, unsigned int reg_stride)
35759 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35761 int i;
35762 for (i = 0; i < n_elts; i++)
35764 rtx r = gen_rtx_REG (elt_mode, regno);
35765 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35766 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35767 regno += reg_stride;
35770 return par;
35773 /* Target hook for TARGET_FUNCTION_VALUE.
35775 On the SPE, both FPs and vectors are returned in r3.
35777 On RS/6000 an integer value is in r3 and a floating-point value is in
35778 fp1, unless -msoft-float. */
35780 static rtx
35781 rs6000_function_value (const_tree valtype,
35782 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35783 bool outgoing ATTRIBUTE_UNUSED)
35785 machine_mode mode;
35786 unsigned int regno;
35787 machine_mode elt_mode;
35788 int n_elts;
35790 /* Special handling for structs in darwin64. */
35791 if (TARGET_MACHO
35792 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35794 CUMULATIVE_ARGS valcum;
35795 rtx valret;
35797 valcum.words = 0;
35798 valcum.fregno = FP_ARG_MIN_REG;
35799 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35800 /* Do a trial code generation as if this were going to be passed as
35801 an argument; if any part goes in memory, we return NULL. */
35802 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35803 if (valret)
35804 return valret;
35805 /* Otherwise fall through to standard ABI rules. */
35808 mode = TYPE_MODE (valtype);
35810 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35811 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35813 int first_reg, n_regs;
35815 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35817 /* _Decimal128 must use even/odd register pairs. */
35818 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35819 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35821 else
35823 first_reg = ALTIVEC_ARG_RETURN;
35824 n_regs = 1;
35827 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35830 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
35831 if (TARGET_32BIT && TARGET_POWERPC64)
35832 switch (mode)
35834 default:
35835 break;
35836 case DImode:
35837 case SCmode:
35838 case DCmode:
35839 case TCmode:
35840 int count = GET_MODE_SIZE (mode) / 4;
35841 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35844 if ((INTEGRAL_TYPE_P (valtype)
35845 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35846 || POINTER_TYPE_P (valtype))
35847 mode = TARGET_32BIT ? SImode : DImode;
35849 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35850 /* _Decimal128 must use an even/odd register pair. */
35851 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35852 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
35853 && !FLOAT128_VECTOR_P (mode)
35854 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
35855 regno = FP_ARG_RETURN;
35856 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35857 && targetm.calls.split_complex_arg)
35858 return rs6000_complex_function_value (mode);
35859 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35860 return register is used in both cases, and we won't see V2DImode/V2DFmode
35861 for pure altivec, combine the two cases. */
35862 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35863 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35864 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35865 regno = ALTIVEC_ARG_RETURN;
35866 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
35867 && (mode == DFmode || mode == DCmode
35868 || FLOAT128_IBM_P (mode) || mode == TCmode))
35869 return spe_build_register_parallel (mode, GP_ARG_RETURN);
35870 else
35871 regno = GP_ARG_RETURN;
35873 return gen_rtx_REG (mode, regno);
35876 /* Define how to find the value returned by a library function
35877 assuming the value has mode MODE. */
35878 rtx
35879 rs6000_libcall_value (machine_mode mode)
35881 unsigned int regno;
35883 /* Long long return values need to be split in the -mpowerpc64, 32-bit ABI. */
35884 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35885 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35887 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35888 /* _Decimal128 must use an even/odd register pair. */
35889 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35890 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
35891 && TARGET_HARD_FLOAT && TARGET_FPRS
35892 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
35893 regno = FP_ARG_RETURN;
35894 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35895 return register is used in both cases, and we won't see V2DImode/V2DFmode
35896 for pure altivec, combine the two cases. */
35897 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35898 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35899 regno = ALTIVEC_ARG_RETURN;
35900 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35901 return rs6000_complex_function_value (mode);
35902 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
35903 && (mode == DFmode || mode == DCmode
35904 || FLOAT128_IBM_P (mode) || mode == TCmode))
35905 return spe_build_register_parallel (mode, GP_ARG_RETURN);
35906 else
35907 regno = GP_ARG_RETURN;
35909 return gen_rtx_REG (mode, regno);
35913 /* Return true if we use LRA instead of reload pass. */
35914 static bool
35915 rs6000_lra_p (void)
35917 return TARGET_LRA;
35920 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35921 Frame pointer elimination is automatically handled.
35923 For the RS/6000, if frame pointer elimination is being done, we would like
35924 to convert ap into fp, not sp.
35926 We need r30 if -mminimal-toc was specified, and there are constant pool
35927 references. */
35929 static bool
35930 rs6000_can_eliminate (const int from, const int to)
35932 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35933 ? ! frame_pointer_needed
35934 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35935 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
35936 : true);
35939 /* Define the offset between two registers, FROM to be eliminated and its
35940 replacement TO, at the start of a routine. */
35941 HOST_WIDE_INT
35942 rs6000_initial_elimination_offset (int from, int to)
35944 rs6000_stack_t *info = rs6000_stack_info ();
35945 HOST_WIDE_INT offset;
35947 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35948 offset = info->push_p ? 0 : -info->total_size;
35949 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35951 offset = info->push_p ? 0 : -info->total_size;
35952 if (FRAME_GROWS_DOWNWARD)
35953 offset += info->fixed_size + info->vars_size + info->parm_size;
35955 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35956 offset = FRAME_GROWS_DOWNWARD
35957 ? info->fixed_size + info->vars_size + info->parm_size
35958 : 0;
35959 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35960 offset = info->total_size;
35961 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35962 offset = info->push_p ? info->total_size : 0;
35963 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35964 offset = 0;
35965 else
35966 gcc_unreachable ();
35968 return offset;
35971 static rtx
35972 rs6000_dwarf_register_span (rtx reg)
35974 rtx parts[8];
35975 int i, words;
35976 unsigned regno = REGNO (reg);
35977 machine_mode mode = GET_MODE (reg);
35979 if (TARGET_SPE
35980 && regno < 32
35981 && (SPE_VECTOR_MODE (GET_MODE (reg))
35982 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
35983 && mode != SFmode && mode != SDmode && mode != SCmode)))
35985 else
35986 return NULL_RTX;
35988 regno = REGNO (reg);
35990 /* The duality of the SPE register size wreaks all kinds of havoc.
35991 This is a way of distinguishing r0 in 32-bits from r0 in
35992 64-bits. */
35993 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
35994 gcc_assert (words <= 4);
35995 for (i = 0; i < words; i++, regno++)
35997 if (BYTES_BIG_ENDIAN)
35999 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
36000 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
36002 else
36004 parts[2 * i] = gen_rtx_REG (SImode, regno);
36005 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
36009 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
36012 /* Fill in sizes for SPE register high parts in table used by unwinder. */
36014 static void
36015 rs6000_init_dwarf_reg_sizes_extra (tree address)
36017 if (TARGET_SPE)
36019 int i;
36020 machine_mode mode = TYPE_MODE (char_type_node);
36021 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36022 rtx mem = gen_rtx_MEM (BLKmode, addr);
36023 rtx value = gen_int_mode (4, mode);
36025 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
36027 int column = DWARF_REG_TO_UNWIND_COLUMN
36028 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36029 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36031 emit_move_insn (adjust_address (mem, mode, offset), value);
36035 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36037 int i;
36038 machine_mode mode = TYPE_MODE (char_type_node);
36039 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36040 rtx mem = gen_rtx_MEM (BLKmode, addr);
36041 rtx value = gen_int_mode (16, mode);
36043 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36044 The unwinder still needs to know the size of Altivec registers. */
36046 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36048 int column = DWARF_REG_TO_UNWIND_COLUMN
36049 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36050 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36052 emit_move_insn (adjust_address (mem, mode, offset), value);
36057 /* Map internal gcc register numbers to debug format register numbers.
36058 FORMAT specifies the type of debug register number to use:
36059 0 -- debug information, except for frame-related sections
36060 1 -- DWARF .debug_frame section
36061 2 -- DWARF .eh_frame section */
36063 unsigned int
36064 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36066 /* We never use the GCC internal number for SPE high registers.
36067 Those are mapped to the 1200..1231 range for all debug formats. */
36068 if (SPE_HIGH_REGNO_P (regno))
36069 return regno - FIRST_SPE_HIGH_REGNO + 1200;
36071 /* Except for the above, we use the internal number for non-DWARF
36072 debug information, and also for .eh_frame. */
36073 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36074 return regno;
36076 /* On some platforms, we use the standard DWARF register
36077 numbering for .debug_info and .debug_frame. */
36078 #ifdef RS6000_USE_DWARF_NUMBERING
36079 if (regno <= 63)
36080 return regno;
36081 if (regno == LR_REGNO)
36082 return 108;
36083 if (regno == CTR_REGNO)
36084 return 109;
36085 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36086 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36087 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36088 to the DWARF reg for CR. */
36089 if (format == 1 && regno == CR2_REGNO)
36090 return 64;
36091 if (CR_REGNO_P (regno))
36092 return regno - CR0_REGNO + 86;
36093 if (regno == CA_REGNO)
36094 return 101; /* XER */
36095 if (ALTIVEC_REGNO_P (regno))
36096 return regno - FIRST_ALTIVEC_REGNO + 1124;
36097 if (regno == VRSAVE_REGNO)
36098 return 356;
36099 if (regno == VSCR_REGNO)
36100 return 67;
36101 if (regno == SPE_ACC_REGNO)
36102 return 99;
36103 if (regno == SPEFSCR_REGNO)
36104 return 612;
36105 #endif
36106 return regno;
36109 /* target hook eh_return_filter_mode */
36110 static machine_mode
36111 rs6000_eh_return_filter_mode (void)
36113 return TARGET_32BIT ? SImode : word_mode;
36116 /* Target hook for scalar_mode_supported_p. */
36117 static bool
36118 rs6000_scalar_mode_supported_p (machine_mode mode)
36120 /* -m32 does not support TImode. This is the default, from
36121 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36122 same ABI as for -m32. But default_scalar_mode_supported_p allows
36123 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36124 for -mpowerpc64. */
36125 if (TARGET_32BIT && mode == TImode)
36126 return false;
36128 if (DECIMAL_FLOAT_MODE_P (mode))
36129 return default_decimal_float_supported_p ();
36130 else if (TARGET_FLOAT128 && (mode == KFmode || mode == IFmode))
36131 return true;
36132 else
36133 return default_scalar_mode_supported_p (mode);
36136 /* Target hook for vector_mode_supported_p. */
36137 static bool
36138 rs6000_vector_mode_supported_p (machine_mode mode)
36141 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
36142 return true;
36144 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
36145 return true;
36147 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36148 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36149 double-double. */
36150 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36151 return true;
36153 else
36154 return false;
36157 /* Target hook for c_mode_for_suffix. */
36158 static machine_mode
36159 rs6000_c_mode_for_suffix (char suffix)
36161 if (TARGET_FLOAT128)
36163 if (suffix == 'q' || suffix == 'Q')
36164 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36166 /* At the moment, we are not defining a suffix for IBM extended double.
36167 If/when the default for -mabi=ieeelongdouble is changed, and we want
36168 to support __ibm128 constants in legacy library code, we may need to
36169 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36170 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36171 __float80 constants. */
36174 return VOIDmode;
36177 /* Target hook for invalid_arg_for_unprototyped_fn. */
36178 static const char *
36179 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36181 return (!rs6000_darwin64_abi
36182 && typelist == 0
36183 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36184 && (funcdecl == NULL_TREE
36185 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36186 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36187 ? N_("AltiVec argument passed to unprototyped function")
36188 : NULL;
36191 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36192 setup by using __stack_chk_fail_local hidden function instead of
36193 calling __stack_chk_fail directly. Otherwise it is better to call
36194 __stack_chk_fail directly. */
36196 static tree ATTRIBUTE_UNUSED
36197 rs6000_stack_protect_fail (void)
36199 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36200 ? default_hidden_stack_protect_fail ()
36201 : default_external_stack_protect_fail ();
36204 void
36205 rs6000_final_prescan_insn (rtx_insn *insn, rtx *operand ATTRIBUTE_UNUSED,
36206 int num_operands ATTRIBUTE_UNUSED)
36208 if (rs6000_warn_cell_microcode)
36210 const char *temp;
36211 int insn_code_number = recog_memoized (insn);
36212 location_t location = INSN_LOCATION (insn);
36214 /* Punt on insns we cannot recognize. */
36215 if (insn_code_number < 0)
36216 return;
36218 temp = get_insn_template (insn_code_number, insn);
36220 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
36221 warning_at (location, OPT_mwarn_cell_microcode,
36222 "emitting microcode insn %s\t[%s] #%d",
36223 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
36224 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
36225 warning_at (location, OPT_mwarn_cell_microcode,
36226 "emitting conditional microcode insn %s\t[%s] #%d",
36227 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
36231 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36233 #if TARGET_ELF
36234 static unsigned HOST_WIDE_INT
36235 rs6000_asan_shadow_offset (void)
36237 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36239 #endif
36241 /* Mask options that we want to support inside of attribute((target)) and
36242 #pragma GCC target operations. Note, we do not include things like
36243 64/32-bit, endianness, hard/soft floating point, etc. that would have
36244 different calling sequences. */
36246 struct rs6000_opt_mask {
36247 const char *name; /* option name */
36248 HOST_WIDE_INT mask; /* mask to set */
36249 bool invert; /* invert sense of mask */
36250 bool valid_target; /* option is a target option */
36253 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36255 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36256 { "cmpb", OPTION_MASK_CMPB, false, true },
36257 { "crypto", OPTION_MASK_CRYPTO, false, true },
36258 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36259 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36260 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36261 false, true },
36262 { "float128", OPTION_MASK_FLOAT128, false, false },
36263 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, false },
36264 { "fprnd", OPTION_MASK_FPRND, false, true },
36265 { "hard-dfp", OPTION_MASK_DFP, false, true },
36266 { "htm", OPTION_MASK_HTM, false, true },
36267 { "isel", OPTION_MASK_ISEL, false, true },
36268 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36269 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36270 { "modulo", OPTION_MASK_MODULO, false, true },
36271 { "mulhw", OPTION_MASK_MULHW, false, true },
36272 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36273 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36274 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36275 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36276 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36277 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36278 { "power9-dform-scalar", OPTION_MASK_P9_DFORM_SCALAR, false, true },
36279 { "power9-dform-vector", OPTION_MASK_P9_DFORM_VECTOR, false, true },
36280 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36281 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36282 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36283 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36284 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36285 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36286 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36287 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36288 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36289 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36290 { "string", OPTION_MASK_STRING, false, true },
36291 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36292 { "update", OPTION_MASK_NO_UPDATE, true , true },
36293 { "upper-regs-di", OPTION_MASK_UPPER_REGS_DI, false, true },
36294 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, true },
36295 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, true },
36296 { "vsx", OPTION_MASK_VSX, false, true },
36297 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
36298 #ifdef OPTION_MASK_64BIT
36299 #if TARGET_AIX_OS
36300 { "aix64", OPTION_MASK_64BIT, false, false },
36301 { "aix32", OPTION_MASK_64BIT, true, false },
36302 #else
36303 { "64", OPTION_MASK_64BIT, false, false },
36304 { "32", OPTION_MASK_64BIT, true, false },
36305 #endif
36306 #endif
36307 #ifdef OPTION_MASK_EABI
36308 { "eabi", OPTION_MASK_EABI, false, false },
36309 #endif
36310 #ifdef OPTION_MASK_LITTLE_ENDIAN
36311 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36312 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36313 #endif
36314 #ifdef OPTION_MASK_RELOCATABLE
36315 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36316 #endif
36317 #ifdef OPTION_MASK_STRICT_ALIGN
36318 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36319 #endif
36320 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36321 { "string", OPTION_MASK_STRING, false, false },
36324 /* Builtin mask mapping for printing the flags. */
36325 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36327 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36328 { "vsx", RS6000_BTM_VSX, false, false },
36329 { "spe", RS6000_BTM_SPE, false, false },
36330 { "paired", RS6000_BTM_PAIRED, false, false },
36331 { "fre", RS6000_BTM_FRE, false, false },
36332 { "fres", RS6000_BTM_FRES, false, false },
36333 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36334 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36335 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36336 { "cell", RS6000_BTM_CELL, false, false },
36337 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36338 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36339 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36340 { "crypto", RS6000_BTM_CRYPTO, false, false },
36341 { "htm", RS6000_BTM_HTM, false, false },
36342 { "hard-dfp", RS6000_BTM_DFP, false, false },
36343 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36344 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36345 { "float128", RS6000_BTM_FLOAT128, false, false },
36348 /* Option variables that we want to support inside attribute((target)) and
36349 #pragma GCC target operations. */
36351 struct rs6000_opt_var {
36352 const char *name; /* option name */
36353 size_t global_offset; /* offset of the option in global_options. */
36354 size_t target_offset; /* offset of the option in target options. */
36357 static struct rs6000_opt_var const rs6000_opt_vars[] =
36359 { "friz",
36360 offsetof (struct gcc_options, x_TARGET_FRIZ),
36361 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36362 { "avoid-indexed-addresses",
36363 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36364 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36365 { "paired",
36366 offsetof (struct gcc_options, x_rs6000_paired_float),
36367 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36368 { "longcall",
36369 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36370 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36371 { "optimize-swaps",
36372 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36373 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36374 { "allow-movmisalign",
36375 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36376 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36377 { "allow-df-permute",
36378 offsetof (struct gcc_options, x_TARGET_ALLOW_DF_PERMUTE),
36379 offsetof (struct cl_target_option, x_TARGET_ALLOW_DF_PERMUTE), },
36380 { "sched-groups",
36381 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36382 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36383 { "always-hint",
36384 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36385 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36386 { "align-branch-targets",
36387 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36388 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36389 { "vectorize-builtins",
36390 offsetof (struct gcc_options, x_TARGET_VECTORIZE_BUILTINS),
36391 offsetof (struct cl_target_option, x_TARGET_VECTORIZE_BUILTINS), },
36392 { "tls-markers",
36393 offsetof (struct gcc_options, x_tls_markers),
36394 offsetof (struct cl_target_option, x_tls_markers), },
36395 { "sched-prolog",
36396 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36397 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36398 { "sched-epilog",
36399 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36400 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36401 { "gen-cell-microcode",
36402 offsetof (struct gcc_options, x_rs6000_gen_cell_microcode),
36403 offsetof (struct cl_target_option, x_rs6000_gen_cell_microcode), },
36404 { "warn-cell-microcode",
36405 offsetof (struct gcc_options, x_rs6000_warn_cell_microcode),
36406 offsetof (struct cl_target_option, x_rs6000_warn_cell_microcode), },
36409 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36410 parsing. Return true if there were no errors. */
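/* The string form is a comma-separated list, e.g.
   __attribute__((__target__("cpu=power8,no-vsx"))): each item is
   either cpu=..., tune=..., or an option name with an optional "no-"
   prefix, matched against rs6000_opt_masks and rs6000_opt_vars.  */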
36412 static bool
36413 rs6000_inner_target_options (tree args, bool attr_p)
36415 bool ret = true;
36417 if (args == NULL_TREE)
36420 else if (TREE_CODE (args) == STRING_CST)
36422 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36423 char *q;
36425 while ((q = strtok (p, ",")) != NULL)
36427 bool error_p = false;
36428 bool not_valid_p = false;
36429 const char *cpu_opt = NULL;
36431 p = NULL;
36432 if (strncmp (q, "cpu=", 4) == 0)
36434 int cpu_index = rs6000_cpu_name_lookup (q+4);
36435 if (cpu_index >= 0)
36436 rs6000_cpu_index = cpu_index;
36437 else
36439 error_p = true;
36440 cpu_opt = q+4;
36443 else if (strncmp (q, "tune=", 5) == 0)
36445 int tune_index = rs6000_cpu_name_lookup (q+5);
36446 if (tune_index >= 0)
36447 rs6000_tune_index = tune_index;
36448 else
36450 error_p = true;
36451 cpu_opt = q+5;
36454 else
36456 size_t i;
36457 bool invert = false;
36458 char *r = q;
36460 error_p = true;
36461 if (strncmp (r, "no-", 3) == 0)
36463 invert = true;
36464 r += 3;
36467 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36468 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36470 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36472 if (!rs6000_opt_masks[i].valid_target)
36473 not_valid_p = true;
36474 else
36476 error_p = false;
36477 rs6000_isa_flags_explicit |= mask;
36479 /* VSX needs altivec, so -mvsx automagically sets
36480 altivec and disables -mavoid-indexed-addresses. */
36481 if (!invert)
36483 if (mask == OPTION_MASK_VSX)
36485 mask |= OPTION_MASK_ALTIVEC;
36486 TARGET_AVOID_XFORM = 0;
36490 if (rs6000_opt_masks[i].invert)
36491 invert = !invert;
36493 if (invert)
36494 rs6000_isa_flags &= ~mask;
36495 else
36496 rs6000_isa_flags |= mask;
36498 break;
36501 if (error_p && !not_valid_p)
36503 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36504 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36506 size_t j = rs6000_opt_vars[i].global_offset;
36507 *((int *) ((char *)&global_options + j)) = !invert;
36508 error_p = false;
36509 not_valid_p = false;
36510 break;
36515 if (error_p)
36517 const char *eprefix, *esuffix;
36519 ret = false;
36520 if (attr_p)
36522 eprefix = "__attribute__((__target__(";
36523 esuffix = ")))";
36525 else
36527 eprefix = "#pragma GCC target ";
36528 esuffix = "";
36531 if (cpu_opt)
36532 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
36533 q, esuffix);
36534 else if (not_valid_p)
36535 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
36536 else
36537 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
36542 else if (TREE_CODE (args) == TREE_LIST)
36546 tree value = TREE_VALUE (args);
36547 if (value)
36549 bool ret2 = rs6000_inner_target_options (value, attr_p);
36550 if (!ret2)
36551 ret = false;
36553 args = TREE_CHAIN (args);
36555 while (args != NULL_TREE);
36558 else
36559 gcc_unreachable ();
36561 return ret;
36564 /* Print out the target options as a list for -mdebug=target. */
36566 static void
36567 rs6000_debug_target_options (tree args, const char *prefix)
36569 if (args == NULL_TREE)
36570 fprintf (stderr, "%s<NULL>", prefix);
36572 else if (TREE_CODE (args) == STRING_CST)
36574 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36575 char *q;
36577 while ((q = strtok (p, ",")) != NULL)
36579 p = NULL;
36580 fprintf (stderr, "%s\"%s\"", prefix, q);
36581 prefix = ", ";
36585 else if (TREE_CODE (args) == TREE_LIST)
36589 tree value = TREE_VALUE (args);
36590 if (value)
36592 rs6000_debug_target_options (value, prefix);
36593 prefix = ", ";
36595 args = TREE_CHAIN (args);
36597 while (args != NULL_TREE);
36600 else
36601 gcc_unreachable ();
36603 return;
36607 /* Hook to validate attribute((target("..."))). */
36609 static bool
36610 rs6000_valid_attribute_p (tree fndecl,
36611 tree ARG_UNUSED (name),
36612 tree args,
36613 int flags)
36615 struct cl_target_option cur_target;
36616 bool ret;
36617 tree old_optimize = build_optimization_node (&global_options);
36618 tree new_target, new_optimize;
36619 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36621 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36623 if (TARGET_DEBUG_TARGET)
36625 tree tname = DECL_NAME (fndecl);
36626 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36627 if (tname)
36628 fprintf (stderr, "function: %.*s\n",
36629 (int) IDENTIFIER_LENGTH (tname),
36630 IDENTIFIER_POINTER (tname));
36631 else
36632 fprintf (stderr, "function: unknown\n");
36634 fprintf (stderr, "args:");
36635 rs6000_debug_target_options (args, " ");
36636 fprintf (stderr, "\n");
36638 if (flags)
36639 fprintf (stderr, "flags: 0x%x\n", flags);
36641 fprintf (stderr, "--------------------\n");
36644 old_optimize = build_optimization_node (&global_options);
36645 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36647 /* If the function changed the optimization levels as well as setting target
36648 options, start with the optimizations specified. */
36649 if (func_optimize && func_optimize != old_optimize)
36650 cl_optimization_restore (&global_options,
36651 TREE_OPTIMIZATION (func_optimize));
36653 /* The target attributes may also change some optimization flags, so update
36654 the optimization options if necessary. */
36655 cl_target_option_save (&cur_target, &global_options);
36656 rs6000_cpu_index = rs6000_tune_index = -1;
36657 ret = rs6000_inner_target_options (args, true);
36659 /* Set up any additional state. */
36660 if (ret)
36662 ret = rs6000_option_override_internal (false);
36663 new_target = build_target_option_node (&global_options);
36665 else
36666 new_target = NULL;
36668 new_optimize = build_optimization_node (&global_options);
36670 if (!new_target)
36671 ret = false;
36673 else if (fndecl)
36675 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36677 if (old_optimize != new_optimize)
36678 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36681 cl_target_option_restore (&global_options, &cur_target);
36683 if (old_optimize != new_optimize)
36684 cl_optimization_restore (&global_options,
36685 TREE_OPTIMIZATION (old_optimize));
36687 return ret;
36691 /* Hook to validate the current #pragma GCC target and set the state, and
36692 update the macros based on what was changed. If ARGS is NULL, then
36693 POP_TARGET is used to reset the options. */
36695 bool
36696 rs6000_pragma_target_parse (tree args, tree pop_target)
36698 tree prev_tree = build_target_option_node (&global_options);
36699 tree cur_tree;
36700 struct cl_target_option *prev_opt, *cur_opt;
36701 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36702 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36704 if (TARGET_DEBUG_TARGET)
36706 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36707 fprintf (stderr, "args:");
36708 rs6000_debug_target_options (args, " ");
36709 fprintf (stderr, "\n");
36711 if (pop_target)
36713 fprintf (stderr, "pop_target:\n");
36714 debug_tree (pop_target);
36716 else
36717 fprintf (stderr, "pop_target: <NULL>\n");
36719 fprintf (stderr, "--------------------\n");
36722 if (! args)
36724 cur_tree = ((pop_target)
36725 ? pop_target
36726 : target_option_default_node);
36727 cl_target_option_restore (&global_options,
36728 TREE_TARGET_OPTION (cur_tree));
36730 else
36732 rs6000_cpu_index = rs6000_tune_index = -1;
36733 if (!rs6000_inner_target_options (args, false)
36734 || !rs6000_option_override_internal (false)
36735 || (cur_tree = build_target_option_node (&global_options))
36736 == NULL_TREE)
36738 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36739 fprintf (stderr, "invalid pragma\n");
36741 return false;
36745 target_option_current_node = cur_tree;
36747 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36748 change the macros that are defined. */
36749 if (rs6000_target_modify_macros_ptr)
36751 prev_opt = TREE_TARGET_OPTION (prev_tree);
36752 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36753 prev_flags = prev_opt->x_rs6000_isa_flags;
36755 cur_opt = TREE_TARGET_OPTION (cur_tree);
36756 cur_flags = cur_opt->x_rs6000_isa_flags;
36757 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36759 diff_bumask = (prev_bumask ^ cur_bumask);
36760 diff_flags = (prev_flags ^ cur_flags);
36762 if ((diff_flags != 0) || (diff_bumask != 0))
36764 /* Delete old macros. */
36765 rs6000_target_modify_macros_ptr (false,
36766 prev_flags & diff_flags,
36767 prev_bumask & diff_bumask);
36769 /* Define new macros. */
36770 rs6000_target_modify_macros_ptr (true,
36771 cur_flags & diff_flags,
36772 cur_bumask & diff_bumask);
36776 return true;
36780 /* Remember the last target of rs6000_set_current_function. */
36781 static GTY(()) tree rs6000_previous_fndecl;
36783 /* Establish appropriate back-end context for processing the function
36784 FNDECL. The argument might be NULL to indicate processing at top
36785 level, outside of any function scope. */
36786 static void
36787 rs6000_set_current_function (tree fndecl)
36789 tree old_tree = (rs6000_previous_fndecl
36790 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
36791 : NULL_TREE);
36793 tree new_tree = (fndecl
36794 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
36795 : NULL_TREE);
36797 if (TARGET_DEBUG_TARGET)
36799 bool print_final = false;
36800 fprintf (stderr, "\n==================== rs6000_set_current_function");
36802 if (fndecl)
36803 fprintf (stderr, ", fndecl %s (%p)",
36804 (DECL_NAME (fndecl)
36805 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36806 : "<unknown>"), (void *)fndecl);
36808 if (rs6000_previous_fndecl)
36809 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36811 fprintf (stderr, "\n");
36812 if (new_tree)
36814 fprintf (stderr, "\nnew fndecl target specific options:\n");
36815 debug_tree (new_tree);
36816 print_final = true;
36819 if (old_tree)
36821 fprintf (stderr, "\nold fndecl target specific options:\n");
36822 debug_tree (old_tree);
36823 print_final = true;
36826 if (print_final)
36827 fprintf (stderr, "--------------------\n");
36830 /* Only change the context if the function changes. This hook is called
36831 several times in the course of compiling a function, and we don't want to
36832 slow things down too much or call target_reinit when it isn't safe. */
36833 if (fndecl && fndecl != rs6000_previous_fndecl)
36835 rs6000_previous_fndecl = fndecl;
36836 if (old_tree == new_tree)
36839 else if (new_tree && new_tree != target_option_default_node)
36841 cl_target_option_restore (&global_options,
36842 TREE_TARGET_OPTION (new_tree));
36843 if (TREE_TARGET_GLOBALS (new_tree))
36844 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36845 else
36846 TREE_TARGET_GLOBALS (new_tree)
36847 = save_target_globals_default_opts ();
36850 else if (old_tree && old_tree != target_option_default_node)
36852 new_tree = target_option_current_node;
36853 cl_target_option_restore (&global_options,
36854 TREE_TARGET_OPTION (new_tree));
36855 if (TREE_TARGET_GLOBALS (new_tree))
36856 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36857 else if (new_tree == target_option_default_node)
36858 restore_target_globals (&default_target_globals);
36859 else
36860 TREE_TARGET_GLOBALS (new_tree)
36861 = save_target_globals_default_opts ();
36867 /* Save the current options */
36869 static void
36870 rs6000_function_specific_save (struct cl_target_option *ptr,
36871 struct gcc_options *opts)
36873 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36874 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36877 /* Restore the current options */
36879 static void
36880 rs6000_function_specific_restore (struct gcc_options *opts,
36881 struct cl_target_option *ptr)
36884 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36885 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36886 (void) rs6000_option_override_internal (false);
36889 /* Print the current options */
36891 static void
36892 rs6000_function_specific_print (FILE *file, int indent,
36893 struct cl_target_option *ptr)
36895 rs6000_print_isa_options (file, indent, "Isa options set",
36896 ptr->x_rs6000_isa_flags);
36898 rs6000_print_isa_options (file, indent, "Isa options explicit",
36899 ptr->x_rs6000_isa_flags_explicit);
36902 /* Helper function to print the current isa or misc options on a line. */
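/* Editor's illustration (the exact format strings are the DEBUG_FMT_S /
   DEBUG_FMT_WX macros defined elsewhere in this file); the output
   resembles:

       Isa options set = 0x...: -maltivec, -mno-vsx, -mpopcntd, ...

   with long lines wrapped at column 120 using a trailing backslash.  */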
36904 static void
36905 rs6000_print_options_internal (FILE *file,
36906 int indent,
36907 const char *string,
36908 HOST_WIDE_INT flags,
36909 const char *prefix,
36910 const struct rs6000_opt_mask *opts,
36911 size_t num_elements)
36913 size_t i;
36914 size_t start_column = 0;
36915 size_t cur_column;
36916 size_t max_column = 120;
36917 size_t prefix_len = strlen (prefix);
36918 size_t comma_len = 0;
36919 const char *comma = "";
36921 if (indent)
36922 start_column += fprintf (file, "%*s", indent, "");
36924 if (!flags)
36926 fprintf (file, DEBUG_FMT_S, string, "<none>");
36927 return;
36930 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36932 /* Print the various mask options. */
36933 cur_column = start_column;
36934 for (i = 0; i < num_elements; i++)
36936 bool invert = opts[i].invert;
36937 const char *name = opts[i].name;
36938 const char *no_str = "";
36939 HOST_WIDE_INT mask = opts[i].mask;
36940 size_t len = comma_len + prefix_len + strlen (name);
36942 if (!invert)
36944 if ((flags & mask) == 0)
36946 no_str = "no-";
36947 len += sizeof ("no-") - 1;
36950 flags &= ~mask;
36953 else
36955 if ((flags & mask) != 0)
36957 no_str = "no-";
36958 len += sizeof ("no-") - 1;
36961 flags |= mask;
36964 cur_column += len;
36965 if (cur_column > max_column)
36967 fprintf (file, ", \\\n%*s", (int)start_column, "");
36968 cur_column = start_column + len;
36969 comma = "";
36972 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36973 comma = ", ";
36974 comma_len = sizeof (", ") - 1;
36977 fputs ("\n", file);
36980 /* Helper function to print the current isa options on a line. */
36982 static void
36983 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36984 HOST_WIDE_INT flags)
36986 rs6000_print_options_internal (file, indent, string, flags, "-m",
36987 &rs6000_opt_masks[0],
36988 ARRAY_SIZE (rs6000_opt_masks));
36991 static void
36992 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36993 HOST_WIDE_INT flags)
36995 rs6000_print_options_internal (file, indent, string, flags, "",
36996 &rs6000_builtin_mask_names[0],
36997 ARRAY_SIZE (rs6000_builtin_mask_names));
37001 /* Hook to determine if one function can safely inline another. */
37003 static bool
37004 rs6000_can_inline_p (tree caller, tree callee)
37006 bool ret = false;
37007 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37008 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37010 /* If callee has no option attributes, then it is ok to inline. */
37011 if (!callee_tree)
37012 ret = true;
37014 /* If caller has no option attributes, but callee does then it is not ok to
37015 inline. */
37016 else if (!caller_tree)
37017 ret = false;
37019 else
37021 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37022 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37024 /* Callee's options must be a subset of the caller's, i.e. a vsx function
37025 can inline an altivec function but a non-vsx function can't inline a
37026 vsx function. */
37027 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37028 == callee_opts->x_rs6000_isa_flags)
37029 ret = true;
37032 if (TARGET_DEBUG_TARGET)
37033 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37034 (DECL_NAME (caller)
37035 ? IDENTIFIER_POINTER (DECL_NAME (caller))
37036 : "<unknown>"),
37037 (DECL_NAME (callee)
37038 ? IDENTIFIER_POINTER (DECL_NAME (callee))
37039 : "<unknown>"),
37040 (ret ? "can" : "cannot"));
37042 return ret;
37045 /* Allocate a stack temp and fixup the address so it meets the particular
37046 memory requirements (either offsettable or REG+REG addressing). */
37048 rtx
37049 rs6000_allocate_stack_temp (machine_mode mode,
37050 bool offsettable_p,
37051 bool reg_reg_p)
37053 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37054 rtx addr = XEXP (stack, 0);
37055 int strict_p = (reload_in_progress || reload_completed);
37057 if (!legitimate_indirect_address_p (addr, strict_p))
37059 if (offsettable_p
37060 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37061 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37063 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37064 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37067 return stack;
37070 /* Given a memory reference, if it does not use reg or reg+reg addressing,
37071 convert it to such a form to deal with memory reference instructions like
37072 STFIWX that only take reg+reg addressing. */
37074 rtx
37075 rs6000_address_for_fpconvert (rtx x)
37077 int strict_p = (reload_in_progress || reload_completed);
37078 rtx addr;
37080 gcc_assert (MEM_P (x));
37081 addr = XEXP (x, 0);
37082 if (! legitimate_indirect_address_p (addr, strict_p)
37083 && ! legitimate_indexed_address_p (addr, strict_p))
37085 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37087 rtx reg = XEXP (addr, 0);
37088 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37089 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37090 gcc_assert (REG_P (reg));
37091 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37092 addr = reg;
37094 else if (GET_CODE (addr) == PRE_MODIFY)
37096 rtx reg = XEXP (addr, 0);
37097 rtx expr = XEXP (addr, 1);
37098 gcc_assert (REG_P (reg));
37099 gcc_assert (GET_CODE (expr) == PLUS);
37100 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37101 addr = reg;
37104 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37107 return x;
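/* For instance (editor's sketch): given x = (mem:SI (plus (reg r3)
   (const_int 16))), which an X-form instruction such as STFIWX cannot
   use, the PLUS address is copied into a fresh base register, yielding
   a plain (mem:SI (reg tmp)) that reg or reg+reg instructions accept.  */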
37110 /* Given a memory reference, if it is not in the form for altivec memory
37111 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
37112 convert to the altivec format. */
37114 rtx
37115 rs6000_address_for_altivec (rtx x)
37117 gcc_assert (MEM_P (x));
37118 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
37120 rtx addr = XEXP (x, 0);
37121 int strict_p = (reload_in_progress || reload_completed);
37123 if (!legitimate_indexed_address_p (addr, strict_p)
37124 && !legitimate_indirect_address_p (addr, strict_p))
37125 addr = copy_to_mode_reg (Pmode, addr);
37127 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
37128 x = change_address (x, GET_MODE (x), addr);
37131 return x;
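/* E.g. (editor's note): an address (plus (reg r3) (const_int 19)) is
   first copied into a register and then wrapped as (and (reg tmp)
   (const_int -16)), matching the AltiVec lvx/stvx semantics of ignoring
   the low four bits of the effective address.  */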
37134 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37136 On the RS/6000, all integer constants are acceptable, most won't be valid
37137 for particular insns, though. Only easy FP constants are acceptable. */
37139 static bool
37140 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37142 if (TARGET_ELF && tls_referenced_p (x))
37143 return false;
37145 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37146 || GET_MODE (x) == VOIDmode
37147 || (TARGET_POWERPC64 && mode == DImode)
37148 || easy_fp_constant (x, mode)
37149 || easy_vector_constant (x, mode));
37153 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37155 static bool
37156 chain_already_loaded (rtx_insn *last)
37158 for (; last != NULL; last = PREV_INSN (last))
37160 if (NONJUMP_INSN_P (last))
37162 rtx patt = PATTERN (last);
37164 if (GET_CODE (patt) == SET)
37166 rtx lhs = XEXP (patt, 0);
37168 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37169 return true;
37173 return false;
37176 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37178 void
37179 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37181 const bool direct_call_p
37182 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37183 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37184 rtx toc_load = NULL_RTX;
37185 rtx toc_restore = NULL_RTX;
37186 rtx func_addr;
37187 rtx abi_reg = NULL_RTX;
37188 rtx call[4];
37189 int n_call;
37190 rtx insn;
37192 /* Handle longcall attributes. */
37193 if (INTVAL (cookie) & CALL_LONG)
37194 func_desc = rs6000_longcall_ref (func_desc);
37196 /* Handle indirect calls. */
37197 if (GET_CODE (func_desc) != SYMBOL_REF
37198 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37200 /* Save the TOC into its reserved slot before the call,
37201 and prepare to restore it after the call. */
37202 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37203 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37204 rtx stack_toc_mem = gen_frame_mem (Pmode,
37205 gen_rtx_PLUS (Pmode, stack_ptr,
37206 stack_toc_offset));
37207 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37208 gen_rtvec (1, stack_toc_offset),
37209 UNSPEC_TOCSLOT);
37210 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37212 /* Can we optimize saving the TOC in the prologue or
37213 do we need to do it at every call? */
37214 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37215 cfun->machine->save_toc_in_prologue = true;
37216 else
37218 MEM_VOLATILE_P (stack_toc_mem) = 1;
37219 emit_move_insn (stack_toc_mem, toc_reg);
37222 if (DEFAULT_ABI == ABI_ELFv2)
37224 /* A function pointer in the ELFv2 ABI is just a plain address, but
37225 the ABI requires it to be loaded into r12 before the call. */
37226 func_addr = gen_rtx_REG (Pmode, 12);
37227 emit_move_insn (func_addr, func_desc);
37228 abi_reg = func_addr;
37230 else
37232 /* A function pointer under AIX is a pointer to a data area whose
37233 first word contains the actual address of the function, whose
37234 second word contains a pointer to its TOC, and whose third word
37235 contains a value to place in the static chain register (r11).
37236 Note that if we load the static chain, our "trampoline" need
37237 not have any executable code. */
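/* Editor's sketch of the descriptor layout just described (Pmode-sized
   words; the authoritative definition is the AIX ABI):

       word 0: entry-point address of the function
       word 1: TOC pointer for the callee
       word 2: static chain value (written only if a trampoline exists) */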
37239 /* Load up address of the actual function. */
37240 func_desc = force_reg (Pmode, func_desc);
37241 func_addr = gen_reg_rtx (Pmode);
37242 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37244 /* Prepare to load the TOC of the called function. Note that the
37245 TOC load must happen immediately before the actual call so
37246 that unwinding the TOC registers works correctly. See the
37247 comment in frob_update_context. */
37248 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37249 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37250 gen_rtx_PLUS (Pmode, func_desc,
37251 func_toc_offset));
37252 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37254 /* If we have a static chain, load it up. But, if the call was
37255 originally direct, the 3rd word has not been written since no
37256 trampoline has been built, so we ought not to load it, lest we
37257 override a static chain value. */
37258 if (!direct_call_p
37259 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37260 && !chain_already_loaded (get_current_sequence ()->next->last))
37262 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37263 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37264 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37265 gen_rtx_PLUS (Pmode, func_desc,
37266 func_sc_offset));
37267 emit_move_insn (sc_reg, func_sc_mem);
37268 abi_reg = sc_reg;
37272 else
37274 /* Direct calls use the TOC: for local calls, the callee will
37275 assume the TOC register is set; for non-local calls, the
37276 PLT stub needs the TOC register. */
37277 abi_reg = toc_reg;
37278 func_addr = func_desc;
37281 /* Create the call. */
37282 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37283 if (value != NULL_RTX)
37284 call[0] = gen_rtx_SET (value, call[0]);
37285 n_call = 1;
37287 if (toc_load)
37288 call[n_call++] = toc_load;
37289 if (toc_restore)
37290 call[n_call++] = toc_restore;
37292 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37294 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37295 insn = emit_call_insn (insn);
37297 /* Mention all registers defined by the ABI to hold information
37298 as uses in CALL_INSN_FUNCTION_USAGE. */
37299 if (abi_reg)
37300 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37303 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37305 void
37306 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37308 rtx call[2];
37309 rtx insn;
37311 gcc_assert (INTVAL (cookie) == 0);
37313 /* Create the call. */
37314 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37315 if (value != NULL_RTX)
37316 call[0] = gen_rtx_SET (value, call[0]);
37318 call[1] = simple_return_rtx;
37320 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37321 insn = emit_call_insn (insn);
37323 /* Note use of the TOC register. */
37324 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37325 /* We need to also mark a use of the link register since the function we
37326 sibling-call to will use it to return to our caller. */
37327 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
37330 /* Return whether we need to always update the saved TOC pointer when we update
37331 the stack pointer. */
37333 static bool
37334 rs6000_save_toc_in_prologue_p (void)
37336 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37339 #ifdef HAVE_GAS_HIDDEN
37340 # define USE_HIDDEN_LINKONCE 1
37341 #else
37342 # define USE_HIDDEN_LINKONCE 0
37343 #endif
37345 /* Fills in the label name that should be used for a 476 link stack thunk. */
37347 void
37348 get_ppc476_thunk_name (char name[32])
37350 gcc_assert (TARGET_LINK_STACK);
37352 if (USE_HIDDEN_LINKONCE)
37353 sprintf (name, "__ppc476.get_thunk");
37354 else
37355 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37358 /* This function emits the simple thunk routine that is used to preserve
37359 the link stack on the 476 cpu. */
37361 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37362 static void
37363 rs6000_code_end (void)
37365 char name[32];
37366 tree decl;
37368 if (!TARGET_LINK_STACK)
37369 return;
37371 get_ppc476_thunk_name (name);
37373 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37374 build_function_type_list (void_type_node, NULL_TREE));
37375 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37376 NULL_TREE, void_type_node);
37377 TREE_PUBLIC (decl) = 1;
37378 TREE_STATIC (decl) = 1;
37380 #if RS6000_WEAK
37381 if (USE_HIDDEN_LINKONCE)
37383 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37384 targetm.asm_out.unique_section (decl, 0);
37385 switch_to_section (get_named_section (decl, NULL, 0));
37386 DECL_WEAK (decl) = 1;
37387 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37388 targetm.asm_out.globalize_label (asm_out_file, name);
37389 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37390 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37392 else
37393 #endif
37395 switch_to_section (text_section);
37396 ASM_OUTPUT_LABEL (asm_out_file, name);
37399 DECL_INITIAL (decl) = make_node (BLOCK);
37400 current_function_decl = decl;
37401 allocate_struct_function (decl, false);
37402 init_function_start (decl);
37403 first_function_block_is_cold = false;
37404 /* Make sure unwind info is emitted for the thunk if needed. */
37405 final_start_function (emit_barrier (), asm_out_file, 1);
37407 fputs ("\tblr\n", asm_out_file);
37409 final_end_function ();
37410 init_insn_lengths ();
37411 free_after_compilation (cfun);
37412 set_cfun (NULL);
37413 current_function_decl = NULL;
37416 /* Add r30 to hard reg set if the prologue sets it up and it is not
37417 pic_offset_table_rtx. */
37419 static void
37420 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37422 if (!TARGET_SINGLE_PIC_BASE
37423 && TARGET_TOC
37424 && TARGET_MINIMAL_TOC
37425 && get_pool_size () != 0)
37426 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37427 if (cfun->machine->split_stack_argp_used)
37428 add_to_hard_reg_set (&set->set, Pmode, 12);
37432 /* Helper function for rs6000_split_logical to emit a logical instruction after
37433 splitting the operation into single GPR registers.
37435 DEST is the destination register.
37436 OP1 and OP2 are the input source registers.
37437 CODE is the base operation (AND, IOR, XOR, NOT).
37438 MODE is the machine mode.
37439 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37440 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37441 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37443 static void
37444 rs6000_split_logical_inner (rtx dest,
37445 rtx op1,
37446 rtx op2,
37447 enum rtx_code code,
37448 machine_mode mode,
37449 bool complement_final_p,
37450 bool complement_op1_p,
37451 bool complement_op2_p)
37453 rtx bool_rtx;
37455 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37456 if (op2 && GET_CODE (op2) == CONST_INT
37457 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37458 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37460 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37461 HOST_WIDE_INT value = INTVAL (op2) & mask;
37463 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37464 if (code == AND)
37466 if (value == 0)
37468 emit_insn (gen_rtx_SET (dest, const0_rtx));
37469 return;
37472 else if (value == mask)
37474 if (!rtx_equal_p (dest, op1))
37475 emit_insn (gen_rtx_SET (dest, op1));
37476 return;
37480 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37481 into separate ORI/ORIS or XORI/XORIS instructions.
37482 else if (code == IOR || code == XOR)
37484 if (value == 0)
37486 if (!rtx_equal_p (dest, op1))
37487 emit_insn (gen_rtx_SET (dest, op1));
37488 return;
37493 if (code == AND && mode == SImode
37494 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37496 emit_insn (gen_andsi3 (dest, op1, op2));
37497 return;
37500 if (complement_op1_p)
37501 op1 = gen_rtx_NOT (mode, op1);
37503 if (complement_op2_p)
37504 op2 = gen_rtx_NOT (mode, op2);
37506 /* For canonical RTL, if only one arm is inverted it is the first. */
37507 if (!complement_op1_p && complement_op2_p)
37508 std::swap (op1, op2);
37510 bool_rtx = ((code == NOT)
37511 ? gen_rtx_NOT (mode, op1)
37512 : gen_rtx_fmt_ee (code, mode, op1, op2));
37514 if (complement_final_p)
37515 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37517 emit_insn (gen_rtx_SET (dest, bool_rtx));
37520 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37521 operations are split immediately during RTL generation to allow for more
37522 optimizations of the AND/IOR/XOR.
37524 OPERANDS is an array containing the destination and two input operands.
37525 CODE is the base operation (AND, IOR, XOR, NOT).
37526 MODE is the machine mode.
37527 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37528 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37529 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
37530 CLOBBER_REG is either NULL or a scratch register of type CC to allow
37531 formation of the AND instructions. */
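/* Worked example (editor's addition): on a 32-bit target,

       (set (reg:DI d) (ior:DI (reg:DI s) (const_int 0x12345678)))

   splits into SImode halves.  The high half is an IOR with 0, which
   becomes a plain move; the low-half constant has both 16-bit halves
   nonzero, so it is split further below into

       oris tmp,s,0x1234
       ori  d,tmp,0x5678

   (one instruction per 16-bit immediate).  */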
37533 static void
37534 rs6000_split_logical_di (rtx operands[3],
37535 enum rtx_code code,
37536 bool complement_final_p,
37537 bool complement_op1_p,
37538 bool complement_op2_p)
37540 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37541 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37542 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37543 enum hi_lo { hi = 0, lo = 1 };
37544 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37545 size_t i;
37547 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37548 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37549 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37550 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37552 if (code == NOT)
37553 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37554 else
37556 if (GET_CODE (operands[2]) != CONST_INT)
37558 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37559 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37561 else
37563 HOST_WIDE_INT value = INTVAL (operands[2]);
37564 HOST_WIDE_INT value_hi_lo[2];
37566 gcc_assert (!complement_final_p);
37567 gcc_assert (!complement_op1_p);
37568 gcc_assert (!complement_op2_p);
37570 value_hi_lo[hi] = value >> 32;
37571 value_hi_lo[lo] = value & lower_32bits;
37573 for (i = 0; i < 2; i++)
37575 HOST_WIDE_INT sub_value = value_hi_lo[i];
37577 if (sub_value & sign_bit)
37578 sub_value |= upper_32bits;
37580 op2_hi_lo[i] = GEN_INT (sub_value);
37582 /* If this is an AND instruction, check to see if we need to load
37583 the value in a register. */
37584 if (code == AND && sub_value != -1 && sub_value != 0
37585 && !and_operand (op2_hi_lo[i], SImode))
37586 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37591 for (i = 0; i < 2; i++)
37593 /* Split large IOR/XOR operations. */
37594 if ((code == IOR || code == XOR)
37595 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37596 && !complement_final_p
37597 && !complement_op1_p
37598 && !complement_op2_p
37599 && !logical_const_operand (op2_hi_lo[i], SImode))
37601 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37602 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37603 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37604 rtx tmp = gen_reg_rtx (SImode);
37606 /* Make sure the constant is sign extended. */
37607 if ((hi_16bits & sign_bit) != 0)
37608 hi_16bits |= upper_32bits;
37610 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37611 code, SImode, false, false, false);
37613 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37614 code, SImode, false, false, false);
37616 else
37617 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37618 code, SImode, complement_final_p,
37619 complement_op1_p, complement_op2_p);
37622 return;
37625 /* Split the insns that make up boolean operations operating on multiple GPR
37626 registers. The boolean MD patterns ensure that the inputs either are
37627 exactly the same as the output registers, or there is no overlap.
37629 OPERANDS is an array containing the destination and two input operands.
37630 CODE is the base operation (AND, IOR, XOR, NOT).
37631 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37632 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37633 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37635 void
37636 rs6000_split_logical (rtx operands[3],
37637 enum rtx_code code,
37638 bool complement_final_p,
37639 bool complement_op1_p,
37640 bool complement_op2_p)
37642 machine_mode mode = GET_MODE (operands[0]);
37643 machine_mode sub_mode;
37644 rtx op0, op1, op2;
37645 int sub_size, regno0, regno1, nregs, i;
37647 /* If this is DImode, use the specialized version that can run before
37648 register allocation. */
37649 if (mode == DImode && !TARGET_POWERPC64)
37651 rs6000_split_logical_di (operands, code, complement_final_p,
37652 complement_op1_p, complement_op2_p);
37653 return;
37656 op0 = operands[0];
37657 op1 = operands[1];
37658 op2 = (code == NOT) ? NULL_RTX : operands[2];
37659 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
37660 sub_size = GET_MODE_SIZE (sub_mode);
37661 regno0 = REGNO (op0);
37662 regno1 = REGNO (op1);
37664 gcc_assert (reload_completed);
37665 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37666 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37668 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
37669 gcc_assert (nregs > 1);
37671 if (op2 && REG_P (op2))
37672 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
37674 for (i = 0; i < nregs; i++)
37676 int offset = i * sub_size;
37677 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
37678 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
37679 rtx sub_op2 = ((code == NOT)
37680 ? NULL_RTX
37681 : simplify_subreg (sub_mode, op2, mode, offset));
37683 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
37684 complement_final_p, complement_op1_p,
37685 complement_op2_p);
37688 return;
37692 /* Return true if the peephole2 can combine a load involving a combination of
37693 an addis instruction and a load with an offset that can be fused together on
37694 a power8. */
37696 bool
37697 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
37698 rtx addis_value, /* addis value. */
37699 rtx target, /* target register that is loaded. */
37700 rtx mem) /* bottom part of the memory addr. */
37702 rtx addr;
37703 rtx base_reg;
37705 /* Validate arguments. */
37706 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37707 return false;
37709 if (!base_reg_operand (target, GET_MODE (target)))
37710 return false;
37712 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37713 return false;
37715 /* Allow sign/zero extension. */
37716 if (GET_CODE (mem) == ZERO_EXTEND
37717 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
37718 mem = XEXP (mem, 0);
37720 if (!MEM_P (mem))
37721 return false;
37723 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
37724 return false;
37726 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37727 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
37728 return false;
37730 /* Validate that the register used to load the high value is either the
37731 register being loaded, or we can safely replace its use.
37733 This function is only called from the peephole2 pass and we assume that
37734 there are 2 instructions in the peephole (addis and load), so we check
37735 that the target register is not used in the memory address and that the
37736 register holding the addis result is dead after the peephole. */
37737 if (REGNO (addis_reg) != REGNO (target))
37739 if (reg_mentioned_p (target, mem))
37740 return false;
37742 if (!peep2_reg_dead_p (2, addis_reg))
37743 return false;
37745 /* If the target register being loaded is the stack pointer, we must
37746 avoid loading any other value into it, even temporarily. */
37747 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
37748 return false;
37751 base_reg = XEXP (addr, 0);
37752 return REGNO (addis_reg) == REGNO (base_reg);
37755 /* During the peephole2 pass, adjust and expand the insns for a load fusion
37756 sequence. We adjust the addis register to use the target register. If the
37757 load sign extends, we adjust the code to do the zero extending load, and an
37758 explicit sign extension later since the fusion only covers zero extending
37759 loads.
37761 The operands are:
37762 operands[0] register set with addis (to be replaced with target)
37763 operands[1] value set via addis
37764 operands[2] target register being loaded
37765 operands[3] D-form memory reference using operands[0]. */
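/* Editor's illustration (register numbers invented): the peephole turns

       addis r10,r2,sym@toc@ha      # operands[0] = r10
       lwz   r9,sym@toc@l(r10)      # operands[2] = r9

   into

       addis r9,r2,sym@toc@ha
       lwz   r9,sym@toc@l(r9)

   so the addis target matches the loaded register, the shape power8 can
   fuse; a sign-extending load additionally becomes a zero-extending load
   followed by an explicit sign extension.  */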
37767 void
37768 expand_fusion_gpr_load (rtx *operands)
37770 rtx addis_value = operands[1];
37771 rtx target = operands[2];
37772 rtx orig_mem = operands[3];
37773 rtx new_addr, new_mem, orig_addr, offset;
37774 enum rtx_code plus_or_lo_sum;
37775 machine_mode target_mode = GET_MODE (target);
37776 machine_mode extend_mode = target_mode;
37777 machine_mode ptr_mode = Pmode;
37778 enum rtx_code extend = UNKNOWN;
37780 if (GET_CODE (orig_mem) == ZERO_EXTEND
37781 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
37783 extend = GET_CODE (orig_mem);
37784 orig_mem = XEXP (orig_mem, 0);
37785 target_mode = GET_MODE (orig_mem);
37788 gcc_assert (MEM_P (orig_mem));
37790 orig_addr = XEXP (orig_mem, 0);
37791 plus_or_lo_sum = GET_CODE (orig_addr);
37792 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37794 offset = XEXP (orig_addr, 1);
37795 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37796 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37798 if (extend != UNKNOWN)
37799 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
37801 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37802 UNSPEC_FUSION_GPR);
37803 emit_insn (gen_rtx_SET (target, new_mem));
37805 if (extend == SIGN_EXTEND)
37807 int sub_off = ((BYTES_BIG_ENDIAN)
37808 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
37809 : 0);
37810 rtx sign_reg
37811 = simplify_subreg (target_mode, target, extend_mode, sub_off);
37813 emit_insn (gen_rtx_SET (target,
37814 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
37817 return;
37820 /* Emit the addis instruction that will be part of a fused instruction
37821 sequence. */
37823 void
37824 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
37825 const char *mode_name)
37827 rtx fuse_ops[10];
37828 char insn_template[80];
37829 const char *addis_str = NULL;
37830 const char *comment_str = ASM_COMMENT_START;
37832 if (*comment_str == ' ')
37833 comment_str++;
37835 /* Emit the addis instruction. */
37836 fuse_ops[0] = target;
37837 if (satisfies_constraint_L (addis_value))
37839 fuse_ops[1] = addis_value;
37840 addis_str = "lis %0,%v1";
37843 else if (GET_CODE (addis_value) == PLUS)
37845 rtx op0 = XEXP (addis_value, 0);
37846 rtx op1 = XEXP (addis_value, 1);
37848 if (REG_P (op0) && CONST_INT_P (op1)
37849 && satisfies_constraint_L (op1))
37851 fuse_ops[1] = op0;
37852 fuse_ops[2] = op1;
37853 addis_str = "addis %0,%1,%v2";
37857 else if (GET_CODE (addis_value) == HIGH)
37859 rtx value = XEXP (addis_value, 0);
37860 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
37862 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
37863 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
37864 if (TARGET_ELF)
37865 addis_str = "addis %0,%2,%1@toc@ha";
37867 else if (TARGET_XCOFF)
37868 addis_str = "addis %0,%1@u(%2)";
37870 else
37871 gcc_unreachable ();
37874 else if (GET_CODE (value) == PLUS)
37876 rtx op0 = XEXP (value, 0);
37877 rtx op1 = XEXP (value, 1);
37879 if (GET_CODE (op0) == UNSPEC
37880 && XINT (op0, 1) == UNSPEC_TOCREL
37881 && CONST_INT_P (op1))
37883 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
37884 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
37885 fuse_ops[3] = op1;
37886 if (TARGET_ELF)
37887 addis_str = "addis %0,%2,%1+%3@toc@ha";
37889 else if (TARGET_XCOFF)
37890 addis_str = "addis %0,%1+%3@u(%2)";
37892 else
37893 gcc_unreachable ();
37897 else if (satisfies_constraint_L (value))
37899 fuse_ops[1] = value;
37900 addis_str = "lis %0,%v1";
37903 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
37905 fuse_ops[1] = value;
37906 addis_str = "lis %0,%1@ha";
37910 if (!addis_str)
37911 fatal_insn ("Could not generate addis value for fusion", addis_value);
37913 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
37914 comment, mode_name);
37915 output_asm_insn (insn_template, fuse_ops);
37918 /* Emit a D-form load or store instruction that is the second instruction
37919 of a fusion sequence. */
37921 void
37922 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
37923 const char *insn_str)
37925 rtx fuse_ops[10];
37926 char insn_template[80];
37928 fuse_ops[0] = load_store_reg;
37929 fuse_ops[1] = addis_reg;
37931 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
37933 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
37934 fuse_ops[2] = offset;
37935 output_asm_insn (insn_template, fuse_ops);
37938 else if (GET_CODE (offset) == UNSPEC
37939 && XINT (offset, 1) == UNSPEC_TOCREL)
37941 if (TARGET_ELF)
37942 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
37944 else if (TARGET_XCOFF)
37945 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37947 else
37948 gcc_unreachable ();
37950 fuse_ops[2] = XVECEXP (offset, 0, 0);
37951 output_asm_insn (insn_template, fuse_ops);
37954 else if (GET_CODE (offset) == PLUS
37955 && GET_CODE (XEXP (offset, 0)) == UNSPEC
37956 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
37957 && CONST_INT_P (XEXP (offset, 1)))
37959 rtx tocrel_unspec = XEXP (offset, 0);
37960 if (TARGET_ELF)
37961 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
37963 else if (TARGET_XCOFF)
37964 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
37966 else
37967 gcc_unreachable ();
37969 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
37970 fuse_ops[3] = XEXP (offset, 1);
37971 output_asm_insn (insn_template, fuse_ops);
37974 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
37976 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37978 fuse_ops[2] = offset;
37979 output_asm_insn (insn_template, fuse_ops);
37982 else
37983 fatal_insn ("Unable to generate load/store offset for fusion", offset);
37985 return;
37988 /* Wrap a TOC address that can be fused to indicate that special fusion
37989 processing is needed. */
37991 static rtx
37992 fusion_wrap_memory_address (rtx old_mem)
37994 rtx old_addr = XEXP (old_mem, 0);
37995 rtvec v = gen_rtvec (1, old_addr);
37996 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
37997 return replace_equiv_address_nv (old_mem, new_addr, false);
38000 /* Given an address, convert it into the addis and load offset parts. Addresses
38001 created during the peephole2 process look like:
38002 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38003 (unspec [(...)] UNSPEC_TOCREL))
38005 Addresses created via toc fusion look like:
38006 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
38008 static void
38009 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38011 rtx hi, lo;
38013 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38015 lo = XVECEXP (addr, 0, 0);
38016 hi = gen_rtx_HIGH (Pmode, lo);
38018 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38020 hi = XEXP (addr, 0);
38021 lo = XEXP (addr, 1);
38023 else
38024 gcc_unreachable ();
38026 *p_hi = hi;
38027 *p_lo = lo;
38030 /* Return a string to fuse an addis instruction with a gpr load to the same
38031 register that we loaded up the addis instruction. The address that is used
38032 is the logical address that was formed during peephole2:
38033 (lo_sum (high) (low-part))
38035 Or the address is the TOC address that is wrapped before register allocation:
38036 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38038 The code is complicated, so we call output_asm_insn directly, and just
38039 return "". */
38041 const char *
38042 emit_fusion_gpr_load (rtx target, rtx mem)
38044 rtx addis_value;
38045 rtx addr;
38046 rtx load_offset;
38047 const char *load_str = NULL;
38048 const char *mode_name = NULL;
38049 machine_mode mode;
38051 if (GET_CODE (mem) == ZERO_EXTEND)
38052 mem = XEXP (mem, 0);
38054 gcc_assert (REG_P (target) && MEM_P (mem));
38056 addr = XEXP (mem, 0);
38057 fusion_split_address (addr, &addis_value, &load_offset);
38059 /* Now emit the load instruction to the same register. */
38060 mode = GET_MODE (mem);
38061 switch (mode)
38063 case QImode:
38064 mode_name = "char";
38065 load_str = "lbz";
38066 break;
38068 case HImode:
38069 mode_name = "short";
38070 load_str = "lhz";
38071 break;
38073 case SImode:
38074 case SFmode:
38075 mode_name = (mode == SFmode) ? "float" : "int";
38076 load_str = "lwz";
38077 break;
38079 case DImode:
38080 case DFmode:
38081 gcc_assert (TARGET_POWERPC64);
38082 mode_name = (mode == DFmode) ? "double" : "long";
38083 load_str = "ld";
38084 break;
38086 default:
38087 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38090 /* Emit the addis instruction. */
38091 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
38093 /* Emit the D-form load instruction. */
38094 emit_fusion_load_store (target, target, load_offset, load_str);
38096 return "";
38100 /* Return true if the peephole2 can combine a load/store involving a
38101 combination of an addis instruction and the memory operation. This was
38102 added to the ISA 3.0 (power9) hardware. */
38104 bool
38105 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38106 rtx addis_value, /* addis value. */
38107 rtx dest, /* destination (memory or register). */
38108 rtx src) /* source (register or memory). */
38110 rtx addr, mem, offset;
38111 enum machine_mode mode = GET_MODE (src);
38113 /* Validate arguments. */
38114 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38115 return false;
38117 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38118 return false;
38120 /* Ignore extend operations that are part of the load. */
38121 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38122 src = XEXP (src, 0);
38124 /* Test for memory<-register or register<-memory. */
38125 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38127 if (!MEM_P (dest))
38128 return false;
38130 mem = dest;
38133 else if (MEM_P (src))
38135 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38136 return false;
38138 mem = src;
38141 else
38142 return false;
38144 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38145 if (GET_CODE (addr) == PLUS)
38147 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38148 return false;
38150 return satisfies_constraint_I (XEXP (addr, 1));
38153 else if (GET_CODE (addr) == LO_SUM)
38155 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38156 return false;
38158 offset = XEXP (addr, 1);
38159 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38160 return small_toc_ref (offset, GET_MODE (offset));
38162 else if (TARGET_ELF && !TARGET_POWERPC64)
38163 return CONSTANT_P (offset);
38166 return false;
38169 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38170 load sequence.
38172 The operands are:
38173 operands[0] register set with addis
38174 operands[1] value set via addis
38175 operands[2] target register being loaded
38176 operands[3] D-form memory reference using operands[0].
38178 This is similar to the fusion introduced with power8, except it scales to
38179 both loads/stores and does not require the result register to be the same as
38180 the base register. At the moment, we only do this if the register set by
38181 the addis is dead. */
38183 void
38184 expand_fusion_p9_load (rtx *operands)
38186 rtx tmp_reg = operands[0];
38187 rtx addis_value = operands[1];
38188 rtx target = operands[2];
38189 rtx orig_mem = operands[3];
38190 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38191 enum rtx_code plus_or_lo_sum;
38192 machine_mode target_mode = GET_MODE (target);
38193 machine_mode extend_mode = target_mode;
38194 machine_mode ptr_mode = Pmode;
38195 enum rtx_code extend = UNKNOWN;
38197 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38199 extend = GET_CODE (orig_mem);
38200 orig_mem = XEXP (orig_mem, 0);
38201 target_mode = GET_MODE (orig_mem);
38204 gcc_assert (MEM_P (orig_mem));
38206 orig_addr = XEXP (orig_mem, 0);
38207 plus_or_lo_sum = GET_CODE (orig_addr);
38208 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38210 offset = XEXP (orig_addr, 1);
38211 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38212 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38214 if (extend != UNKNOWN)
38215 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38217 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38218 UNSPEC_FUSION_P9);
38220 set = gen_rtx_SET (target, new_mem);
38221 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38222 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38223 emit_insn (insn);
38225 return;
38228 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38229 store sequence.
38231 The operands are:
38232 operands[0] register set with addis
38233 operands[1] value set via addis
38234 operands[2] target D-form memory being stored to
38235 operands[3] register being stored
38237 This is similar to the fusion introduced with power8, except it scales to
38238 both loads/stores and does not require the result register to be the same as
38239 the base register. At the moment, we only do this if the register set by
38240 the addis is dead. */
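/* Editor's sketch (register numbers invented): for a store the pair

       addis r10,r2,sym@toc@ha
       stw   r9,sym@toc@l(r10)

   keeps both instructions, but the UNSPEC_FUSION_P9 form built below
   marks r10 as a mere clobbered temporary, keeping the addis adjacent
   to the store so the power9 hardware can fuse them.  */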
38242 void
38243 expand_fusion_p9_store (rtx *operands)
38245 rtx tmp_reg = operands[0];
38246 rtx addis_value = operands[1];
38247 rtx orig_mem = operands[2];
38248 rtx src = operands[3];
38249 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38250 enum rtx_code plus_or_lo_sum;
38251 machine_mode target_mode = GET_MODE (orig_mem);
38252 machine_mode ptr_mode = Pmode;
38254 gcc_assert (MEM_P (orig_mem));
38256 orig_addr = XEXP (orig_mem, 0);
38257 plus_or_lo_sum = GET_CODE (orig_addr);
38258 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38260 offset = XEXP (orig_addr, 1);
38261 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38262 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38264 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38265 UNSPEC_FUSION_P9);
38267 set = gen_rtx_SET (new_mem, new_src);
38268 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38269 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38270 emit_insn (insn);
38272 return;
38275 /* Return a string to fuse an addis instruction with a load using extended
38276 fusion. The address that is used is the logical address that was formed
38277 during peephole2: (lo_sum (high) (low-part))
38279 The code is complicated, so we call output_asm_insn directly, and just
38280 return "". */
38282 const char *
38283 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
38285 enum machine_mode mode = GET_MODE (reg);
38286 rtx hi;
38287 rtx lo;
38288 rtx addr;
38289 const char *load_string;
38290 int r;
38292 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
38294 mem = XEXP (mem, 0);
38295 mode = GET_MODE (mem);
38298 if (GET_CODE (reg) == SUBREG)
38300 gcc_assert (SUBREG_BYTE (reg) == 0);
38301 reg = SUBREG_REG (reg);
38304 if (!REG_P (reg))
38305 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
38307 r = REGNO (reg);
38308 if (FP_REGNO_P (r))
38310 if (mode == SFmode)
38311 load_string = "lfs";
38312 else if (mode == DFmode || mode == DImode)
38313 load_string = "lfd";
38314 else
38315 gcc_unreachable ();
38317 else if (INT_REGNO_P (r))
38319 switch (mode)
38321 case QImode:
38322 load_string = "lbz";
38323 break;
38324 case HImode:
38325 load_string = "lhz";
38326 break;
38327 case SImode:
38328 case SFmode:
38329 load_string = "lwz";
38330 break;
38331 case DImode:
38332 case DFmode:
38333 if (!TARGET_POWERPC64)
38334 gcc_unreachable ();
38335 load_string = "ld";
38336 break;
38337 default:
38338 gcc_unreachable ();
38341 else
38342 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38344 if (!MEM_P (mem))
38345 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38347 addr = XEXP (mem, 0);
38348 fusion_split_address (addr, &hi, &lo);
38350 /* Emit the addis instruction. */
38351 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
38353 /* Emit the D-form load instruction. */
38354 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38356 return "";
38359 /* Return a string to fuse an addis instruction with a store using extended
38360 fusion. The address that is used is the logical address that was formed
38361 during peephole2: (lo_sum (high) (low-part))
38363 The code is complicated, so we call output_asm_insn directly, and just
38364 return "". */
38366 const char *
38367 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38369 enum machine_mode mode = GET_MODE (reg);
38370 rtx hi;
38371 rtx lo;
38372 rtx addr;
38373 const char *store_string;
38374 int r;
38376 if (GET_CODE (reg) == SUBREG)
38378 gcc_assert (SUBREG_BYTE (reg) == 0);
38379 reg = SUBREG_REG (reg);
38382 if (!REG_P (reg))
38383 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38385 r = REGNO (reg);
38386 if (FP_REGNO_P (r))
38388 if (mode == SFmode)
38389 store_string = "stfs";
38390 else if (mode == DFmode)
38391 store_string = "stfd";
38392 else
38393 gcc_unreachable ();
38395 else if (INT_REGNO_P (r))
38397 switch (mode)
38399 case QImode:
38400 store_string = "stb";
38401 break;
38402 case HImode:
38403 store_string = "sth";
38404 break;
38405 case SImode:
38406 case SFmode:
38407 store_string = "stw";
38408 break;
38409 case DImode:
38410 case DFmode:
38411 if (!TARGET_POWERPC64)
38412 gcc_unreachable ();
38413 store_string = "std";
38414 break;
38415 default:
38416 gcc_unreachable ();
38419 else
38420 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38422 if (!MEM_P (mem))
38423 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38425 addr = XEXP (mem, 0);
38426 fusion_split_address (addr, &hi, &lo);
38428 /* Emit the addis instruction. */
38429 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
38431 /* Emit the D-form store instruction. */
38432 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38434 return "";
38438 /* Analyze vector computations and remove unnecessary doubleword
38439 swaps (xxswapdi instructions). This pass is performed only
38440 for little-endian VSX code generation.
38442 For this specific case, loads and stores of 4x32 and 2x64 vectors
38443 are inefficient. These are implemented using the lxvd2x and
38444 stxvd2x instructions, which invert the order of doublewords in
38445 a vector register. Thus the code generation inserts an xxswapdi
38446 after each such load, and prior to each such store. (For spill
38447 code after register assignment, an additional xxswapdi is inserted
38448 following each store in order to return a hard register to its
38449 unpermuted value.)
38451 The extra xxswapdi instructions reduce performance. This can be
38452 particularly bad for vectorized code. The purpose of this pass
38453 is to reduce the number of xxswapdi instructions required for
38454 correctness.
38456 The primary insight is that much code that operates on vectors
38457 does not care about the relative order of elements in a register,
38458 so long as the correct memory order is preserved. If we have
38459 a computation where all input values are provided by lxvd2x/xxswapdi
38460 sequences, all outputs are stored using xxswapdi/stxvd2x sequences,
38461 and all intermediate computations are pure SIMD (independent of
38462 element order), then all the xxswapdi's associated with the loads
38463 and stores may be removed.
38465 This pass uses some of the infrastructure and logical ideas from
38466 the "web" pass in web.c. We create maximal webs of computations
38467 fitting the description above using union-find. Each such web is
38468 then optimized by removing its unnecessary xxswapdi instructions.
38470 The pass is placed prior to global optimization so that we can
38471 perform the optimization in the safest and simplest way possible;
38472 that is, by replacing each xxswapdi insn with a register copy insn.
38473 Subsequent forward propagation will remove copies where possible.
38475 There are some operations sensitive to element order for which we
38476 can still allow the operation, provided we modify those operations.
38477 These include CONST_VECTORs, for which we must swap the first and
38478 second halves of the constant vector; and SUBREGs, for which we
38479 must adjust the byte offset to account for the swapped doublewords.
38480 A remaining opportunity would be non-immediate-form splats, for
38481 which we should adjust the selected lane of the input. We should
38482 also make code generation adjustments for sum-across operations,
38483 since this is a common vectorizer reduction.
38485 Because we run prior to the first split, we can see loads and stores
38486 here that match *vsx_le_perm_{load,store}_<mode>. These are vanilla
38487 vector loads and stores that have not yet been split into a permuting
38488 load/store and a swap. (One way this can happen is with a builtin
38489 call to vec_vsx_{ld,st}.) We can handle these as well, but rather
38490 than deleting a swap, we convert the load/store into a permuting
38491 load/store (which effectively removes the swap). */
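/* Concrete illustration (editor's addition, using the VSX extended
   mnemonics): for little-endian code such as

       lxvd2x  0,0,3      # load; doublewords arrive reversed
       xxswapd 0,0        # restore element order
       xvadddp 0,0,1      # order-insensitive SIMD add
       xxswapd 0,0
       stxvd2x 0,0,4      # store; doublewords reversed again

   this pass removes both xxswapd instructions, since the add does not
   care about doubleword order.  */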
38493 /* Notes on Permutes
38495 We do not currently handle computations that contain permutes. There
38496 is a general transformation that can be performed correctly, but it
38497 may introduce more expensive code than it replaces. To handle these
38498 would require a cost model to determine when to perform the optimization.
38499 This commentary records how this could be done if desired.
38501 The most general permute is something like this (example for V16QI):
38503 (vec_select:V16QI (vec_concat:V32QI (op1:V16QI) (op2:V16QI))
38504 (parallel [(const_int a0) (const_int a1)
38506 (const_int a14) (const_int a15)]))
38508 where a0,...,a15 are in [0,31] and select elements from op1 and op2
38509 to produce the result.
38511 Regardless of mode, we can convert the PARALLEL to a mask of 16
38512 byte-element selectors. Let's call this M, with M[i] representing
38513 the ith byte-element selector value. Then if we swap doublewords
38514 throughout the computation, we can get correct behavior by replacing
38515 M with M' as follows:
38517 M'[i] = { (M[i]+8)%16 : M[i] in [0,15]
38518 { ((M[i]+8)%16)+16 : M[i] in [16,31]
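   As a worked instance (editor's addition): if M is the identity mask
   {0,1,...,15}, then M'[i] = (M[i]+8)%16 yields {8,...,15,0,...,7},
   which is itself a doubleword swap -- exactly the compensation needed
   once every value reaching the permute has had its doublewords swapped.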
38520 This seems promising at first, since we are just replacing one mask
38521 with another. But certain masks are preferable to others. If M
38522 is a mask that matches a vmrghh pattern, for example, M' certainly
38523 will not. Instead of a single vmrghh, we would generate a load of
38524 M' and a vperm. So we would need to know how many xxswapd's we can
38525 remove as a result of this transformation to determine if it's
38526 profitable; and preferably the logic would need to be aware of all
38527 the special preferable masks.
38529 Another form of permute is an UNSPEC_VPERM, in which the mask is
38530 already in a register. In some cases, this mask may be a constant
38531 that we can discover with ud-chains, in which case the above
38532 transformation is ok. However, the common usage here is for the
38533 mask to be produced by an UNSPEC_LVSL, in which case the mask
38534 cannot be known at compile time. In such a case we would have to
38535 generate several instructions to compute M' as above at run time,
38536 and a cost model is needed again.
38538 However, when the mask M for an UNSPEC_VPERM is loaded from the
38539 constant pool, we can replace M with M' as above at no cost
38540 beyond adding a constant pool entry. */
38542 /* This is based on the union-find logic in web.c. web_entry_base is
38543 defined in df.h. */
38544 class swap_web_entry : public web_entry_base
38546 public:
38547 /* Pointer to the insn. */
38548 rtx_insn *insn;
38549 /* Set if insn contains a mention of a vector register. All other
38550 fields are undefined if this field is unset. */
38551 unsigned int is_relevant : 1;
38552 /* Set if insn is a load. */
38553 unsigned int is_load : 1;
38554 /* Set if insn is a store. */
38555 unsigned int is_store : 1;
38556 /* Set if insn is a doubleword swap. This can either be a register swap
38557 or a permuting load or store (test is_load and is_store for this). */
38558 unsigned int is_swap : 1;
38559 /* Set if the insn has a live-in use of a parameter register. */
38560 unsigned int is_live_in : 1;
38561 /* Set if the insn has a live-out def of a return register. */
38562 unsigned int is_live_out : 1;
38563 /* Set if the insn contains a subreg reference of a vector register. */
38564 unsigned int contains_subreg : 1;
38565 /* Set if the insn contains a 128-bit integer operand. */
38566 unsigned int is_128_int : 1;
38567 /* Set if this is a call-insn. */
38568 unsigned int is_call : 1;
38569 /* Set if this insn does not perform a vector operation for which
38570 element order matters, or if we know how to fix it up if it does.
38571 Undefined if is_swap is set. */
38572 unsigned int is_swappable : 1;
38573 /* A nonzero value indicates what kind of special handling for this
38574 insn is required if doublewords are swapped. Undefined if
38575 is_swappable is not set. */
38576 unsigned int special_handling : 4;
38577 /* Set if the web represented by this entry cannot be optimized. */
38578 unsigned int web_not_optimizable : 1;
38579 /* Set if this insn should be deleted. */
38580 unsigned int will_delete : 1;
38583 enum special_handling_values {
38584 SH_NONE = 0,
38585 SH_CONST_VECTOR,
38586 SH_SUBREG,
38587 SH_NOSWAP_LD,
38588 SH_NOSWAP_ST,
38589 SH_EXTRACT,
38590 SH_SPLAT,
38591 SH_XXPERMDI,
38592 SH_CONCAT,
38593 SH_VPERM
38596 /* Union INSN with all insns containing definitions that reach USE.
38597 Detect whether USE is live-in to the current function. */
38598 static void
38599 union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
38601 struct df_link *link = DF_REF_CHAIN (use);
38603 if (!link)
38604 insn_entry[INSN_UID (insn)].is_live_in = 1;
38606 while (link)
38608 if (DF_REF_IS_ARTIFICIAL (link->ref))
38609 insn_entry[INSN_UID (insn)].is_live_in = 1;
38611 if (DF_REF_INSN_INFO (link->ref))
38613 rtx def_insn = DF_REF_INSN (link->ref);
38614 (void)unionfind_union (insn_entry + INSN_UID (insn),
38615 insn_entry + INSN_UID (def_insn));
38618 link = link->next;
38622 /* Union INSN with all insns containing uses reached from DEF.
38623 Detect whether DEF is live-out from the current function. */
38624 static void
38625 union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
38627 struct df_link *link = DF_REF_CHAIN (def);
38629 if (!link)
38630 insn_entry[INSN_UID (insn)].is_live_out = 1;
38632 while (link)
38634 /* This could be an eh use or some other artificial use;
38635 we treat these all the same (killing the optimization). */
38636 if (DF_REF_IS_ARTIFICIAL (link->ref))
38637 insn_entry[INSN_UID (insn)].is_live_out = 1;
38639 if (DF_REF_INSN_INFO (link->ref))
38641 rtx use_insn = DF_REF_INSN (link->ref);
38642 (void)unionfind_union (insn_entry + INSN_UID (insn),
38643 insn_entry + INSN_UID (use_insn));
38646 link = link->next;
38650 /* Return 1 iff INSN is a load insn, including permuting loads that
38651 represent an lxvd2x instruction; else return 0. */
38652 static unsigned int
38653 insn_is_load_p (rtx insn)
38655 rtx body = PATTERN (insn);
38657 if (GET_CODE (body) == SET)
38659 if (GET_CODE (SET_SRC (body)) == MEM)
38660 return 1;
38662 if (GET_CODE (SET_SRC (body)) == VEC_SELECT
38663 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
38664 return 1;
38666 return 0;
38669 if (GET_CODE (body) != PARALLEL)
38670 return 0;
38672 rtx set = XVECEXP (body, 0, 0);
38674 if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
38675 return 1;
38677 return 0;
38680 /* Return 1 iff INSN is a store insn, including permuting stores that
38681 represent a stxvd2x instruction; else return 0. */
38682 static unsigned int
38683 insn_is_store_p (rtx insn)
38685 rtx body = PATTERN (insn);
38686 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
38687 return 1;
38688 if (GET_CODE (body) != PARALLEL)
38689 return 0;
38690 rtx set = XVECEXP (body, 0, 0);
38691 if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
38692 return 1;
38693 return 0;
38696 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
38697 a permuting load, or a permuting store. */
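/* For V4SImode, for example (editor's note), the body accepted below is
   (set (reg) (vec_select:V4SI (reg) (parallel [2 3 0 1]))):
   len == 4, the first len/2 selectors must equal len/2 + i ({2,3}) and
   the remaining ones i - len/2 ({0,1}).  */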
38698 static unsigned int
38699 insn_is_swap_p (rtx insn)
38701 rtx body = PATTERN (insn);
38702 if (GET_CODE (body) != SET)
38703 return 0;
38704 rtx rhs = SET_SRC (body);
38705 if (GET_CODE (rhs) != VEC_SELECT)
38706 return 0;
38707 rtx parallel = XEXP (rhs, 1);
38708 if (GET_CODE (parallel) != PARALLEL)
38709 return 0;
38710 unsigned int len = XVECLEN (parallel, 0);
38711 if (len != 2 && len != 4 && len != 8 && len != 16)
38712 return 0;
38713 for (unsigned int i = 0; i < len / 2; ++i)
38715 rtx op = XVECEXP (parallel, 0, i);
38716 if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
38717 return 0;
38719 for (unsigned int i = len / 2; i < len; ++i)
38721 rtx op = XVECEXP (parallel, 0, i);
38722 if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
38723 return 0;
38725 return 1;
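/* For example, with len == 4 (V4SI) the two loops above require the
   selection vector { 2, 3, 0, 1 }, i.e. a doubleword swap:

     (set (reg:V4SI vT)
	  (vec_select:V4SI (reg:V4SI vS)
			   (parallel [(const_int 2) (const_int 3)
				      (const_int 0) (const_int 1)])))

   vT and vS are placeholder registers; for len == 2 (V2DI/V2DF) the
   required selection is { 1, 0 }.  */
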
/* Return TRUE if insn is a swap fed by a load from the constant pool.  */
static bool
const_load_sequence_p (swap_web_entry *insn_entry, rtx insn)
{
  unsigned uid = INSN_UID (insn);
  if (!insn_entry[uid].is_swap || insn_entry[uid].is_load)
    return false;

  /* Find the unique use in the swap and locate its def.  If the def
     isn't unique, punt.  */
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  df_ref use;
  FOR_EACH_INSN_INFO_USE (use, insn_info)
    {
      struct df_link *def_link = DF_REF_CHAIN (use);
      if (!def_link || def_link->next)
	return false;

      rtx def_insn = DF_REF_INSN (def_link->ref);
      unsigned uid2 = INSN_UID (def_insn);
      if (!insn_entry[uid2].is_load || !insn_entry[uid2].is_swap)
	return false;

      rtx body = PATTERN (def_insn);
      if (GET_CODE (body) != SET
	  || GET_CODE (SET_SRC (body)) != VEC_SELECT
	  || GET_CODE (XEXP (SET_SRC (body), 0)) != MEM)
	return false;

      rtx mem = XEXP (SET_SRC (body), 0);
      rtx base_reg = XEXP (mem, 0);

      df_ref base_use;
      insn_info = DF_INSN_INFO_GET (def_insn);
      FOR_EACH_INSN_INFO_USE (base_use, insn_info)
	{
	  if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
	    continue;

	  struct df_link *base_def_link = DF_REF_CHAIN (base_use);
	  if (!base_def_link || base_def_link->next)
	    return false;

	  rtx tocrel_insn = DF_REF_INSN (base_def_link->ref);
	  rtx tocrel_body = PATTERN (tocrel_insn);
	  rtx base, offset;
	  if (GET_CODE (tocrel_body) != SET)
	    return false;
	  /* There is an extra level of indirection for small/large
	     code models.  */
	  rtx tocrel_expr = SET_SRC (tocrel_body);
	  if (GET_CODE (tocrel_expr) == MEM)
	    tocrel_expr = XEXP (tocrel_expr, 0);
	  if (!toc_relative_expr_p (tocrel_expr, false))
	    return false;
	  split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
	  if (GET_CODE (base) != SYMBOL_REF || !CONSTANT_POOL_ADDRESS_P (base))
	    return false;
	}
    }
  return true;
}

/* Return TRUE iff OP matches a V2DF reduction pattern.  See the
   definition of vsx_reduc_<VEC_reduc_name>_v2df in vsx.md.  */
static bool
v2df_reduction_p (rtx op)
{
  if (GET_MODE (op) != V2DFmode)
    return false;

  enum rtx_code code = GET_CODE (op);
  if (code != PLUS && code != SMIN && code != SMAX)
    return false;

  rtx concat = XEXP (op, 0);
  if (GET_CODE (concat) != VEC_CONCAT)
    return false;

  rtx select0 = XEXP (concat, 0);
  rtx select1 = XEXP (concat, 1);
  if (GET_CODE (select0) != VEC_SELECT || GET_CODE (select1) != VEC_SELECT)
    return false;

  rtx reg0 = XEXP (select0, 0);
  rtx reg1 = XEXP (select1, 0);
  if (!rtx_equal_p (reg0, reg1) || !REG_P (reg0))
    return false;

  rtx parallel0 = XEXP (select0, 1);
  rtx parallel1 = XEXP (select1, 1);
  if (GET_CODE (parallel0) != PARALLEL || GET_CODE (parallel1) != PARALLEL)
    return false;

  if (!rtx_equal_p (XVECEXP (parallel0, 0, 0), const1_rtx)
      || !rtx_equal_p (XVECEXP (parallel1, 0, 0), const0_rtx))
    return false;

  return true;
}

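/* Illustrative sketch of the shape accepted above, for the PLUS case
   (the second operand of the PLUS is not examined here; see the
   vsx_reduc_* patterns in vsx.md for the full form):

     (plus:V2DF
       (vec_concat:V2DF
	 (vec_select:DF (reg:V2DF vR) (parallel [(const_int 1)]))
	 (vec_select:DF (reg:V2DF vR) (parallel [(const_int 0)])))
       ...)

   vR is a placeholder register; SMIN/SMAX reductions are analogous.  */
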
/* Return 1 iff OP is an operand that will not be affected by having
   vector doublewords swapped in memory.  */
static unsigned int
rtx_is_swappable_p (rtx op, unsigned int *special)
{
  enum rtx_code code = GET_CODE (op);
  int i, j;
  rtx parallel;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CLOBBER:
    case REG:
      return 1;

    case VEC_CONCAT:
    case ASM_INPUT:
    case ASM_OPERANDS:
      return 0;

    case CONST_VECTOR:
      {
	*special = SH_CONST_VECTOR;
	return 1;
      }

    case VEC_DUPLICATE:
      /* Opportunity: If XEXP (op, 0) has the same mode as the result,
	 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
	 it represents a vector splat for which we can do special
	 handling.  */
      if (GET_CODE (XEXP (op, 0)) == CONST_INT)
	return 1;
      else if (GET_CODE (XEXP (op, 0)) == REG
	       && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
	/* This catches V2DF and V2DI splat, at a minimum.  */
	return 1;
      else if (GET_CODE (XEXP (op, 0)) == VEC_SELECT)
	/* If the duplicated item is from a select, defer to the select
	   processing to see if we can change the lane for the splat.  */
	return rtx_is_swappable_p (XEXP (op, 0), special);
      else
	return 0;

    case VEC_SELECT:
      /* A vec_extract operation is ok if we change the lane.  */
      if (GET_CODE (XEXP (op, 0)) == REG
	  && GET_MODE_INNER (GET_MODE (XEXP (op, 0))) == GET_MODE (op)
	  && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
	  && XVECLEN (parallel, 0) == 1
	  && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT)
	{
	  *special = SH_EXTRACT;
	  return 1;
	}
      /* An XXPERMDI is ok if we adjust the lanes.  Note that if the
	 XXPERMDI is a swap operation, it will be identified by
	 insn_is_swap_p and therefore we won't get here.  */
      else if (GET_CODE (XEXP (op, 0)) == VEC_CONCAT
	       && (GET_MODE (XEXP (op, 0)) == V4DFmode
		   || GET_MODE (XEXP (op, 0)) == V4DImode)
	       && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
	       && XVECLEN (parallel, 0) == 2
	       && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT
	       && GET_CODE (XVECEXP (parallel, 0, 1)) == CONST_INT)
	{
	  *special = SH_XXPERMDI;
	  return 1;
	}
      else if (v2df_reduction_p (op))
	return 1;
      else
	return 0;

    case UNSPEC:
      {
	/* Various operations are unsafe for this optimization, at least
	   without significant additional work.  Permutes are obviously
	   problematic, as both the permute control vector and the ordering
	   of the target values are invalidated by doubleword swapping.
	   Vector pack and unpack modify the number of vector lanes.
	   Merge-high/low will not operate correctly on swapped operands.
	   Vector shifts across element boundaries are clearly uncool,
	   as are vector select and concatenate operations.  Vector
	   sum-across instructions define one operand with a specific
	   order-dependent element, so additional fixup code would be
	   needed to make those work.  Vector set and non-immediate-form
	   vector splat are element-order sensitive.  A few of these
	   cases might be workable with special handling if required.
	   Adding cost modeling would be appropriate in some cases.  */
	int val = XINT (op, 1);
	switch (val)
	  {
	  default:
	    break;
	  case UNSPEC_VMRGH_DIRECT:
	  case UNSPEC_VMRGL_DIRECT:
	  case UNSPEC_VPACK_SIGN_SIGN_SAT:
	  case UNSPEC_VPACK_SIGN_UNS_SAT:
	  case UNSPEC_VPACK_UNS_UNS_MOD:
	  case UNSPEC_VPACK_UNS_UNS_MOD_DIRECT:
	  case UNSPEC_VPACK_UNS_UNS_SAT:
	  case UNSPEC_VPERM:
	  case UNSPEC_VPERM_UNS:
	  case UNSPEC_VPERMHI:
	  case UNSPEC_VPERMSI:
	  case UNSPEC_VPKPX:
	  case UNSPEC_VSLDOI:
	  case UNSPEC_VSLO:
	  case UNSPEC_VSRO:
	  case UNSPEC_VSUM2SWS:
	  case UNSPEC_VSUM4S:
	  case UNSPEC_VSUM4UBS:
	  case UNSPEC_VSUMSWS:
	  case UNSPEC_VSUMSWS_DIRECT:
	  case UNSPEC_VSX_CONCAT:
	  case UNSPEC_VSX_SET:
	  case UNSPEC_VSX_SLDWI:
	  case UNSPEC_VUNPACK_HI_SIGN:
	  case UNSPEC_VUNPACK_HI_SIGN_DIRECT:
	  case UNSPEC_VUNPACK_LO_SIGN:
	  case UNSPEC_VUNPACK_LO_SIGN_DIRECT:
	  case UNSPEC_VUPKHPX:
	  case UNSPEC_VUPKHS_V4SF:
	  case UNSPEC_VUPKHU_V4SF:
	  case UNSPEC_VUPKLPX:
	  case UNSPEC_VUPKLS_V4SF:
	  case UNSPEC_VUPKLU_V4SF:
	  case UNSPEC_VSX_CVDPSPN:
	  case UNSPEC_VSX_CVSPDP:
	  case UNSPEC_VSX_CVSPDPN:
	  case UNSPEC_VSX_EXTRACT:
	  case UNSPEC_VSX_VSLO:
	    return 0;
	  case UNSPEC_VSPLT_DIRECT:
	    *special = SH_SPLAT;
	    return 1;
	  case UNSPEC_REDUC_PLUS:
	  case UNSPEC_REDUC:
	    return 1;
	  }
      }

    default:
      break;
    }

  const char *fmt = GET_RTX_FORMAT (code);
  int ok = 1;

  for (i = 0; i < GET_RTX_LENGTH (code); ++i)
    if (fmt[i] == 'e' || fmt[i] == 'u')
      {
	unsigned int special_op = SH_NONE;
	ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
	if (special_op == SH_NONE)
	  continue;
	/* Ensure we never have two kinds of special handling
	   for the same insn.  */
	if (*special != SH_NONE && *special != special_op)
	  return 0;
	*special = special_op;
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (op, i); ++j)
	{
	  unsigned int special_op = SH_NONE;
	  ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
	  if (special_op == SH_NONE)
	    continue;
	  /* Ensure we never have two kinds of special handling
	     for the same insn.  */
	  if (*special != SH_NONE && *special != special_op)
	    return 0;
	  *special = special_op;
	}

  return ok;
}

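/* For example, (set (reg:V4SI vT) (const_vector:V4SI [...])) reaches
   the CONST_VECTOR case through the recursive walk over the SET, so
   the insn is reported swappable with *special == SH_CONST_VECTOR;
   handle_special_swappables later exchanges the constant's halves to
   compensate for the swapped register image.  */
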
/* Return 1 iff INSN is an operand that will not be affected by
   having vector doublewords swapped in memory (in which case
   *SPECIAL is unchanged), or that can be modified to be correct
   if vector doublewords are swapped in memory (in which case
   *SPECIAL is changed to a value indicating how).  */
static unsigned int
insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
		     unsigned int *special)
{
  /* Calls are always bad.  */
  if (GET_CODE (insn) == CALL_INSN)
    return 0;

  /* Loads and stores seen here are not permuting, but we can still
     fix them up by converting them to permuting ones.  Exceptions:
     UNSPEC_LVE, UNSPEC_LVX, and UNSPEC_STVX, which have a PARALLEL
     body instead of a SET; and UNSPEC_STVE, which has an UNSPEC
     for the SET source.  Also we must now make an exception for lvx
     and stvx when they are not in the UNSPEC_LVX/STVX form (with the
     explicit "& -16") since this leads to unrecognizable insns.  */
  rtx body = PATTERN (insn);
  int i = INSN_UID (insn);

  if (insn_entry[i].is_load)
    {
      if (GET_CODE (body) == SET)
	{
	  rtx rhs = SET_SRC (body);
	  gcc_assert (GET_CODE (rhs) == MEM);
	  if (GET_CODE (XEXP (rhs, 0)) == AND)
	    return 0;

	  *special = SH_NOSWAP_LD;
	  return 1;
	}
      else
	return 0;
    }

  if (insn_entry[i].is_store)
    {
      if (GET_CODE (body) == SET
	  && GET_CODE (SET_SRC (body)) != UNSPEC)
	{
	  rtx lhs = SET_DEST (body);
	  gcc_assert (GET_CODE (lhs) == MEM);
	  if (GET_CODE (XEXP (lhs, 0)) == AND)
	    return 0;

	  *special = SH_NOSWAP_ST;
	  return 1;
	}
      else
	return 0;
    }

  /* A convert to single precision can be left as is provided that
     all of its uses are in xxspltw instructions that splat BE element
     zero.  */
  if (GET_CODE (body) == SET
      && GET_CODE (SET_SRC (body)) == UNSPEC
      && XINT (SET_SRC (body), 1) == UNSPEC_VSX_CVDPSPN)
    {
      df_ref def;
      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);

      FOR_EACH_INSN_INFO_DEF (def, insn_info)
	{
	  struct df_link *link = DF_REF_CHAIN (def);
	  if (!link)
	    return 0;

	  for (; link; link = link->next)
	    {
	      rtx use_insn = DF_REF_INSN (link->ref);
	      rtx use_body = PATTERN (use_insn);
	      if (GET_CODE (use_body) != SET
		  || GET_CODE (SET_SRC (use_body)) != UNSPEC
		  || XINT (SET_SRC (use_body), 1) != UNSPEC_VSX_XXSPLTW
		  || XEXP (XEXP (SET_SRC (use_body), 0), 1) != const0_rtx)
		return 0;
	    }
	}

      return 1;
    }

  /* A concatenation of two doublewords is ok if we reverse the
     order of the inputs.  */
  if (GET_CODE (body) == SET
      && GET_CODE (SET_SRC (body)) == VEC_CONCAT
      && (GET_MODE (SET_SRC (body)) == V2DFmode
	  || GET_MODE (SET_SRC (body)) == V2DImode))
    {
      *special = SH_CONCAT;
      return 1;
    }

  /* V2DF reductions are always swappable.  */
  if (GET_CODE (body) == PARALLEL)
    {
      rtx expr = XVECEXP (body, 0, 0);
      if (GET_CODE (expr) == SET
	  && v2df_reduction_p (SET_SRC (expr)))
	return 1;
    }

  /* An UNSPEC_VPERM is ok if the mask operand is loaded from the
     constant pool.  */
  if (GET_CODE (body) == SET
      && GET_CODE (SET_SRC (body)) == UNSPEC
      && XINT (SET_SRC (body), 1) == UNSPEC_VPERM
      && XVECLEN (SET_SRC (body), 0) == 3
      && GET_CODE (XVECEXP (SET_SRC (body), 0, 2)) == REG)
    {
      rtx mask_reg = XVECEXP (SET_SRC (body), 0, 2);
      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
      df_ref use;
      FOR_EACH_INSN_INFO_USE (use, insn_info)
	if (rtx_equal_p (DF_REF_REG (use), mask_reg))
	  {
	    struct df_link *def_link = DF_REF_CHAIN (use);
	    /* Punt if multiple definitions for this reg.  */
	    if (def_link && !def_link->next
		&& const_load_sequence_p (insn_entry,
					  DF_REF_INSN (def_link->ref)))
	      {
		*special = SH_VPERM;
		return 1;
	      }
	  }
    }

  /* Otherwise check the operands for vector lane violations.  */
  return rtx_is_swappable_p (body, special);
}

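/* For example, a non-permuting load such as

     (set (reg:V4SI vT) (mem:V4SI (reg:DI rA)))

   is reported swappable with *special == SH_NOSWAP_LD and is later
   rewritten by permute_load into the permuting (lxvd2x-style) form;
   the same load with an (and ... (const_int -16)) address is rejected
   because rewriting it would produce an unrecognizable insn.  */
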
enum chain_purpose { FOR_LOADS, FOR_STORES };

/* Return true if the UD or DU chain headed by LINK is non-empty,
   and every entry on the chain references an insn that is a
   register swap.  Furthermore, if PURPOSE is FOR_LOADS, each such
   register swap must have only permuting loads as reaching defs.
   If PURPOSE is FOR_STORES, each such register swap must have only
   register swaps or permuting stores as reached uses.  */
static bool
chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
			   enum chain_purpose purpose)
{
  if (!link)
    return false;

  for (; link; link = link->next)
    {
      if (!ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (DF_REF_REG (link->ref))))
	continue;

      if (DF_REF_IS_ARTIFICIAL (link->ref))
	return false;

      rtx reached_insn = DF_REF_INSN (link->ref);
      unsigned uid = INSN_UID (reached_insn);
      struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);

      if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
	  || insn_entry[uid].is_store)
	return false;

      if (purpose == FOR_LOADS)
	{
	  df_ref use;
	  FOR_EACH_INSN_INFO_USE (use, insn_info)
	    {
	      struct df_link *swap_link = DF_REF_CHAIN (use);

	      while (swap_link)
		{
		  if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
		    return false;

		  rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
		  unsigned uid2 = INSN_UID (swap_def_insn);

		  /* Only permuting loads are allowed.  */
		  if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
		    return false;

		  swap_link = swap_link->next;
		}
	    }
	}
      else if (purpose == FOR_STORES)
	{
	  df_ref def;
	  FOR_EACH_INSN_INFO_DEF (def, insn_info)
	    {
	      struct df_link *swap_link = DF_REF_CHAIN (def);

	      while (swap_link)
		{
		  if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
		    return false;

		  rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
		  unsigned uid2 = INSN_UID (swap_use_insn);

		  /* Permuting stores or register swaps are allowed.  */
		  if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
		    return false;

		  swap_link = swap_link->next;
		}
	    }
	}
    }

  return true;
}

/* Mark the xxswapdi instructions associated with permuting loads and
   stores for removal.  Note that we only flag them for deletion here,
   as there is a possibility of a swap being reached from multiple
   loads, etc.  */
static void
mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
{
  rtx insn = insn_entry[i].insn;
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);

  if (insn_entry[i].is_load)
    {
      df_ref def;
      FOR_EACH_INSN_INFO_DEF (def, insn_info)
	{
	  struct df_link *link = DF_REF_CHAIN (def);

	  /* We know by now that these are swaps, so we can delete
	     them confidently.  */
	  while (link)
	    {
	      rtx use_insn = DF_REF_INSN (link->ref);
	      insn_entry[INSN_UID (use_insn)].will_delete = 1;
	      link = link->next;
	    }
	}
    }
  else if (insn_entry[i].is_store)
    {
      df_ref use;
      FOR_EACH_INSN_INFO_USE (use, insn_info)
	{
	  /* Ignore uses for addressability.  */
	  machine_mode mode = GET_MODE (DF_REF_REG (use));
	  if (!ALTIVEC_OR_VSX_VECTOR_MODE (mode))
	    continue;

	  struct df_link *link = DF_REF_CHAIN (use);

	  /* We know by now that these are swaps, so we can delete
	     them confidently.  */
	  while (link)
	    {
	      rtx def_insn = DF_REF_INSN (link->ref);
	      insn_entry[INSN_UID (def_insn)].will_delete = 1;
	      link = link->next;
	    }
	}
    }
}

/* OP is either a CONST_VECTOR or an expression containing one.
   Swap the first half of the vector with the second in the first
   case.  Recurse to find it in the second.  */
static void
swap_const_vector_halves (rtx op)
{
  int i;
  enum rtx_code code = GET_CODE (op);
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
      for (i = 0; i < half_units; ++i)
	{
	  rtx temp = CONST_VECTOR_ELT (op, i);
	  CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
	  CONST_VECTOR_ELT (op, i + half_units) = temp;
	}
    }
  else
    {
      int j;
      const char *fmt = GET_RTX_FORMAT (code);
      for (i = 0; i < GET_RTX_LENGTH (code); ++i)
	if (fmt[i] == 'e' || fmt[i] == 'u')
	  swap_const_vector_halves (XEXP (op, i));
	else if (fmt[i] == 'E')
	  for (j = 0; j < XVECLEN (op, i); ++j)
	    swap_const_vector_halves (XVECEXP (op, i, j));
    }
}

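/* For example, the V4SI constant { 1, 2, 3, 4 } becomes { 3, 4, 1, 2 }:
   exchanging the two doubleword halves of the constant compensates for
   the register image being doubleword-swapped throughout the web.  */
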
/* Find all subregs of a vector expression that perform a narrowing,
   and adjust the subreg index to account for doubleword swapping.  */
static void
adjust_subreg_index (rtx op)
{
  enum rtx_code code = GET_CODE (op);
  if (code == SUBREG
      && (GET_MODE_SIZE (GET_MODE (op))
	  < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
    {
      unsigned int index = SUBREG_BYTE (op);
      if (index < 8)
	index += 8;
      else
	index -= 8;
      SUBREG_BYTE (op) = index;
    }

  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;
  for (i = 0; i < GET_RTX_LENGTH (code); ++i)
    if (fmt[i] == 'e' || fmt[i] == 'u')
      adjust_subreg_index (XEXP (op, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (op, i); ++j)
	adjust_subreg_index (XVECEXP (op, i, j));
}

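/* For example, (subreg:DF (reg:V2DF vR) 0) becomes
   (subreg:DF (reg:V2DF vR) 8): the doubleword that was at byte
   offset 0 lives at byte offset 8 once vR's image is swapped, and
   vice versa (vR is a placeholder register).  */
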
/* Convert the non-permuting load INSN to a permuting one.  */
static void
permute_load (rtx_insn *insn)
{
  rtx body = PATTERN (insn);
  rtx mem_op = SET_SRC (body);
  rtx tgt_reg = SET_DEST (body);
  machine_mode mode = GET_MODE (tgt_reg);
  int n_elts = GET_MODE_NUNITS (mode);
  int half_elts = n_elts / 2;
  rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
  int i, j;
  for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
    XVECEXP (par, 0, i) = GEN_INT (j);
  for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
    XVECEXP (par, 0, i) = GEN_INT (j);
  rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
  SET_SRC (body) = sel;
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Replacing load %d with permuted load\n",
	     INSN_UID (insn));
}

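/* The two loops above build the doubleword-swap selection vector,
   e.g. { 2, 3, 0, 1 } for V4SI, so a plain load is rewritten as

     (set (reg:V4SI vT)
	  (vec_select:V4SI (mem:V4SI (reg:DI rA))
			   (parallel [(const_int 2) (const_int 3)
				      (const_int 0) (const_int 1)])))

   (vT and rA are placeholder registers), matching the permuting-load
   shape used by the rest of the web.  */
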
/* Convert the non-permuting store INSN to a permuting one.  */
static void
permute_store (rtx_insn *insn)
{
  rtx body = PATTERN (insn);
  rtx src_reg = SET_SRC (body);
  machine_mode mode = GET_MODE (src_reg);
  int n_elts = GET_MODE_NUNITS (mode);
  int half_elts = n_elts / 2;
  rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
  int i, j;
  for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
    XVECEXP (par, 0, i) = GEN_INT (j);
  for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
    XVECEXP (par, 0, i) = GEN_INT (j);
  rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
  SET_SRC (body) = sel;
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Replacing store %d with permuted store\n",
	     INSN_UID (insn));
}

/* Given INSN containing a vector extract operation, adjust the index
   of the extracted lane to account for the doubleword swap.  */
static void
adjust_extract (rtx_insn *insn)
{
  rtx pattern = PATTERN (insn);
  if (GET_CODE (pattern) == PARALLEL)
    pattern = XVECEXP (pattern, 0, 0);
  rtx src = SET_SRC (pattern);
  /* The vec_select may be wrapped in a vec_duplicate for a splat, so
     account for that.  */
  rtx sel = GET_CODE (src) == VEC_DUPLICATE ? XEXP (src, 0) : src;
  rtx par = XEXP (sel, 1);
  int half_elts = GET_MODE_NUNITS (GET_MODE (XEXP (sel, 0))) >> 1;
  int lane = INTVAL (XVECEXP (par, 0, 0));
  lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
  XVECEXP (par, 0, 0) = GEN_INT (lane);
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Changing lane for extract %d\n", INSN_UID (insn));
}

/* Given INSN containing a vector direct-splat operation, adjust the
   index of the source lane to account for the doubleword swap.  */
static void
adjust_splat (rtx_insn *insn)
{
  rtx body = PATTERN (insn);
  rtx unspec = XEXP (body, 1);
  int half_elts = GET_MODE_NUNITS (GET_MODE (unspec)) >> 1;
  int lane = INTVAL (XVECEXP (unspec, 0, 1));
  lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
  XVECEXP (unspec, 0, 1) = GEN_INT (lane);
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Changing lane for splat %d\n", INSN_UID (insn));
}

/* Given INSN containing an XXPERMDI operation (that is not a doubleword
   swap), reverse the order of the source operands and adjust the indices
   of the source lanes to account for doubleword reversal.  */
static void
adjust_xxpermdi (rtx_insn *insn)
{
  rtx set = PATTERN (insn);
  rtx select = XEXP (set, 1);
  rtx concat = XEXP (select, 0);
  rtx src0 = XEXP (concat, 0);
  XEXP (concat, 0) = XEXP (concat, 1);
  XEXP (concat, 1) = src0;
  rtx parallel = XEXP (select, 1);
  int lane0 = INTVAL (XVECEXP (parallel, 0, 0));
  int lane1 = INTVAL (XVECEXP (parallel, 0, 1));
  int new_lane0 = 3 - lane1;
  int new_lane1 = 3 - lane0;
  XVECEXP (parallel, 0, 0) = GEN_INT (new_lane0);
  XVECEXP (parallel, 0, 1) = GEN_INT (new_lane1);
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Changing lanes for xxpermdi %d\n", INSN_UID (insn));
}

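/* For example, an xxpermdi selecting lanes { 0, 2 } (the first
   doubleword of each input) becomes, once the inputs are exchanged,
   a selection of lanes { 3 - 2, 3 - 0 } = { 1, 3 }; in the swapped
   register images those positions hold exactly the original lane-0
   values, so the result is correct in its own swapped form.  */
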
/* Given INSN containing a VEC_CONCAT operation of two doublewords,
   reverse the order of its inputs.  */
static void
adjust_concat (rtx_insn *insn)
{
  rtx set = PATTERN (insn);
  rtx concat = XEXP (set, 1);
  rtx src0 = XEXP (concat, 0);
  XEXP (concat, 0) = XEXP (concat, 1);
  XEXP (concat, 1) = src0;
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Reversing inputs for concat %d\n", INSN_UID (insn));
}

/* Given an UNSPEC_VPERM insn, modify the mask loaded from the
   constant pool to reflect swapped doublewords.  */
static void
adjust_vperm (rtx_insn *insn)
{
  /* We previously determined that the UNSPEC_VPERM was fed by a
     swap of a swapping load of a TOC-relative constant pool symbol.
     Find the MEM in the swapping load and replace it with a MEM for
     the adjusted mask constant.  */
  rtx set = PATTERN (insn);
  rtx mask_reg = XVECEXP (SET_SRC (set), 0, 2);

  /* Find the swap.  */
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  df_ref use;
  rtx_insn *swap_insn = 0;
  FOR_EACH_INSN_INFO_USE (use, insn_info)
    if (rtx_equal_p (DF_REF_REG (use), mask_reg))
      {
	struct df_link *def_link = DF_REF_CHAIN (use);
	gcc_assert (def_link && !def_link->next);
	swap_insn = DF_REF_INSN (def_link->ref);
	break;
      }
  gcc_assert (swap_insn);

  /* Find the load.  */
  insn_info = DF_INSN_INFO_GET (swap_insn);
  rtx_insn *load_insn = 0;
  FOR_EACH_INSN_INFO_USE (use, insn_info)
    {
      struct df_link *def_link = DF_REF_CHAIN (use);
      gcc_assert (def_link && !def_link->next);
      load_insn = DF_REF_INSN (def_link->ref);
      break;
    }
  gcc_assert (load_insn);

  /* Find the TOC-relative symbol access.  */
  insn_info = DF_INSN_INFO_GET (load_insn);
  rtx_insn *tocrel_insn = 0;
  FOR_EACH_INSN_INFO_USE (use, insn_info)
    {
      struct df_link *def_link = DF_REF_CHAIN (use);
      gcc_assert (def_link && !def_link->next);
      tocrel_insn = DF_REF_INSN (def_link->ref);
      break;
    }
  gcc_assert (tocrel_insn);

  /* Find the embedded CONST_VECTOR.  We have to call toc_relative_expr_p
     to set tocrel_base; otherwise it would be unnecessary as we've
     already established it will return true.  */
  rtx base, offset;
  rtx tocrel_expr = SET_SRC (PATTERN (tocrel_insn));
  /* There is an extra level of indirection for small/large code models.  */
  if (GET_CODE (tocrel_expr) == MEM)
    tocrel_expr = XEXP (tocrel_expr, 0);
  if (!toc_relative_expr_p (tocrel_expr, false))
    gcc_unreachable ();
  split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
  rtx const_vector = get_pool_constant (base);
  /* With the extra indirection, get_pool_constant will produce the
     real constant from the reg_equal expression, so get the real
     constant.  */
  if (GET_CODE (const_vector) == SYMBOL_REF)
    const_vector = get_pool_constant (const_vector);
  gcc_assert (GET_CODE (const_vector) == CONST_VECTOR);

  /* Create an adjusted mask from the initial mask.  */
  unsigned int new_mask[16], i, val;
  for (i = 0; i < 16; ++i)
    {
      val = INTVAL (XVECEXP (const_vector, 0, i));
      if (val < 16)
	new_mask[i] = (val + 8) % 16;
      else
	new_mask[i] = ((val + 8) % 16) + 16;
    }

  /* Create a new CONST_VECTOR and a MEM that references it.  */
  rtx vals = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
  for (i = 0; i < 16; ++i)
    XVECEXP (vals, 0, i) = GEN_INT (new_mask[i]);
  rtx new_const_vector = gen_rtx_CONST_VECTOR (V16QImode, XVEC (vals, 0));
  rtx new_mem = force_const_mem (V16QImode, new_const_vector);
  /* This gives us a MEM whose base operand is a SYMBOL_REF, which we
     can't recognize.  Force the SYMBOL_REF into a register.  */
  if (!REG_P (XEXP (new_mem, 0)))
    {
      rtx base_reg = force_reg (Pmode, XEXP (new_mem, 0));
      XEXP (new_mem, 0) = base_reg;
      /* Move the newly created insn ahead of the load insn.  */
      rtx_insn *force_insn = get_last_insn ();
      remove_insn (force_insn);
      rtx_insn *before_load_insn = PREV_INSN (load_insn);
      add_insn_after (force_insn, before_load_insn, BLOCK_FOR_INSN (load_insn));
      df_insn_rescan (before_load_insn);
      df_insn_rescan (force_insn);
    }

  /* Replace the MEM in the load instruction and rescan it.  */
  XEXP (SET_SRC (PATTERN (load_insn)), 0) = new_mem;
  INSN_CODE (load_insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (load_insn);

  if (dump_file)
    fprintf (dump_file, "Adjusting mask for vperm %d\n", INSN_UID (insn));
}

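/* The mask adjustment above rotates each byte selector by half a
   vector within its source operand: for example, selector 0 (byte 0
   of the first input) becomes 8, and selector 19 (byte 3 of the
   second input) becomes ((19 + 8) % 16) + 16 = 27.  Since a swapped
   input holds its true byte B at position (B + 8) % 16, each adjusted
   selector picks up exactly the byte the original mask named.  */
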
/* The insn described by INSN_ENTRY[I] can be swapped, but only
   with special handling.  Take care of that here.  */
static void
handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
{
  rtx_insn *insn = insn_entry[i].insn;
  rtx body = PATTERN (insn);

  switch (insn_entry[i].special_handling)
    {
    default:
      gcc_unreachable ();
    case SH_CONST_VECTOR:
      {
	/* A CONST_VECTOR will only show up somewhere in the RHS of a SET.  */
	gcc_assert (GET_CODE (body) == SET);
	rtx rhs = SET_SRC (body);
	swap_const_vector_halves (rhs);
	if (dump_file)
	  fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
	break;
      }
    case SH_SUBREG:
      /* A subreg of the same size is already safe.  For subregs that
	 select a smaller portion of a reg, adjust the index for
	 swapped doublewords.  */
      adjust_subreg_index (body);
      if (dump_file)
	fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
      break;
    case SH_NOSWAP_LD:
      /* Convert a non-permuting load to a permuting one.  */
      permute_load (insn);
      break;
    case SH_NOSWAP_ST:
      /* Convert a non-permuting store to a permuting one.  */
      permute_store (insn);
      break;
    case SH_EXTRACT:
      /* Change the lane on an extract operation.  */
      adjust_extract (insn);
      break;
    case SH_SPLAT:
      /* Change the lane on a direct-splat operation.  */
      adjust_splat (insn);
      break;
    case SH_XXPERMDI:
      /* Change the lanes on an XXPERMDI operation.  */
      adjust_xxpermdi (insn);
      break;
    case SH_CONCAT:
      /* Reverse the order of a concatenation operation.  */
      adjust_concat (insn);
      break;
    case SH_VPERM:
      /* Change the mask loaded from the constant pool for a VPERM.  */
      adjust_vperm (insn);
      break;
    }
}

/* Find the insn from the Ith table entry, which is known to be a
   register swap Y = SWAP(X).  Replace it with a copy Y = X.  */
static void
replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
{
  rtx_insn *insn = insn_entry[i].insn;
  rtx body = PATTERN (insn);
  rtx src_reg = XEXP (SET_SRC (body), 0);
  rtx copy = gen_rtx_SET (SET_DEST (body), src_reg);
  rtx_insn *new_insn = emit_insn_before (copy, insn);
  set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
  df_insn_rescan (new_insn);

  if (dump_file)
    {
      unsigned int new_uid = INSN_UID (new_insn);
      fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
    }

  df_insn_delete (insn);
  remove_insn (insn);
  insn->set_deleted ();
}

/* Dump the swap table to DUMP_FILE.  */
static void
dump_swap_insn_table (swap_web_entry *insn_entry)
{
  int e = get_max_uid ();
  fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");

  for (int i = 0; i < e; ++i)
    if (insn_entry[i].is_relevant)
      {
	swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
	fprintf (dump_file, "%6d %6d  ", i,
		 pred_entry && pred_entry->insn
		 ? INSN_UID (pred_entry->insn) : 0);
	if (insn_entry[i].is_load)
	  fputs ("load ", dump_file);
	if (insn_entry[i].is_store)
	  fputs ("store ", dump_file);
	if (insn_entry[i].is_swap)
	  fputs ("swap ", dump_file);
	if (insn_entry[i].is_live_in)
	  fputs ("live-in ", dump_file);
	if (insn_entry[i].is_live_out)
	  fputs ("live-out ", dump_file);
	if (insn_entry[i].contains_subreg)
	  fputs ("subreg ", dump_file);
	if (insn_entry[i].is_128_int)
	  fputs ("int128 ", dump_file);
	if (insn_entry[i].is_call)
	  fputs ("call ", dump_file);
	if (insn_entry[i].is_swappable)
	  {
	    fputs ("swappable ", dump_file);
	    if (insn_entry[i].special_handling == SH_CONST_VECTOR)
	      fputs ("special:constvec ", dump_file);
	    else if (insn_entry[i].special_handling == SH_SUBREG)
	      fputs ("special:subreg ", dump_file);
	    else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
	      fputs ("special:load ", dump_file);
	    else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
	      fputs ("special:store ", dump_file);
	    else if (insn_entry[i].special_handling == SH_EXTRACT)
	      fputs ("special:extract ", dump_file);
	    else if (insn_entry[i].special_handling == SH_SPLAT)
	      fputs ("special:splat ", dump_file);
	    else if (insn_entry[i].special_handling == SH_XXPERMDI)
	      fputs ("special:xxpermdi ", dump_file);
	    else if (insn_entry[i].special_handling == SH_CONCAT)
	      fputs ("special:concat ", dump_file);
	    else if (insn_entry[i].special_handling == SH_VPERM)
	      fputs ("special:vperm ", dump_file);
	  }
	if (insn_entry[i].web_not_optimizable)
	  fputs ("unoptimizable ", dump_file);
	if (insn_entry[i].will_delete)
	  fputs ("delete ", dump_file);
	fputs ("\n", dump_file);
      }
  fputs ("\n", dump_file);
}

/* Return RTX with its address canonicalized to (reg) or (+ reg reg).
   Here RTX is an (& addr (const_int -16)).  Always return a new copy
   to avoid problems with combine.  */
static rtx
alignment_with_canonical_addr (rtx align)
{
  rtx canon;
  rtx addr = XEXP (align, 0);

  if (REG_P (addr))
    canon = addr;

  else if (GET_CODE (addr) == PLUS)
    {
      rtx addrop0 = XEXP (addr, 0);
      rtx addrop1 = XEXP (addr, 1);

      if (!REG_P (addrop0))
	addrop0 = force_reg (GET_MODE (addrop0), addrop0);

      if (!REG_P (addrop1))
	addrop1 = force_reg (GET_MODE (addrop1), addrop1);

      canon = gen_rtx_PLUS (GET_MODE (addr), addrop0, addrop1);
    }

  else
    canon = force_reg (GET_MODE (addr), addr);

  return gen_rtx_AND (GET_MODE (align), canon, GEN_INT (-16));
}

/* Check whether an rtx is an alignment mask, and if so, return
   a fully-expanded rtx for the masking operation.  */
static rtx
alignment_mask (rtx_insn *insn)
{
  rtx body = PATTERN (insn);

  if (GET_CODE (body) != SET
      || GET_CODE (SET_SRC (body)) != AND
      || !REG_P (XEXP (SET_SRC (body), 0)))
    return 0;

  rtx mask = XEXP (SET_SRC (body), 1);

  if (GET_CODE (mask) == CONST_INT)
    {
      if (INTVAL (mask) == -16)
	return alignment_with_canonical_addr (SET_SRC (body));
      else
	return 0;
    }

  if (!REG_P (mask))
    return 0;

  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  df_ref use;
  rtx real_mask = 0;

  FOR_EACH_INSN_INFO_USE (use, insn_info)
    {
      if (!rtx_equal_p (DF_REF_REG (use), mask))
	continue;

      struct df_link *def_link = DF_REF_CHAIN (use);
      if (!def_link || def_link->next)
	return 0;

      rtx_insn *const_insn = DF_REF_INSN (def_link->ref);
      rtx const_body = PATTERN (const_insn);
      if (GET_CODE (const_body) != SET)
	return 0;

      real_mask = SET_SRC (const_body);

      if (GET_CODE (real_mask) != CONST_INT
	  || INTVAL (real_mask) != -16)
	return 0;
    }

  if (real_mask == 0)
    return 0;

  return alignment_with_canonical_addr (SET_SRC (body));
}

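/* For example, given

     (set (reg:DI rM) (const_int -16))
     ...
     (set (reg:DI rB) (and:DI (reg:DI rA) (reg:DI rM)))

   alignment_mask chases rM to its unique -16 definition and returns a
   fresh (and:DI (reg:DI rA) (const_int -16)); rA, rB, and rM are
   placeholder registers.  */
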
/* Given INSN that's a load or store based at BASE_REG, look for a
   feeding computation that aligns its address on a 16-byte boundary.  */
static rtx
find_alignment_op (rtx_insn *insn, rtx base_reg)
{
  df_ref base_use;
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  rtx and_operation = 0;

  FOR_EACH_INSN_INFO_USE (base_use, insn_info)
    {
      if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
	continue;

      struct df_link *base_def_link = DF_REF_CHAIN (base_use);
      if (!base_def_link || base_def_link->next)
	break;

      rtx_insn *and_insn = DF_REF_INSN (base_def_link->ref);
      and_operation = alignment_mask (and_insn);
      if (and_operation != 0)
	break;
    }

  return and_operation;
}

struct del_info { bool replace; rtx_insn *replace_insn; };

/* If INSN is the load for an lvx pattern, put it in canonical form.  */
static void
recombine_lvx_pattern (rtx_insn *insn, del_info *to_delete)
{
  rtx body = PATTERN (insn);
  gcc_assert (GET_CODE (body) == SET
	      && GET_CODE (SET_SRC (body)) == VEC_SELECT
	      && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM);

  rtx mem = XEXP (SET_SRC (body), 0);
  rtx base_reg = XEXP (mem, 0);

  rtx and_operation = find_alignment_op (insn, base_reg);

  if (and_operation != 0)
    {
      df_ref def;
      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
      FOR_EACH_INSN_INFO_DEF (def, insn_info)
	{
	  struct df_link *link = DF_REF_CHAIN (def);
	  if (!link || link->next)
	    break;

	  rtx_insn *swap_insn = DF_REF_INSN (link->ref);
	  if (!insn_is_swap_p (swap_insn)
	      || insn_is_load_p (swap_insn)
	      || insn_is_store_p (swap_insn))
	    break;

	  /* Expected lvx pattern found.  Change the swap to
	     a copy, and propagate the AND operation into the
	     load.  */
	  to_delete[INSN_UID (swap_insn)].replace = true;
	  to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;

	  XEXP (mem, 0) = and_operation;
	  SET_SRC (body) = mem;
	  INSN_CODE (insn) = -1; /* Force re-recognition.  */
	  df_insn_rescan (insn);

	  if (dump_file)
	    fprintf (dump_file, "lvx opportunity found at %d\n",
		     INSN_UID (insn));
	}
    }
}

/* If INSN is the store for an stvx pattern, put it in canonical form.  */
static void
recombine_stvx_pattern (rtx_insn *insn, del_info *to_delete)
{
  rtx body = PATTERN (insn);
  gcc_assert (GET_CODE (body) == SET
	      && GET_CODE (SET_DEST (body)) == MEM
	      && GET_CODE (SET_SRC (body)) == VEC_SELECT);
  rtx mem = SET_DEST (body);
  rtx base_reg = XEXP (mem, 0);

  rtx and_operation = find_alignment_op (insn, base_reg);

  if (and_operation != 0)
    {
      rtx src_reg = XEXP (SET_SRC (body), 0);
      df_ref src_use;
      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
      FOR_EACH_INSN_INFO_USE (src_use, insn_info)
	{
	  if (!rtx_equal_p (DF_REF_REG (src_use), src_reg))
	    continue;

	  struct df_link *link = DF_REF_CHAIN (src_use);
	  if (!link || link->next)
	    break;

	  rtx_insn *swap_insn = DF_REF_INSN (link->ref);
	  if (!insn_is_swap_p (swap_insn)
	      || insn_is_load_p (swap_insn)
	      || insn_is_store_p (swap_insn))
	    break;

	  /* Expected stvx pattern found.  Change the swap to
	     a copy, and propagate the AND operation into the
	     store.  */
	  to_delete[INSN_UID (swap_insn)].replace = true;
	  to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;

	  XEXP (mem, 0) = and_operation;
	  SET_SRC (body) = src_reg;
	  INSN_CODE (insn) = -1; /* Force re-recognition.  */
	  df_insn_rescan (insn);

	  if (dump_file)
	    fprintf (dump_file, "stvx opportunity found at %d\n",
		     INSN_UID (insn));
	}
    }
}

/* Look for patterns created from builtin lvx and stvx calls, and
   canonicalize them to be properly recognized as such.  */
static void
recombine_lvx_stvx_patterns (function *fun)
{
  int i;
  basic_block bb;
  rtx_insn *insn;

  int num_insns = get_max_uid ();
  del_info *to_delete = XCNEWVEC (del_info, num_insns);

  FOR_ALL_BB_FN (bb, fun)
    FOR_BB_INSNS (bb, insn)
      {
	if (!NONDEBUG_INSN_P (insn))
	  continue;

	if (insn_is_load_p (insn) && insn_is_swap_p (insn))
	  recombine_lvx_pattern (insn, to_delete);
	else if (insn_is_store_p (insn) && insn_is_swap_p (insn))
	  recombine_stvx_pattern (insn, to_delete);
      }

  /* Turning swaps into copies is delayed until now, to avoid problems
     with deleting instructions during the insn walk.  */
  for (i = 0; i < num_insns; i++)
    if (to_delete[i].replace)
      {
	rtx swap_body = PATTERN (to_delete[i].replace_insn);
	rtx src_reg = XEXP (SET_SRC (swap_body), 0);
	rtx copy = gen_rtx_SET (SET_DEST (swap_body), src_reg);
	rtx_insn *new_insn = emit_insn_before (copy,
					       to_delete[i].replace_insn);
	set_block_for_insn (new_insn,
			    BLOCK_FOR_INSN (to_delete[i].replace_insn));
	df_insn_rescan (new_insn);
	df_insn_delete (to_delete[i].replace_insn);
	remove_insn (to_delete[i].replace_insn);
	to_delete[i].replace_insn->set_deleted ();
      }

  free (to_delete);
}

/* Main entry point for this pass.  */
unsigned int
rs6000_analyze_swaps (function *fun)
{
  swap_web_entry *insn_entry;
  basic_block bb;
  rtx_insn *insn, *curr_insn = 0;

  /* Dataflow analysis for use-def chains.  */
  df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
  df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
  df_analyze ();
  df_set_flags (DF_DEFER_INSN_RESCAN);

  /* Pre-pass to recombine lvx and stvx patterns so we don't lose info.  */
  recombine_lvx_stvx_patterns (fun);

  /* Allocate structure to represent webs of insns.  */
  insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());

  /* Walk the insns to gather basic data.  */
  FOR_ALL_BB_FN (bb, fun)
    FOR_BB_INSNS_SAFE (bb, insn, curr_insn)
      {
	unsigned int uid = INSN_UID (insn);
	if (NONDEBUG_INSN_P (insn))
	  {
	    insn_entry[uid].insn = insn;

	    if (GET_CODE (insn) == CALL_INSN)
	      insn_entry[uid].is_call = 1;

	    /* Walk the uses and defs to see if we mention vector regs.
	       Record any constraints on optimization of such mentions.  */
	    struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
	    df_ref mention;
	    FOR_EACH_INSN_INFO_USE (mention, insn_info)
	      {
		/* We use DF_REF_REAL_REG here to get inside any subregs.  */
		machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));

		/* If a use gets its value from a call insn, it will be
		   a hard register and will look like (reg:V4SI 3 3).
		   The df analysis creates two mentions for GPR3 and GPR4,
		   both DImode.  We must recognize this and treat it as a
		   vector mention to ensure the call is unioned with this
		   use.  */
		if (mode == DImode && DF_REF_INSN_INFO (mention))
		  {
		    rtx feeder = DF_REF_INSN (mention);
		    /* FIXME: It is pretty hard to get from the df mention
		       to the mode of the use in the insn.  We arbitrarily
		       pick a vector mode here, even though the use might
		       be a real DImode.  We can be too conservative
		       (create a web larger than necessary) because of
		       this, so consider eventually fixing this.  */
		    if (GET_CODE (feeder) == CALL_INSN)
		      mode = V4SImode;
		  }

		if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
		  {
		    insn_entry[uid].is_relevant = 1;
		    if (mode == TImode || mode == V1TImode
			|| FLOAT128_VECTOR_P (mode))
		      insn_entry[uid].is_128_int = 1;
		    if (DF_REF_INSN_INFO (mention))
		      insn_entry[uid].contains_subreg
			= !rtx_equal_p (DF_REF_REG (mention),
					DF_REF_REAL_REG (mention));
		    union_defs (insn_entry, insn, mention);
		  }
	      }
	    FOR_EACH_INSN_INFO_DEF (mention, insn_info)
	      {
		/* We use DF_REF_REAL_REG here to get inside any subregs.  */
		machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));

		/* If we're loading up a hard vector register for a call,
		   it looks like (set (reg:V4SI 9 9) (...)).  The df
		   analysis creates two mentions for GPR9 and GPR10, both
		   DImode.  So relying on the mode from the mentions
		   isn't sufficient to ensure we union the call into the
		   web with the parameter setup code.  */
		if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
		    && ALTIVEC_OR_VSX_VECTOR_MODE
			 (GET_MODE (SET_DEST (PATTERN (insn)))))
		  mode = GET_MODE (SET_DEST (PATTERN (insn)));

		if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
		  {
		    insn_entry[uid].is_relevant = 1;
		    if (mode == TImode || mode == V1TImode
			|| FLOAT128_VECTOR_P (mode))
		      insn_entry[uid].is_128_int = 1;
		    if (DF_REF_INSN_INFO (mention))
		      insn_entry[uid].contains_subreg
			= !rtx_equal_p (DF_REF_REG (mention),
					DF_REF_REAL_REG (mention));
		    /* REG_FUNCTION_VALUE_P is not valid for subregs.  */
		    else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
		      insn_entry[uid].is_live_out = 1;
		    union_uses (insn_entry, insn, mention);
		  }
	      }

	    if (insn_entry[uid].is_relevant)
	      {
		/* Determine if this is a load or store.  */
		insn_entry[uid].is_load = insn_is_load_p (insn);
		insn_entry[uid].is_store = insn_is_store_p (insn);

		/* Determine if this is a doubleword swap.  If not,
		   determine whether it can legally be swapped.  */
		if (insn_is_swap_p (insn))
		  insn_entry[uid].is_swap = 1;
		else
		  {
		    unsigned int special = SH_NONE;
		    insn_entry[uid].is_swappable
		      = insn_is_swappable_p (insn_entry, insn, &special);
		    if (special != SH_NONE && insn_entry[uid].contains_subreg)
		      insn_entry[uid].is_swappable = 0;
		    else if (special != SH_NONE)
		      insn_entry[uid].special_handling = special;
		    else if (insn_entry[uid].contains_subreg)
		      insn_entry[uid].special_handling = SH_SUBREG;
		  }
	      }
	  }
      }

  if (dump_file)
    {
      fprintf (dump_file, "\nSwap insn entry table when first built\n");
      dump_swap_insn_table (insn_entry);
    }

  /* Record unoptimizable webs.  */
  unsigned e = get_max_uid (), i;
  for (i = 0; i < e; ++i)
    {
      if (!insn_entry[i].is_relevant)
	continue;

      swap_web_entry *root
	= (swap_web_entry*)(&insn_entry[i])->unionfind_root ();

      if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
	  || (insn_entry[i].contains_subreg
	      && insn_entry[i].special_handling != SH_SUBREG)
	  || insn_entry[i].is_128_int || insn_entry[i].is_call
	  || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
	root->web_not_optimizable = 1;

      /* If we have loads or stores that aren't permuting then the
	 optimization isn't appropriate.  */
      else if ((insn_entry[i].is_load || insn_entry[i].is_store)
	       && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
	root->web_not_optimizable = 1;

      /* If we have permuting loads or stores that are not accompanied
	 by a register swap, the optimization isn't appropriate.  */
      else if (insn_entry[i].is_load && insn_entry[i].is_swap)
	{
	  rtx insn = insn_entry[i].insn;
	  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
	  df_ref def;

	  FOR_EACH_INSN_INFO_DEF (def, insn_info)
	    {
	      struct df_link *link = DF_REF_CHAIN (def);

	      if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
		{
		  root->web_not_optimizable = 1;
		  break;
		}
	    }
	}
      else if (insn_entry[i].is_store && insn_entry[i].is_swap)
	{
	  rtx insn = insn_entry[i].insn;
	  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
	  df_ref use;

	  FOR_EACH_INSN_INFO_USE (use, insn_info)
	    {
	      struct df_link *link = DF_REF_CHAIN (use);

	      if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
		{
		  root->web_not_optimizable = 1;
		  break;
		}
	    }
	}
    }

  if (dump_file)
    {
      fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
      dump_swap_insn_table (insn_entry);
    }

  /* For each load and store in an optimizable web (which implies
     the loads and stores are permuting), find the associated
     register swaps and mark them for removal.  Due to various
     optimizations we may mark the same swap more than once.  Also
     perform special handling for swappable insns that require it.  */
  for (i = 0; i < e; ++i)
    if ((insn_entry[i].is_load || insn_entry[i].is_store)
	&& insn_entry[i].is_swap)
      {
	swap_web_entry* root_entry
	  = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
	if (!root_entry->web_not_optimizable)
	  mark_swaps_for_removal (insn_entry, i);
      }
    else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
      {
	swap_web_entry* root_entry
	  = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
	if (!root_entry->web_not_optimizable)
	  handle_special_swappables (insn_entry, i);
      }

  /* Now delete the swaps marked for removal.  */
  for (i = 0; i < e; ++i)
    if (insn_entry[i].will_delete)
      replace_swap_with_copy (insn_entry, i);

  /* Clean up.  */
  free (insn_entry);
  return 0;
}

const pass_data pass_data_analyze_swaps =
{
  RTL_PASS, /* type */
  "swaps", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_analyze_swaps : public rtl_opt_pass
{
public:
  pass_analyze_swaps (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_analyze_swaps, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
	    && !TARGET_P9_VECTOR && rs6000_optimize_swaps);
  }

  virtual unsigned int execute (function *fun)
  {
    return rs6000_analyze_swaps (fun);
  }

}; // class pass_analyze_swaps

rtl_opt_pass *
make_pass_analyze_swaps (gcc::context *ctxt)
{
  return new pass_analyze_swaps (ctxt);
}

#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT || !TARGET_FPRS)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var):

       *fenv_var = __builtin_mffs ();
       double fenv_hold;
       *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
       __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

       double fenv_clear = __builtin_mffs ();
       *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
       __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask to clear the entire lower word of the FPSCR image, including
     all exception status and enable bits.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear,
			    call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node,
				fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var):

       double old_fenv = __builtin_mffs ();
       double fenv_update;
       *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL)
				  | (*(uint64_t*)&fenv_var & 0x1ff80fff);
       __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}

/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"